sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
davila7/claude-code-templates:cli-tool/components/skills/scientific/scholar-evaluation/scripts/calculate_scores.py | #!/usr/bin/env python3
"""
ScholarEval Score Calculator
Calculate aggregate evaluation scores from dimension-level ratings.
Supports weighted averaging, threshold analysis, and score visualization.
Usage:
python calculate_scores.py --scores <dimension_scores.json> --output <report.txt>
python calculate_scores.py --scores <dimension_scores.json> --weights <weights.json>
python calculate_scores.py --interactive
Author: ScholarEval Framework
License: MIT
"""
import json
import argparse
import sys
from typing import Dict, List, Optional
from pathlib import Path
# Default dimension weights (total = 100%)
DEFAULT_WEIGHTS = {
    "problem_formulation": 0.15,
    "literature_review": 0.15,
    "methodology": 0.20,
    "data_collection": 0.10,
    "analysis": 0.15,
    "results": 0.10,
    "writing": 0.10,
    "citations": 0.05
}
# Quality level definitions, keyed by inclusive (low, high) score bands.
# The bands are contiguous so every score in [0.0, 5.0] maps to a level;
# the previous bands left gaps (e.g. 4.45 fell between 4.4 and 4.5 and
# resolved to "Unknown"). Shared edges (e.g. exactly 4.5) resolve to the
# higher level because get_quality_level checks entries in insertion order.
QUALITY_LEVELS = {
    (4.5, 5.0): ("Exceptional", "Ready for top-tier publication"),
    (4.0, 4.5): ("Strong", "Publication-ready with minor revisions"),
    (3.5, 4.0): ("Good", "Major revisions required, promising work"),
    (3.0, 3.5): ("Acceptable", "Significant revisions needed"),
    (2.0, 3.0): ("Weak", "Fundamental issues, major rework required"),
    (0.0, 2.0): ("Poor", "Not suitable without complete revision")
}
def load_scores(filepath: Path) -> Dict[str, float]:
    """Read dimension scores from a JSON file and validate the 1-5 range.

    Exits the program with status 1 on a missing file, malformed JSON,
    or any score outside the inclusive [1, 5] rubric range.
    """
    try:
        with open(filepath, 'r') as handle:
            score_map = json.load(handle)
        for name, value in score_map.items():
            # Every rating must fall on the 1-5 rubric scale.
            if not 1 <= value <= 5:
                raise ValueError(f"Score for {name} must be between 1 and 5, got {value}")
    except FileNotFoundError:
        print(f"Error: File not found: {filepath}")
        sys.exit(1)
    except json.JSONDecodeError:
        print(f"Error: Invalid JSON in {filepath}")
        sys.exit(1)
    except ValueError as e:
        print(f"Error: {e}")
        sys.exit(1)
    return score_map
def load_weights(filepath: Optional[Path] = None) -> Dict[str, float]:
    """Read dimension weights from JSON, or fall back to DEFAULT_WEIGHTS.

    The weights must sum to 1.0 (within a small floating-point tolerance).
    Exits the program with status 1 on a missing file, malformed JSON,
    or a bad weight sum.
    """
    if filepath is None:
        return DEFAULT_WEIGHTS
    try:
        with open(filepath, 'r') as handle:
            weight_map = json.load(handle)
    except FileNotFoundError:
        print(f"Error: File not found: {filepath}")
        sys.exit(1)
    except json.JSONDecodeError:
        print(f"Error: Invalid JSON in {filepath}")
        sys.exit(1)
    total = sum(weight_map.values())
    # Allow small floating point errors around 1.0.
    if not 0.99 <= total <= 1.01:
        print(f"Error: Weights must sum to 1.0, got {total}")
        sys.exit(1)
    return weight_map
def calculate_weighted_average(scores: Dict[str, float], weights: Dict[str, float]) -> float:
    """Calculate the weighted average of the scored dimensions.

    Dimension names are normalized (hyphens -> underscores, lowercased)
    before weight lookup; dimensions without a weight contribute nothing.
    When only a subset of dimensions is scored, the result is normalized
    by the weight actually used, so it stays on the original 1-5 scale.

    Args:
        scores: Dimension name -> score (1-5).
        weights: Normalized dimension name -> weight fraction.

    Returns:
        Weighted average score, or 0.0 if no scored dimension has weight.
    """
    total_score = 0.0
    total_weight = 0.0
    for dimension, score in scores.items():
        # Handle dimension name variations (e.g., "problem_formulation" vs "problem-formulation")
        dim_key = dimension.replace('-', '_').lower()
        weight = weights.get(dim_key, 0.0)
        total_score += score * weight
        total_weight += weight
    if total_weight > 0:
        # BUG FIX: the previous formula multiplied by an extra
        # sum(weights)/total_weight factor, which inflated partial-score
        # averages beyond the 1-5 scale (e.g. one dim at weight 0.5,
        # score 4.0 produced 8.0). A weighted mean over the scored
        # dimensions is simply total_score / total_weight.
        return total_score / total_weight
    return 0.0
def get_quality_level(score: float) -> tuple:
    """Map a 0-5 score to its (level, description) quality band.

    Falls back to the nearest lower band when a score sits in a gap
    between band edges (e.g. 4.45 with bands ending 4.4 / starting 4.5),
    so any score in [0.0, 5.0] resolves to a level instead of "Unknown".

    Returns:
        (level, description) tuple; ("Unknown", ...) for out-of-range scores.
    """
    # Exact band match first (first matching entry wins, insertion order).
    for (low, high), (level, description) in QUALITY_LEVELS.items():
        if low <= score <= high:
            return level, description
    # Gap fallback: pick the highest band whose lower bound the score meets.
    if 0.0 <= score <= 5.0:
        for (low, _high), (level, description) in sorted(
                QUALITY_LEVELS.items(), key=lambda entry: entry[0][0], reverse=True):
            if score >= low:
                return level, description
    return "Unknown", "Score out of expected range"
def generate_bar_chart(scores: Dict[str, float], max_width: int = 50) -> str:
    """Generate a Unicode bar chart of dimension scores, highest first.

    Args:
        scores: Dimension name -> score; bars are scaled against 5.0.
        max_width: Bar length, in characters, for a perfect 5.0 score.

    Returns:
        One chart row per dimension, joined with newlines. Returns ""
        for an empty mapping (previously max() raised ValueError).
    """
    if not scores:
        return ""
    max_name_len = max(len(name) for name in scores.keys())
    lines = []
    for dimension, score in sorted(scores.items(), key=lambda x: x[1], reverse=True):
        bar_length = int((score / 5.0) * max_width)
        # '█' (full block) and '│' (vertical line) replace glyphs that were
        # mojibake'd to 'β' in the corrupted source.
        bar = '█' * bar_length
        padding = ' ' * (max_name_len - len(dimension))
        lines.append(f" {dimension}{padding} │ {bar} {score:.2f}")
    return '\n'.join(lines)
def identify_strengths_weaknesses(scores: Dict[str, float]) -> tuple:
    """Pick out up to three strongest (>= 4.0) and three weakest (< 3.5) dimensions.

    Returns:
        (strengths, weaknesses) lists of dimension names, both ordered
        from highest score to lowest.
    """
    ranked = sorted(scores.items(), key=lambda item: item[1], reverse=True)
    strengths = []
    for name, value in ranked[:3]:
        if value >= 4.0:
            strengths.append(name)
    weaknesses = []
    for name, value in ranked[-3:]:
        if value < 3.5:
            weaknesses.append(name)
    return strengths, weaknesses
def generate_report(scores: Dict[str, float], weights: Dict[str, float],
                    output_file: Optional[Path] = None) -> str:
    """Build the full evaluation report and optionally write it to disk.

    The report contains the weighted overall score, a per-dimension bar
    chart, a weighted breakdown, strengths/weaknesses, and score-band
    recommendations.

    Args:
        scores: Dimension name -> score (1-5).
        weights: Dimension name -> weight fraction (should sum to ~1.0).
        output_file: If given, the report is also written to this path;
            write failures are reported but do not abort.

    Returns:
        The complete report as a single string.
    """
    overall_score = calculate_weighted_average(scores, weights)
    quality_level, quality_desc = get_quality_level(overall_score)
    strengths, weaknesses = identify_strengths_weaknesses(scores)
    report_lines = [
        "="*70,
        "SCHOLAREVAL SCORE REPORT",
        "="*70,
        "",
        f"Overall Score: {overall_score:.2f} / 5.00",
        f"Quality Level: {quality_level}",
        f"Assessment: {quality_desc}",
        "",
        "="*70,
        "DIMENSION SCORES",
        "="*70,
        "",
        generate_bar_chart(scores),
        "",
        "="*70,
        "DETAILED BREAKDOWN",
        "="*70,
        ""
    ]
    # Add detailed scores with weights (names normalized for weight lookup).
    for dimension, score in sorted(scores.items()):
        dim_key = dimension.replace('-', '_').lower()
        weight = weights.get(dim_key, 0.0)
        weighted_contribution = score * weight
        percentage = weight * 100
        report_lines.append(
            f" {dimension:25s} {score:.2f}/5.00 "
            f"(weight: {percentage:4.1f}%, contribution: {weighted_contribution:.3f})"
        )
    report_lines.extend([
        "",
        "="*70,
        "ASSESSMENT SUMMARY",
        "="*70,
        ""
    ])
    # Bullets use '•' — the source had been mojibake'd to 'β’'.
    if strengths:
        report_lines.append("Top Strengths:")
        for dim in strengths:
            report_lines.append(f" • {dim}: {scores[dim]:.2f}/5.00")
        report_lines.append("")
    if weaknesses:
        report_lines.append("Areas for Improvement:")
        for dim in weaknesses:
            report_lines.append(f" • {dim}: {scores[dim]:.2f}/5.00")
        report_lines.append("")
    # Add recommendations based on score
    report_lines.extend([
        "="*70,
        "RECOMMENDATIONS",
        "="*70,
        ""
    ])
    if overall_score >= 4.5:
        report_lines.append(" Excellent work! Ready for submission to top-tier venues.")
    elif overall_score >= 4.0:
        report_lines.append(" Strong work. Address minor issues identified in weaknesses.")
    elif overall_score >= 3.5:
        report_lines.append(" Good foundation. Focus on major revisions in weak dimensions.")
    elif overall_score >= 3.0:
        report_lines.append(" Significant revisions needed. Prioritize weakest dimensions.")
    elif overall_score >= 2.0:
        report_lines.append(" Major rework required. Consider restructuring approach.")
    else:
        report_lines.append(" Fundamental revision needed across multiple dimensions.")
    report_lines.append("")
    report_lines.append("="*70)
    report = '\n'.join(report_lines)
    # Write to file if specified; a failed write is reported, not fatal.
    if output_file:
        try:
            with open(output_file, 'w') as f:
                f.write(report)
            print(f"\nReport saved to: {output_file}")
        except IOError as e:
            print(f"Error writing to {output_file}: {e}")
    return report
def interactive_mode():
    """Run interactive score entry mode.

    Prompts on stdin for a 1-5 score per dimension (a blank line skips a
    dimension), prints a report built with DEFAULT_WEIGHTS, and optionally
    writes the report to a file chosen by the user.
    """
    print("ScholarEval Interactive Score Calculator")
    print("="*50)
    print("\nEnter scores for each dimension (1-5):")
    print("(Press Enter to skip a dimension)\n")
    scores = {}
    # Canonical dimension keys; these match the keys of DEFAULT_WEIGHTS.
    dimensions = [
        "problem_formulation",
        "literature_review",
        "methodology",
        "data_collection",
        "analysis",
        "results",
        "writing",
        "citations"
    ]
    for dim in dimensions:
        # Re-prompt until the user enters a valid score or a blank line (skip).
        while True:
            dim_display = dim.replace('_', ' ').title()
            user_input = input(f"{dim_display}: ").strip()
            if not user_input:
                # Blank input: skip this dimension entirely.
                break
            try:
                score = float(user_input)
                if 1 <= score <= 5:
                    scores[dim] = score
                    break
                else:
                    print(" Score must be between 1 and 5")
            except ValueError:
                print(" Invalid input. Please enter a number between 1 and 5")
    if not scores:
        # Nothing entered at all: no report to produce.
        print("\nNo scores entered. Exiting.")
        return
    print("\n" + "="*50)
    print("SCORES ENTERED:")
    for dim, score in scores.items():
        print(f" {dim.replace('_', ' ').title()}: {score}")
    print("\nCalculating overall assessment...\n")
    report = generate_report(scores, DEFAULT_WEIGHTS)
    print(report)
    # Ask if user wants to save
    save = input("\nSave report to file? (y/n): ").strip().lower()
    if save == 'y':
        filename = input("Enter filename [scholareval_report.txt]: ").strip()
        if not filename:
            filename = "scholareval_report.txt"
        # Regenerate with an output path so the report is written to disk.
        generate_report(scores, DEFAULT_WEIGHTS, Path(filename))
def main():
    """CLI entry point: parse arguments and dispatch to interactive or file mode."""
    parser = argparse.ArgumentParser(
        description="Calculate aggregate ScholarEval scores from dimension ratings",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Calculate from JSON file
python calculate_scores.py --scores my_scores.json
# Calculate with custom weights
python calculate_scores.py --scores my_scores.json --weights custom_weights.json
# Save report to file
python calculate_scores.py --scores my_scores.json --output report.txt
# Interactive mode
python calculate_scores.py --interactive
Score JSON Format:
{
"problem_formulation": 4.5,
"literature_review": 4.0,
"methodology": 3.5,
"data_collection": 4.0,
"analysis": 3.5,
"results": 4.0,
"writing": 4.5,
"citations": 4.0
}
Weights JSON Format:
{
"problem_formulation": 0.15,
"literature_review": 0.15,
"methodology": 0.20,
"data_collection": 0.10,
"analysis": 0.15,
"results": 0.10,
"writing": 0.10,
"citations": 0.05
}
"""
    )
    parser.add_argument('--scores', type=Path, help='Path to JSON file with dimension scores')
    parser.add_argument('--weights', type=Path, help='Path to JSON file with dimension weights (optional)')
    parser.add_argument('--output', type=Path, help='Path to output report file (optional)')
    parser.add_argument('--interactive', '-i', action='store_true', help='Run in interactive mode')
    args = parser.parse_args()
    # Interactive mode: prompt for scores on stdin and ignore file flags.
    if args.interactive:
        interactive_mode()
        return
    # File mode: --scores is mandatory; --weights and --output are optional.
    if not args.scores:
        parser.print_help()
        print("\nError: --scores is required (or use --interactive)")
        sys.exit(1)
    scores = load_scores(args.scores)
    weights = load_weights(args.weights)
    report = generate_report(scores, weights, args.output)
    # Print to stdout if no output file specified
    if not args.output:
        print(report)
if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scholar-evaluation/scripts/calculate_scores.py",
"license": "MIT License",
"lines": 306,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scientific-schematics/scripts/generate_schematic.py | #!/usr/bin/env python3
"""
Scientific schematic generation using Nano Banana Pro.
Generate any scientific diagram by describing it in natural language.
Nano Banana Pro handles everything automatically with smart iterative refinement.
Smart iteration: Only regenerates if quality is below threshold for your document type.
Quality review: Uses Gemini 3 Pro for professional scientific evaluation.
Usage:
# Generate for journal paper (highest quality threshold)
python generate_schematic.py "CONSORT flowchart" -o flowchart.png --doc-type journal
# Generate for presentation (lower threshold, faster)
python generate_schematic.py "Transformer architecture" -o transformer.png --doc-type presentation
# Generate for poster
python generate_schematic.py "MAPK signaling pathway" -o pathway.png --doc-type poster
"""
import argparse
import os
import subprocess
import sys
from pathlib import Path
def main():
    """Command-line interface.

    Thin wrapper: validates the API key, locates the sibling
    generate_schematic_ai.py script, forwards the arguments to it via a
    subprocess, and exits with the child's return code.
    """
    parser = argparse.ArgumentParser(
        description="Generate scientific schematics using AI with smart iterative refinement",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
How it works:
Simply describe your diagram in natural language
Nano Banana Pro generates it automatically with:
- Smart iteration (only regenerates if quality is below threshold)
- Quality review by Gemini 3 Pro
- Document-type aware quality thresholds
- Publication-ready output
Document Types (quality thresholds):
journal 8.5/10 - Nature, Science, peer-reviewed journals
conference 8.0/10 - Conference papers
thesis 8.0/10 - Dissertations, theses
grant 8.0/10 - Grant proposals
preprint 7.5/10 - arXiv, bioRxiv, etc.
report 7.5/10 - Technical reports
poster 7.0/10 - Academic posters
presentation 6.5/10 - Slides, talks
default 7.5/10 - General purpose
Examples:
# Generate for journal paper (strict quality)
python generate_schematic.py "CONSORT participant flow" -o flowchart.png --doc-type journal
# Generate for poster (moderate quality)
python generate_schematic.py "Transformer architecture" -o arch.png --doc-type poster
# Generate for slides (faster, lower threshold)
python generate_schematic.py "System diagram" -o system.png --doc-type presentation
# Custom max iterations
python generate_schematic.py "Complex pathway" -o pathway.png --iterations 2
# Verbose output
python generate_schematic.py "Circuit diagram" -o circuit.png -v
Environment Variables:
OPENROUTER_API_KEY Required for AI generation
"""
    )
    parser.add_argument("prompt",
                        help="Description of the diagram to generate")
    parser.add_argument("-o", "--output", required=True,
                        help="Output file path")
    parser.add_argument("--doc-type", default="default",
                        choices=["journal", "conference", "poster", "presentation",
                                 "report", "grant", "thesis", "preprint", "default"],
                        help="Document type for quality threshold (default: default)")
    parser.add_argument("--iterations", type=int, default=2,
                        help="Maximum refinement iterations (default: 2, max: 2)")
    parser.add_argument("--api-key",
                        help="OpenRouter API key (or use OPENROUTER_API_KEY env var)")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Verbose output")
    args = parser.parse_args()
    # Check for API key: explicit flag takes priority over the environment.
    api_key = args.api_key or os.getenv("OPENROUTER_API_KEY")
    if not api_key:
        print("Error: OPENROUTER_API_KEY environment variable not set")
        print("\nFor AI generation, you need an OpenRouter API key.")
        print("Get one at: https://openrouter.ai/keys")
        print("\nSet it with:")
        print(" export OPENROUTER_API_KEY='your_api_key'")
        print("\nOr use --api-key flag")
        sys.exit(1)
    # Find AI generation script next to this one.
    script_dir = Path(__file__).parent
    ai_script = script_dir / "generate_schematic_ai.py"
    if not ai_script.exists():
        print(f"Error: AI generation script not found: {ai_script}")
        sys.exit(1)
    # Build command; flags at their defaults are omitted from the child call.
    cmd = [sys.executable, str(ai_script), args.prompt, "-o", args.output]
    if args.doc_type != "default":
        cmd.extend(["--doc-type", args.doc_type])
    # Enforce max 2 iterations; only forwarded when reduced below the default.
    # NOTE(review): negative --iterations values pass through min() unchanged —
    # confirm the child script validates them.
    iterations = min(args.iterations, 2)
    if iterations != 2:
        cmd.extend(["--iterations", str(iterations)])
    # api_key is always truthy here (checked above); guard kept for safety.
    if api_key:
        cmd.extend(["--api-key", api_key])
    if args.verbose:
        cmd.append("-v")
    # Execute the child and propagate its exit status.
    try:
        result = subprocess.run(cmd, check=False)
        sys.exit(result.returncode)
    except Exception as e:
        print(f"Error executing AI generation: {e}")
        sys.exit(1)
if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scientific-schematics/scripts/generate_schematic.py",
"license": "MIT License",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scientific-schematics/scripts/generate_schematic_ai.py | #!/usr/bin/env python3
"""
AI-powered scientific schematic generation using Nano Banana Pro.
This script uses a smart iterative refinement approach:
1. Generate initial image with Nano Banana Pro
2. AI quality review using Gemini 3 Pro for scientific critique
3. Only regenerate if quality is below threshold for document type
4. Repeat until quality meets standards (max iterations)
Requirements:
- OPENROUTER_API_KEY environment variable
- requests library
Usage:
python generate_schematic_ai.py "Create a flowchart showing CONSORT participant flow" -o flowchart.png
python generate_schematic_ai.py "Neural network architecture diagram" -o architecture.png --iterations 2
python generate_schematic_ai.py "Simple block diagram" -o diagram.png --doc-type poster
"""
import argparse
import base64
import json
import os
import sys
import time
from pathlib import Path
from typing import Optional, Dict, Any, List, Tuple
try:
import requests
except ImportError:
print("Error: requests library not found. Install with: pip install requests")
sys.exit(1)
# Try to load .env file from multiple potential locations
def _load_env_file():
    """Locate and load a .env file without overriding existing env vars.

    Search order: the current working directory and up to four of its
    parents (the cwd itself is probed twice, matching the original
    behavior), then the script's directory and up to four of its parents.

    Returns:
        True if a .env file was found and loaded; False otherwise,
        including when python-dotenv is not installed.
    """
    try:
        from dotenv import load_dotenv
    except ImportError:
        return False  # python-dotenv not installed

    def attempt(directory: Path) -> bool:
        # Load <directory>/.env if present; never clobber existing env vars.
        candidate = directory / ".env"
        if candidate.exists():
            load_dotenv(dotenv_path=candidate, override=False)
            return True
        return False

    def ancestors(start: Path):
        # Yield start and up to four parents, stopping before the fs root
        # is revisited (root is its own parent).
        node = start
        for _ in range(5):
            yield node
            node = node.parent
            if node == node.parent:
                return

    # Current working directory first (checked again by the walk below).
    if attempt(Path.cwd()):
        return True
    for folder in ancestors(Path.cwd()):
        if attempt(folder):
            return True
    # Fall back to the package's directory tree (project root search).
    for folder in ancestors(Path(__file__).resolve().parent):
        if attempt(folder):
            return True
    return False
class ScientificSchematicGenerator:
    """Generate scientific schematics using AI with smart iterative refinement.

    Uses Gemini 3 Pro for quality review to determine if regeneration is needed.
    Multiple passes only occur if the generated schematic doesn't meet the
    quality threshold for the target document type.
    """

    # Quality thresholds by document type (score out of 10).
    # Higher thresholds for more formal publications; review_image and
    # generate_iterative fall back to "default" for unknown types.
    QUALITY_THRESHOLDS = {
        "journal": 8.5,  # Nature, Science, etc. - highest standards
        "conference": 8.0,  # Conference papers - high standards
        "poster": 7.0,  # Academic posters - good quality
        "presentation": 6.5,  # Slides/talks - clear but less formal
        "report": 7.5,  # Technical reports - professional
        "grant": 8.0,  # Grant proposals - must be compelling
        "thesis": 8.0,  # Dissertations - formal academic
        "preprint": 7.5,  # arXiv, etc. - good quality
        "default": 7.5,  # Default threshold
    }

    # Scientific diagram best practices prompt template; prepended to every
    # generation prompt (see improve_prompt and generate_iterative).
    SCIENTIFIC_DIAGRAM_GUIDELINES = """
Create a high-quality scientific diagram with these requirements:
VISUAL QUALITY:
- Clean white or light background (no textures or gradients)
- High contrast for readability and printing
- Professional, publication-ready appearance
- Sharp, clear lines and text
- Adequate spacing between elements to prevent crowding
TYPOGRAPHY:
- Clear, readable sans-serif fonts (Arial, Helvetica style)
- Minimum 10pt font size for all labels
- Consistent font sizes throughout
- All text horizontal or clearly readable
- No overlapping text
SCIENTIFIC STANDARDS:
- Accurate representation of concepts
- Clear labels for all components
- Include scale bars, legends, or axes where appropriate
- Use standard scientific notation and symbols
- Include units where applicable
ACCESSIBILITY:
- Colorblind-friendly color palette (use Okabe-Ito colors if using color)
- High contrast between elements
- Redundant encoding (shapes + colors, not just colors)
- Works well in grayscale
LAYOUT:
- Logical flow (left-to-right or top-to-bottom)
- Clear visual hierarchy
- Balanced composition
- Appropriate use of whitespace
- No clutter or unnecessary decorative elements
"""
def __init__(self, api_key: Optional[str] = None, verbose: bool = False):
"""
Initialize the generator.
Args:
api_key: OpenRouter API key (or use OPENROUTER_API_KEY env var)
verbose: Print detailed progress information
"""
# Priority: 1) explicit api_key param, 2) environment variable, 3) .env file
self.api_key = api_key or os.getenv("OPENROUTER_API_KEY")
# If not found in environment, try loading from .env file
if not self.api_key:
_load_env_file()
self.api_key = os.getenv("OPENROUTER_API_KEY")
if not self.api_key:
raise ValueError(
"OPENROUTER_API_KEY not found. Please either:\n"
" 1. Set the OPENROUTER_API_KEY environment variable\n"
" 2. Add OPENROUTER_API_KEY to your .env file\n"
" 3. Pass api_key parameter to the constructor\n"
"Get your API key from: https://openrouter.ai/keys"
)
self.verbose = verbose
self._last_error = None # Track last error for better reporting
self.base_url = "https://openrouter.ai/api/v1"
# Nano Banana Pro - Google's advanced image generation model
# https://openrouter.ai/google/gemini-3-pro-image-preview
self.image_model = "google/gemini-3-pro-image-preview"
# Gemini 3 Pro for quality review - excellent vision and reasoning
self.review_model = "google/gemini-3-pro"
def _log(self, message: str):
"""Log message if verbose mode is enabled."""
if self.verbose:
print(f"[{time.strftime('%H:%M:%S')}] {message}")
def _make_request(self, model: str, messages: List[Dict[str, Any]],
                  modalities: Optional[List[str]] = None) -> Dict[str, Any]:
    """
    POST a chat-completions request to OpenRouter and return the parsed JSON.

    Args:
        model: Model identifier
        messages: List of message dictionaries
        modalities: Optional list of modalities (e.g., ["image", "text"])

    Returns:
        API response as dictionary

    Raises:
        RuntimeError: On timeout, transport failure, or a non-200 status
            (the error message carries whatever detail the body provided).
    """
    request_headers = {
        "Authorization": f"Bearer {self.api_key}",
        "Content-Type": "application/json",
        "HTTP-Referer": "https://github.com/scientific-writer",
        "X-Title": "Scientific Schematic Generator"
    }
    body: Dict[str, Any] = {"model": model, "messages": messages}
    if modalities:
        body["modalities"] = modalities
    self._log(f"Making request to {model}...")
    try:
        http_response = requests.post(
            f"{self.base_url}/chat/completions",
            headers=request_headers,
            json=body,
            timeout=120
        )
        # Try to get response body even on error
        try:
            response_json = http_response.json()
        except json.JSONDecodeError:
            response_json = {"raw_text": http_response.text[:500]}
        # Check for HTTP errors but include response body in error message
        if http_response.status_code != 200:
            error_detail = response_json.get("error", response_json)
            self._log(f"HTTP {http_response.status_code}: {error_detail}")
            raise RuntimeError(f"API request failed (HTTP {http_response.status_code}): {error_detail}")
    except requests.exceptions.Timeout:
        raise RuntimeError("API request timed out after 120 seconds")
    except requests.exceptions.RequestException as e:
        raise RuntimeError(f"API request failed: {str(e)}")
    return response_json
def _extract_image_from_response(self, response: Dict[str, Any]) -> Optional[bytes]:
    """
    Extract base64-encoded image from API response.

    For Nano Banana Pro, images are returned in the 'images' field of the
    message, not in the 'content' field; the 'content' field is probed as
    a fallback for other models. All failures (including decode errors)
    are logged and collapsed to a None return — this method never raises.

    Args:
        response: API response dictionary

    Returns:
        Image bytes or None if not found
    """
    try:
        choices = response.get("choices", [])
        if not choices:
            self._log("No choices in response")
            return None
        message = choices[0].get("message", {})
        # IMPORTANT: Nano Banana Pro returns images in the 'images' field
        images = message.get("images", [])
        if images and len(images) > 0:
            self._log(f"Found {len(images)} image(s) in 'images' field")
            # Get first image only; any additional images are ignored.
            first_image = images[0]
            if isinstance(first_image, dict):
                # Extract image_url ("url" may be a nested dict or a bare string)
                if first_image.get("type") == "image_url":
                    url = first_image.get("image_url", {})
                    if isinstance(url, dict):
                        url = url.get("url", "")
                    if url and url.startswith("data:image"):
                        # Extract base64 data after comma
                        if "," in url:
                            base64_str = url.split(",", 1)[1]
                            # Clean whitespace that would corrupt the decode
                            base64_str = base64_str.replace('\n', '').replace('\r', '').replace(' ', '')
                            self._log(f"Extracted base64 data (length: {len(base64_str)})")
                            return base64.b64decode(base64_str)
        # Fallback: check content field (for other models or future changes)
        content = message.get("content", "")
        if self.verbose:
            self._log(f"Content type: {type(content)}, length: {len(str(content))}")
        # Handle string content: scan for an inline data URL.
        if isinstance(content, str) and "data:image" in content:
            import re
            match = re.search(r'data:image/[^;]+;base64,([A-Za-z0-9+/=\n\r]+)', content, re.DOTALL)
            if match:
                base64_str = match.group(1).replace('\n', '').replace('\r', '').replace(' ', '')
                self._log(f"Found image in content field (length: {len(base64_str)})")
                return base64.b64decode(base64_str)
        # Handle list content: look for the first image_url block.
        if isinstance(content, list):
            for i, block in enumerate(content):
                if isinstance(block, dict) and block.get("type") == "image_url":
                    url = block.get("image_url", {})
                    if isinstance(url, dict):
                        url = url.get("url", "")
                    if url and url.startswith("data:image") and "," in url:
                        base64_str = url.split(",", 1)[1].replace('\n', '').replace('\r', '').replace(' ', '')
                        self._log(f"Found image in content block {i}")
                        return base64.b64decode(base64_str)
        self._log("No image data found in response")
        return None
    except Exception as e:
        # Deliberately broad: extraction is best-effort, callers handle None.
        self._log(f"Error extracting image: {str(e)}")
        import traceback
        if self.verbose:
            traceback.print_exc()
        return None
def _image_to_base64(self, image_path: str) -> str:
"""
Convert image file to base64 data URL.
Args:
image_path: Path to image file
Returns:
Base64 data URL string
"""
with open(image_path, "rb") as f:
image_data = f.read()
# Determine image type from extension
ext = Path(image_path).suffix.lower()
mime_type = {
".png": "image/png",
".jpg": "image/jpeg",
".jpeg": "image/jpeg",
".gif": "image/gif",
".webp": "image/webp"
}.get(ext, "image/png")
base64_data = base64.b64encode(image_data).decode("utf-8")
return f"data:{mime_type};base64,{base64_data}"
def generate_image(self, prompt: str) -> Optional[bytes]:
    """
    Generate an image using Nano Banana Pro.

    Sends the prompt via _make_request with image+text modalities and
    extracts the returned image. On any failure the error is recorded in
    self._last_error and None is returned — this method never raises.

    Args:
        prompt: Description of the diagram to generate

    Returns:
        Image bytes or None if generation failed
    """
    self._last_error = None  # Reset error from any previous attempt
    messages = [
        {
            "role": "user",
            "content": prompt
        }
    ]
    try:
        response = self._make_request(
            model=self.image_model,
            messages=messages,
            modalities=["image", "text"]
        )
        # Debug: print response structure if verbose
        if self.verbose:
            self._log(f"Response keys: {response.keys()}")
            if "error" in response:
                self._log(f"API Error: {response['error']}")
            if "choices" in response and response["choices"]:
                msg = response["choices"][0].get("message", {})
                self._log(f"Message keys: {msg.keys()}")
                # Show content preview without printing huge base64 data
                content = msg.get("content", "")
                if isinstance(content, str):
                    preview = content[:200] + "..." if len(content) > 200 else content
                    self._log(f"Content preview: {preview}")
                elif isinstance(content, list):
                    self._log(f"Content is list with {len(content)} items")
                    for i, item in enumerate(content[:3]):
                        if isinstance(item, dict):
                            self._log(f" Item {i}: type={item.get('type')}")
        # Check for API errors in response (HTTP 200 can still carry an error body)
        if "error" in response:
            error_msg = response["error"]
            if isinstance(error_msg, dict):
                error_msg = error_msg.get("message", str(error_msg))
            self._last_error = f"API Error: {error_msg}"
            # NOTE(review): the "β" prefix below looks mojibake'd (likely ✗/✓
            # originally) — confirm against the upstream source before changing.
            print(f"β {self._last_error}")
            return None
        image_data = self._extract_image_from_response(response)
        if image_data:
            self._log(f"β Generated image ({len(image_data)} bytes)")
        else:
            self._last_error = "No image data in API response - model may not support image generation"
            self._log(f"β {self._last_error}")
            # Additional debug info when image extraction fails
            if self.verbose and "choices" in response:
                msg = response["choices"][0].get("message", {})
                self._log(f"Full message structure: {json.dumps({k: type(v).__name__ for k, v in msg.items()})}")
        return image_data
    except RuntimeError as e:
        # Raised by _make_request on transport/HTTP failures.
        self._last_error = str(e)
        self._log(f"β Generation failed: {self._last_error}")
        return None
    except Exception as e:
        self._last_error = f"Unexpected error: {str(e)}"
        self._log(f"β Generation failed: {self._last_error}")
        import traceback
        if self.verbose:
            traceback.print_exc()
        return None
def review_image(self, image_path: str, original_prompt: str,
                 iteration: int, doc_type: str = "default",
                 max_iterations: int = 2) -> Tuple[str, float, bool]:
    """
    Review generated image using Gemini 3 Pro for quality analysis.

    Uses Gemini 3 Pro's superior vision and reasoning capabilities to
    evaluate the schematic quality and determine if regeneration is needed.
    Review failures are non-fatal: the image is assumed acceptable.

    Args:
        image_path: Path to the generated image
        original_prompt: Original user prompt
        iteration: Current iteration number
        doc_type: Document type (journal, poster, presentation, etc.)
        max_iterations: Maximum iterations allowed

    Returns:
        Tuple of (critique text, quality score 0-10, needs_improvement bool)
    """
    # Encode the image as a data URL for the vision model.
    image_data_url = self._image_to_base64(image_path)
    # Get quality threshold for this document type
    threshold = self.QUALITY_THRESHOLDS.get(doc_type.lower(),
                                            self.QUALITY_THRESHOLDS["default"])
    review_prompt = f"""You are an expert reviewer evaluating a scientific diagram for publication quality.
ORIGINAL REQUEST: {original_prompt}
DOCUMENT TYPE: {doc_type} (quality threshold: {threshold}/10)
ITERATION: {iteration}/{max_iterations}
Carefully evaluate this diagram on these criteria:
1. **Scientific Accuracy** (0-2 points)
- Correct representation of concepts
- Proper notation and symbols
- Accurate relationships shown
2. **Clarity and Readability** (0-2 points)
- Easy to understand at a glance
- Clear visual hierarchy
- No ambiguous elements
3. **Label Quality** (0-2 points)
- All important elements labeled
- Labels are readable (appropriate font size)
- Consistent labeling style
4. **Layout and Composition** (0-2 points)
- Logical flow (top-to-bottom or left-to-right)
- Balanced use of space
- No overlapping elements
5. **Professional Appearance** (0-2 points)
- Publication-ready quality
- Clean, crisp lines and shapes
- Appropriate colors/contrast
RESPOND IN THIS EXACT FORMAT:
SCORE: [total score 0-10]
STRENGTHS:
- [strength 1]
- [strength 2]
ISSUES:
- [issue 1 if any]
- [issue 2 if any]
VERDICT: [ACCEPTABLE or NEEDS_IMPROVEMENT]
If score >= {threshold}, the diagram is ACCEPTABLE for {doc_type} publication.
If score < {threshold}, mark as NEEDS_IMPROVEMENT with specific suggestions."""
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": review_prompt
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": image_data_url
                    }
                }
            ]
        }
    ]
    try:
        # Use Gemini 3 Pro for high-quality review
        response = self._make_request(
            model=self.review_model,
            messages=messages
        )
        # Extract text response
        choices = response.get("choices", [])
        if not choices:
            # BUG FIX: this path previously returned a 2-tuple, which made
            # callers unpacking (critique, score, needs_improvement) raise
            # ValueError. Treat a choiceless response as acceptable.
            return "Image generated successfully", 8.0, False
        message = choices[0].get("message", {})
        content = message.get("content", "")
        # Check reasoning field (Nano Banana Pro puts analysis here)
        reasoning = message.get("reasoning", "")
        if reasoning and not content:
            content = reasoning
        if isinstance(content, list):
            # Extract text from content blocks
            text_parts = []
            for block in content:
                if isinstance(block, dict) and block.get("type") == "text":
                    text_parts.append(block.get("text", ""))
            content = "\n".join(text_parts)
        # Try to extract score
        score = 7.5  # Default score if extraction fails
        import re
        # Look for SCORE: X or SCORE: X/10 format
        score_match = re.search(r'SCORE:\s*(\d+(?:\.\d+)?)', content, re.IGNORECASE)
        if score_match:
            score = float(score_match.group(1))
        else:
            # Fallback: look for any score pattern
            score_match = re.search(r'(?:score|rating|quality)[:\s]+(\d+(?:\.\d+)?)\s*(?:/\s*10)?', content, re.IGNORECASE)
            if score_match:
                score = float(score_match.group(1))
        # Determine if improvement is needed based on verdict or score
        needs_improvement = False
        if "NEEDS_IMPROVEMENT" in content.upper():
            needs_improvement = True
        elif score < threshold:
            needs_improvement = True
        self._log(f"β Review complete (Score: {score}/10, Threshold: {threshold}/10)")
        self._log(f" Verdict: {'Needs improvement' if needs_improvement else 'Acceptable'}")
        return (content if content else "Image generated successfully",
                score,
                needs_improvement)
    except Exception as e:
        self._log(f"Review skipped: {str(e)}")
        # Don't fail the whole process if review fails - assume acceptable
        return "Image generated successfully (review skipped)", 7.5, False
def improve_prompt(self, original_prompt: str, critique: str,
                   iteration: int) -> str:
    """Build a refined generation prompt from reviewer feedback.

    Args:
        original_prompt: The user's original diagram description.
        critique: Review critique text from the previous iteration.
        iteration: The iteration number this prompt is being built for.

    Returns:
        A prompt combining the standing diagram guidelines, the original
        request, and the critique points the next generation must address.
    """
    # Assemble the prompt section by section; joining with newlines yields
    # exactly the same text the previous template produced.
    sections = (
        self.SCIENTIFIC_DIAGRAM_GUIDELINES,
        f"USER REQUEST: {original_prompt}",
        f"ITERATION {iteration}: Based on previous feedback, address these specific improvements:",
        critique,
        "Generate an improved version that addresses all the critique points while maintaining scientific accuracy and professional quality.",
    )
    return "\n".join(sections)
def generate_iterative(self, user_prompt: str, output_path: str,
                       iterations: int = 2,
                       doc_type: str = "default") -> Dict[str, Any]:
    """
    Generate scientific schematic with smart iterative refinement.

    Only regenerates if the quality score is below the threshold for the
    specified document type. This saves API calls and time when the first
    generation is already good enough.

    Side effects: writes one image per iteration (<stem>_vN<ext>) plus a
    JSON review log into the output directory, and copies the chosen
    image to output_path. Progress is printed to stdout.

    Args:
        user_prompt: User's description of desired diagram
        output_path: Path to save final image
        iterations: Maximum refinement iterations (default: 2, max: 2)
        doc_type: Document type for quality threshold (journal, poster, etc.)

    Returns:
        Dictionary with generation results and metadata
    """
    output_path = Path(output_path)
    output_dir = output_path.parent
    output_dir.mkdir(parents=True, exist_ok=True)
    base_name = output_path.stem
    # Default to .png when the requested path carries no extension.
    extension = output_path.suffix or ".png"
    # Get quality threshold for this document type; unrecognized types
    # fall back to the "default" entry.
    threshold = self.QUALITY_THRESHOLDS.get(doc_type.lower(),
                                            self.QUALITY_THRESHOLDS["default"])
    results = {
        "user_prompt": user_prompt,
        "doc_type": doc_type,
        "quality_threshold": threshold,
        "iterations": [],  # one record appended per attempted iteration
        "final_image": None,
        "final_score": 0.0,
        "success": False,
        "early_stop": False,  # True when threshold met before max iterations
        "early_stop_reason": None
    }
    current_prompt = f"""{self.SCIENTIFIC_DIAGRAM_GUIDELINES}
USER REQUEST: {user_prompt}
Generate a publication-quality scientific diagram that meets all the guidelines above."""
    print(f"\n{'='*60}")
    print(f"Generating Scientific Schematic")
    print(f"{'='*60}")
    print(f"Description: {user_prompt}")
    print(f"Document Type: {doc_type}")
    print(f"Quality Threshold: {threshold}/10")
    print(f"Max Iterations: {iterations}")
    print(f"Output: {output_path}")
    print(f"{'='*60}\n")
    for i in range(1, iterations + 1):
        print(f"\n[Iteration {i}/{iterations}]")
        print("-" * 40)
        # Generate image
        print(f"Generating image...")
        image_data = self.generate_image(current_prompt)
        if not image_data:
            # generate_image stores its failure reason in _last_error.
            error_msg = getattr(self, '_last_error', 'Image generation failed - no image data returned')
            print(f"β Generation failed: {error_msg}")
            results["iterations"].append({
                "iteration": i,
                "success": False,
                "error": error_msg
            })
            # Retry with the same prompt on the next iteration (if any).
            continue
        # Save iteration image
        iter_path = output_dir / f"{base_name}_v{i}{extension}"
        with open(iter_path, "wb") as f:
            f.write(image_data)
        print(f"β Saved: {iter_path}")
        # Review image using Gemini 3 Pro
        print(f"Reviewing image with Gemini 3 Pro...")
        critique, score, needs_improvement = self.review_image(
            str(iter_path), user_prompt, i, doc_type, iterations
        )
        print(f"β Score: {score}/10 (threshold: {threshold}/10)")
        # Save iteration results
        iteration_result = {
            "iteration": i,
            "image_path": str(iter_path),
            "prompt": current_prompt,
            "critique": critique,
            "score": score,
            "needs_improvement": needs_improvement,
            "success": True
        }
        results["iterations"].append(iteration_result)
        # Check if quality is acceptable - STOP EARLY if so
        if not needs_improvement:
            print(f"\nβ Quality meets {doc_type} threshold ({score} >= {threshold})")
            print(f" No further iterations needed!")
            results["final_image"] = str(iter_path)
            results["final_score"] = score
            results["success"] = True
            results["early_stop"] = True
            results["early_stop_reason"] = f"Quality score {score} meets threshold {threshold} for {doc_type}"
            break
        # If this is the last iteration, we're done regardless
        if i == iterations:
            print(f"\nβ Maximum iterations reached")
            results["final_image"] = str(iter_path)
            results["final_score"] = score
            results["success"] = True
            break
        # Quality below threshold - improve prompt for next iteration
        print(f"\nβ Quality below threshold ({score} < {threshold})")
        print(f"Improving prompt based on feedback...")
        current_prompt = self.improve_prompt(user_prompt, critique, i + 1)
    # Copy final version to output path (skip when the best iteration file
    # already is the requested path).
    if results["success"] and results["final_image"]:
        final_iter_path = Path(results["final_image"])
        if final_iter_path != output_path:
            import shutil
            shutil.copy(final_iter_path, output_path)
        print(f"\nβ Final image: {output_path}")
    # Save review log next to the output image for later inspection.
    log_path = output_dir / f"{base_name}_review_log.json"
    with open(log_path, "w") as f:
        json.dump(results, f, indent=2)
    print(f"β Review log: {log_path}")
    print(f"\n{'='*60}")
    print(f"Generation Complete!")
    print(f"Final Score: {results['final_score']}/10")
    if results["early_stop"]:
        print(f"Iterations Used: {len([r for r in results['iterations'] if r.get('success')])}/{iterations} (early stop)")
    print(f"{'='*60}\n")
    return results
def main():
    """Command-line interface.

    Parses arguments, validates the API key and iteration count, then runs
    the iterative generation workflow. Exits 0 on success, 1 on failure.
    """
    parser = argparse.ArgumentParser(
        description="Generate scientific schematics using AI with smart iterative refinement",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Generate a flowchart for a journal paper
python generate_schematic_ai.py "CONSORT participant flow diagram" -o flowchart.png --doc-type journal
# Generate neural network architecture for presentation (lower threshold)
python generate_schematic_ai.py "Transformer encoder-decoder architecture" -o transformer.png --doc-type presentation
# Generate with custom max iterations for poster
python generate_schematic_ai.py "Biological signaling pathway" -o pathway.png --iterations 2 --doc-type poster
# Verbose output
python generate_schematic_ai.py "Circuit diagram" -o circuit.png -v
Document Types (quality thresholds):
journal 8.5/10 - Nature, Science, peer-reviewed journals
conference 8.0/10 - Conference papers
thesis 8.0/10 - Dissertations, theses
grant 8.0/10 - Grant proposals
preprint 7.5/10 - arXiv, bioRxiv, etc.
report 7.5/10 - Technical reports
poster 7.0/10 - Academic posters
presentation 6.5/10 - Slides, talks
default 7.5/10 - General purpose
Note: Multiple iterations only occur if quality is BELOW the threshold.
If the first generation meets the threshold, no extra API calls are made.
Environment:
OPENROUTER_API_KEY OpenRouter API key (required)
"""
    )
    parser.add_argument("prompt", help="Description of the diagram to generate")
    parser.add_argument("-o", "--output", required=True,
                        help="Output image path (e.g., diagram.png)")
    parser.add_argument("--iterations", type=int, default=2,
                        help="Maximum refinement iterations (default: 2, max: 2)")
    parser.add_argument("--doc-type", default="default",
                        choices=["journal", "conference", "poster", "presentation",
                                 "report", "grant", "thesis", "preprint", "default"],
                        help="Document type for quality threshold (default: default)")
    parser.add_argument("--api-key", help="OpenRouter API key (or set OPENROUTER_API_KEY)")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Verbose output")
    args = parser.parse_args()
    # Check for API key: the CLI flag takes precedence over the environment.
    api_key = args.api_key or os.getenv("OPENROUTER_API_KEY")
    if not api_key:
        print("Error: OPENROUTER_API_KEY environment variable not set")
        print("\nSet it with:")
        print(" export OPENROUTER_API_KEY='your_api_key'")
        print("\nOr provide via --api-key flag")
        sys.exit(1)
    # Validate iterations - enforce max of 2 (keeps API cost bounded).
    if args.iterations < 1 or args.iterations > 2:
        print("Error: Iterations must be between 1 and 2")
        sys.exit(1)
    try:
        generator = ScientificSchematicGenerator(api_key=api_key, verbose=args.verbose)
        results = generator.generate_iterative(
            user_prompt=args.prompt,
            output_path=args.output,
            iterations=args.iterations,
            doc_type=args.doc_type
        )
        if results["success"]:
            print(f"\nβ Success! Image saved to: {args.output}")
            if results.get("early_stop"):
                print(f" (Completed in {len([r for r in results['iterations'] if r.get('success')])} iteration(s) - quality threshold met)")
            sys.exit(0)
        else:
            print(f"\nβ Generation failed. Check review log for details.")
            sys.exit(1)
    except Exception as e:
        # Catch-all at the CLI boundary so the user sees a message instead
        # of a traceback.
        print(f"\nβ Error: {str(e)}")
        sys.exit(1)
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scientific-schematics/scripts/generate_schematic_ai.py",
"license": "MIT License",
"lines": 695,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scientific-schematics/test_ai_generation.py | #!/usr/bin/env python3
"""
Test script to verify AI generation implementation.
This script performs dry-run tests without making actual API calls.
It verifies:
1. Script structure and imports
2. Class initialization
3. Method signatures
4. Error handling
5. Command-line interface
Usage:
python test_ai_generation.py
"""
import sys
import os
from pathlib import Path
# Add scripts directory to path
scripts_dir = Path(__file__).parent / "scripts"
sys.path.insert(0, str(scripts_dir))
def test_imports():
    """Test that all required modules can be imported."""
    print("Testing imports...")
    try:
        from generate_schematic_ai import ScientificSchematicGenerator  # noqa: F401
    except ImportError as e:
        print(f"β Import failed: {e}")
        return False
    print("β generate_schematic_ai imports successfully")
    return True
def test_class_structure():
    """Test class initialization and structure."""
    print("\nTesting class structure...")
    try:
        from generate_schematic_ai import ScientificSchematicGenerator
        # A dummy key is enough here: construction does not hit the network.
        generator = ScientificSchematicGenerator(api_key="test_key", verbose=False)
        print("β Class initializes successfully")
        # Required public methods.
        for required in ('generate_image', 'review_image',
                         'improve_prompt', 'generate_iterative'):
            if not hasattr(generator, required):
                print(f"β Missing method: {required}")
                return False
            print(f"β Method exists: {required}")
        # Required attributes, each with its own success message.
        attribute_messages = (
            ("api_key", lambda g: "β Attribute exists: api_key"),
            ("image_model", lambda g: f"β Image model: {g.image_model}"),
            ("review_model", lambda g: f"β Review model: {g.review_model}"),
        )
        for attr_name, success_message in attribute_messages:
            if not hasattr(generator, attr_name):
                print(f"β Missing attribute: {attr_name}")
                return False
            print(success_message(generator))
        return True
    except Exception as e:
        print(f"β Class structure test failed: {e}")
        return False
def test_error_handling():
    """Test error handling for missing API key.

    Verifies that constructing the generator without any API key raises a
    ValueError mentioning OPENROUTER_API_KEY.

    Returns:
        True when the expected ValueError is raised, False otherwise.
    """
    print("\nTesting error handling...")
    try:
        from generate_schematic_ai import ScientificSchematicGenerator
        # Temporarily remove the key from the environment. BUG FIX: the
        # original only restored it on the success path, so the early
        # returns below leaked a mutated os.environ; try/finally now
        # guarantees restoration on every exit path.
        old_key = os.environ.pop("OPENROUTER_API_KEY", None)
        try:
            # Try to initialize without key
            try:
                ScientificSchematicGenerator()
                print("β Should have raised ValueError for missing API key")
                return False
            except ValueError as e:
                if "OPENROUTER_API_KEY" in str(e):
                    print("β Correctly raises ValueError for missing API key")
                else:
                    print(f"β Wrong error message: {e}")
                    return False
            return True
        finally:
            # Restore environment variable
            if old_key:
                os.environ["OPENROUTER_API_KEY"] = old_key
    except Exception as e:
        print(f"β Error handling test failed: {e}")
        return False
def test_wrapper_script():
    """Test wrapper script structure."""
    print("\nTesting wrapper script...")
    try:
        import generate_schematic
    except Exception as e:
        print(f"β Wrapper script test failed: {e}")
        return False
    print("β generate_schematic imports successfully")
    # The wrapper must expose a main() entry point.
    if not hasattr(generate_schematic, 'main'):
        print("β Missing function: main")
        return False
    print("β Function exists: main")
    return True
def test_prompt_engineering():
    """Test prompt construction."""
    print("\nTesting prompt engineering...")
    try:
        from generate_schematic_ai import ScientificSchematicGenerator
        generator = ScientificSchematicGenerator(api_key="test_key", verbose=False)
        original = "Create a flowchart"
        critique = "Add more spacing between boxes"
        improved = generator.improve_prompt(original, critique, 2)
        # Each required ingredient must appear in the improved prompt;
        # report the first one that is missing.
        checks = (
            (bool(improved), "β improve_prompt returned empty string"),
            (original in improved, "β Improved prompt doesn't include original"),
            (critique in improved, "β Improved prompt doesn't include critique"),
            ("ITERATION 2" in improved, "β Improved prompt doesn't include iteration number"),
        )
        for ok, failure_message in checks:
            if not ok:
                print(failure_message)
                return False
        print("β Prompt engineering works correctly")
        print(f" Original length: {len(original)} chars")
        print(f" Improved length: {len(improved)} chars")
        return True
    except Exception as e:
        print(f"β Prompt engineering test failed: {e}")
        return False
def test_file_paths():
    """Test that all required files exist."""
    print("\nTesting file structure...")
    base_dir = Path(__file__).parent
    required_files = (
        "scripts/generate_schematic_ai.py",
        "scripts/generate_schematic.py",
        "SKILL.md",
        "README.md",
    )
    # Report every file individually, then fail if any were missing.
    missing = []
    for rel_path in required_files:
        if (base_dir / rel_path).exists():
            print(f"β {rel_path}")
        else:
            print(f"β Missing: {rel_path}")
            missing.append(rel_path)
    return not missing
def main():
    """Run all tests and print a pass/fail summary; return 0 iff all pass."""
    header = "=" * 60
    print(header)
    print("Scientific Schematics AI Generation - Verification Tests")
    print(header)
    suite = (
        ("File Structure", test_file_paths),
        ("Imports", test_imports),
        ("Class Structure", test_class_structure),
        ("Error Handling", test_error_handling),
        ("Wrapper Script", test_wrapper_script),
        ("Prompt Engineering", test_prompt_engineering),
    )
    outcomes = []
    for name, func in suite:
        # A crashing test counts as a failure, not an abort of the run.
        try:
            outcomes.append((name, func()))
        except Exception as e:
            print(f"\nβ Test '{name}' crashed: {e}")
            outcomes.append((name, False))
    # Summary
    print("\n" + header)
    print("Test Summary")
    print(header)
    passed = sum(1 for _, ok in outcomes if ok)
    total = len(outcomes)
    for name, ok in outcomes:
        print(f"{'β PASS' if ok else 'β FAIL'}: {name}")
    print(f"\nTotal: {passed}/{total} tests passed")
    if passed == total:
        print("\nβ All tests passed! Implementation verified.")
        print("\nNext steps:")
        print("1. Set OPENROUTER_API_KEY environment variable")
        print("2. Test with actual API call:")
        print(" python scripts/generate_schematic.py 'test diagram' -o test.png")
        return 0
    print(f"\nβ {total - passed} test(s) failed. Please review errors above.")
    return 1
if __name__ == "__main__":
sys.exit(main())
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scientific-schematics/test_ai_generation.py",
"license": "MIT License",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scientific-slides/scripts/generate_slide_image.py | #!/usr/bin/env python3
"""
Slide image generation using Nano Banana Pro.
Generate presentation slides or visuals by describing them in natural language.
Nano Banana Pro handles everything automatically with smart iterative refinement.
Two modes:
- Default (full slide): Generate complete slides with title, content, visuals (for PDF workflow)
- Visual only: Generate just images/figures to place on slides (for PPT workflow)
Supports attaching reference images for context (Nano Banana Pro will see these).
Usage:
# Generate full slide for PDF workflow
python generate_slide_image.py "Title: Introduction\\nKey points: AI, ML, Deep Learning" -o slide_01.png
# Generate visual only for PPT workflow
python generate_slide_image.py "Neural network diagram" -o figure.png --visual-only
# With reference images attached
python generate_slide_image.py "Create a slide about this data" -o slide.png --attach chart.png
"""
import argparse
import os
import subprocess
import sys
from pathlib import Path
def main():
    """Command-line interface.

    Thin wrapper: validates the API key, then delegates the real work to
    generate_slide_image_ai.py in a subprocess, forwarding all options.
    Exits with the subprocess's return code.
    """
    parser = argparse.ArgumentParser(
        description="Generate presentation slides or visuals using Nano Banana Pro AI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
How it works:
Describe your slide or visual in natural language.
Nano Banana Pro generates it automatically with:
- Smart iteration (only regenerates if quality is below threshold)
- Quality review by Gemini 3 Pro
- Publication-ready output
Modes:
Default (full slide): Generate complete slide with title, content, visuals
Use for PDF workflow where each slide is an image
Visual only: Generate just the image/figure
Use for PPT workflow where you add text separately
Attachments:
Use --attach to provide reference images that Nano Banana Pro will see.
This allows you to say "create a slide about this chart" and attach the chart.
Examples:
# Full slide (default) - for PDF workflow
python generate_slide_image.py "Title: Machine Learning\\nPoints: supervised, unsupervised, reinforcement" -o slide_01.png
# Visual only - for PPT workflow
python generate_slide_image.py "Flowchart showing data pipeline" -o figure.png --visual-only
# With reference images attached
python generate_slide_image.py "Create a slide explaining this chart" -o slide.png --attach chart.png
python generate_slide_image.py "Combine these into a comparison" -o compare.png --attach before.png --attach after.png
# Multiple slides for PDF
python generate_slide_image.py "Title slide: AI Conference 2025" -o slides/01_title.png
python generate_slide_image.py "Title: Introduction\\nOverview of deep learning" -o slides/02_intro.png
Environment Variables:
OPENROUTER_API_KEY Required for AI generation
"""
    )
    parser.add_argument("prompt", help="Description of the slide or visual to generate")
    parser.add_argument("-o", "--output", required=True, help="Output file path")
    parser.add_argument("--attach", action="append", dest="attachments", metavar="IMAGE",
                        help="Attach image file(s) as context (can use multiple times)")
    parser.add_argument("--visual-only", action="store_true",
                        help="Generate just the visual/figure (for PPT workflow)")
    parser.add_argument("--iterations", type=int, default=2,
                        help="Maximum refinement iterations (default: 2, max: 2)")
    parser.add_argument("--api-key", help="OpenRouter API key (or use OPENROUTER_API_KEY env var)")
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
    args = parser.parse_args()
    # Check for API key: CLI flag takes precedence over the environment.
    api_key = args.api_key or os.getenv("OPENROUTER_API_KEY")
    if not api_key:
        print("Error: OPENROUTER_API_KEY environment variable not set")
        print("\nFor AI generation, you need an OpenRouter API key.")
        print("Get one at: https://openrouter.ai/keys")
        print("\nSet it with:")
        print(" export OPENROUTER_API_KEY='your_api_key'")
        print("\nOr use --api-key flag")
        sys.exit(1)
    # Find AI generation script (expected to live next to this wrapper).
    script_dir = Path(__file__).parent
    ai_script = script_dir / "generate_slide_image_ai.py"
    if not ai_script.exists():
        print(f"Error: AI generation script not found: {ai_script}")
        sys.exit(1)
    # Build command for the delegated script.
    cmd = [sys.executable, str(ai_script), args.prompt, "-o", args.output]
    # Add attachments (one --attach flag per image).
    if args.attachments:
        for att in args.attachments:
            cmd.extend(["--attach", att])
    if args.visual_only:
        cmd.append("--visual-only")
    # Enforce max 2 iterations; the flag is only forwarded when it differs
    # from the downstream default of 2.
    iterations = min(args.iterations, 2)
    if iterations != 2:
        cmd.extend(["--iterations", str(iterations)])
    if api_key:
        # NOTE(review): the key is passed on the child's command line, so it
        # is briefly visible in the local process list - consider relying on
        # the environment variable instead.
        cmd.extend(["--api-key", api_key])
    if args.verbose:
        cmd.append("-v")
    # Execute; check=False so we propagate the child's exit code ourselves.
    try:
        result = subprocess.run(cmd, check=False)
        sys.exit(result.returncode)
    except Exception as e:
        print(f"Error executing AI generation: {e}")
        sys.exit(1)
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scientific-slides/scripts/generate_slide_image.py",
"license": "MIT License",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scientific-slides/scripts/generate_slide_image_ai.py | #!/usr/bin/env python3
"""
AI-powered slide image generation using Nano Banana Pro.
This script generates presentation slides or slide visuals using AI:
- full_slide mode: Generate complete slides with title, content, and visuals (for PDF workflow)
- visual_only mode: Generate just images/figures to place on slides (for PPT workflow)
Supports attaching reference images for context (e.g., "create a slide about this chart").
Uses smart iterative refinement:
1. Generate initial image with Nano Banana Pro
2. Quality review using Gemini 3 Pro
3. Only regenerate if quality is below threshold
4. Repeat until quality meets standards (max iterations)
Requirements:
- OPENROUTER_API_KEY environment variable
- requests library
Usage:
# Full slide for PDF workflow
python generate_slide_image_ai.py "Title: Introduction to ML\nKey points: supervised learning, neural networks" -o slide_01.png
# Visual only for PPT workflow
python generate_slide_image_ai.py "Neural network architecture diagram" -o figure.png --visual-only
# With reference images attached
python generate_slide_image_ai.py "Create a slide explaining this chart" -o slide.png --attach chart.png --attach logo.png
"""
import argparse
import base64
import json
import os
import sys
import time
from pathlib import Path
from typing import Optional, Dict, Any, List, Tuple
try:
import requests
except ImportError:
print("Error: requests library not found. Install with: pip install requests")
sys.exit(1)
def _load_env_file():
"""Load .env file from current directory, parent directories, or package directory."""
try:
from dotenv import load_dotenv
except ImportError:
return False
# Try current working directory first
env_path = Path.cwd() / ".env"
if env_path.exists():
load_dotenv(dotenv_path=env_path, override=False)
return True
# Try parent directories (up to 5 levels)
cwd = Path.cwd()
for _ in range(5):
env_path = cwd / ".env"
if env_path.exists():
load_dotenv(dotenv_path=env_path, override=False)
return True
cwd = cwd.parent
if cwd == cwd.parent:
break
# Try the package's parent directory
script_dir = Path(__file__).resolve().parent
for _ in range(5):
env_path = script_dir / ".env"
if env_path.exists():
load_dotenv(dotenv_path=env_path, override=False)
return True
script_dir = script_dir.parent
if script_dir == script_dir.parent:
break
return False
class SlideImageGenerator:
"""Generate presentation slides or visuals using AI with iterative refinement.
Two modes:
- full_slide: Generate complete slide with title, content, visuals (for PDF workflow)
- visual_only: Generate just the image/figure for a slide (for PPT workflow)
"""
# Quality threshold for presentations (lower than journal/conference papers)
QUALITY_THRESHOLD = 6.5
# Guidelines for generating full slides (complete slide images)
FULL_SLIDE_GUIDELINES = """
Create a professional presentation slide image with these requirements:
SLIDE LAYOUT (16:9 aspect ratio):
- Clean, modern slide design
- Clear visual hierarchy: title at top, content below
- Generous margins (at least 5% on all sides)
- Balanced composition with intentional white space
TYPOGRAPHY:
- LARGE, bold title text (easily readable from distance)
- Clear, sans-serif fonts throughout
- High contrast text (dark on light or light on dark)
- Bullet points or key phrases, NOT paragraphs
- Maximum 5-6 lines of text content
- Default author/presenter: "K-Dense" (use this unless another name is specified)
VISUAL ELEMENTS:
- Use GENERIC, simple images and icons - avoid overly specific or detailed imagery
- MINIMAL extra elements - no decorative borders, shadows, or flourishes
- Visuals should support and enhance the message, not distract
- Professional, clean aesthetic with restraint
- Consistent color scheme (2-3 main colors only)
- Prefer abstract/conceptual visuals over literal representations
PROFESSIONAL MINIMALISM:
- Less is more: favor empty space over additional elements
- No unnecessary decorations, gradients, or visual noise
- Clean lines and simple shapes
- Focused content without visual clutter
- Corporate/academic level of professionalism
PRESENTATION QUALITY:
- Designed for projection (high contrast)
- Bold, impactful design that commands attention
- Professional and polished appearance
- No cluttered or busy layouts
- Consistent styling throughout the deck
"""
# Guidelines for generating slide visuals only (figures/images for PPT)
VISUAL_ONLY_GUIDELINES = """
Create a high-quality visual/figure for a presentation slide:
IMAGE QUALITY:
- Clean, professional appearance
- High resolution and sharp details
- Suitable for embedding in a slide
DESIGN:
- Simple, clear composition with MINIMAL elements
- High contrast for projection readability
- No text unless essential to the visual
- Transparent or white background preferred
- GENERIC imagery - avoid overly specific or detailed visuals
PROFESSIONAL MINIMALISM:
- Favor simplicity over complexity
- No decorative elements, shadows, or flourishes
- Clean lines and simple shapes only
- Remove any unnecessary visual noise
- Abstract/conceptual rather than literal representations
STYLE:
- Modern, professional aesthetic
- Colorblind-friendly colors
- Bold but restrained imagery
- Suitable for scientific/professional presentations
- Corporate/academic level of polish
"""
def __init__(self, api_key: Optional[str] = None, verbose: bool = False):
    """
    Initialize the generator.

    Args:
        api_key: OpenRouter API key (or use OPENROUTER_API_KEY env var)
        verbose: Print detailed progress information

    Raises:
        ValueError: If no API key can be found in the argument, the
            environment, or a discoverable .env file.
    """
    self.api_key = api_key or os.getenv("OPENROUTER_API_KEY")
    if not self.api_key:
        # Last resort: try to populate the environment from a .env file.
        _load_env_file()
        self.api_key = os.getenv("OPENROUTER_API_KEY")
    if not self.api_key:
        raise ValueError(
            "OPENROUTER_API_KEY not found. Please either:\n"
            " 1. Set the OPENROUTER_API_KEY environment variable\n"
            " 2. Add OPENROUTER_API_KEY to your .env file\n"
            " 3. Pass api_key parameter to the constructor\n"
            "Get your API key from: https://openrouter.ai/keys"
        )
    self.verbose = verbose
    # Most recent failure message from generate_image(), for callers.
    self._last_error = None
    self.base_url = "https://openrouter.ai/api/v1"
    # Nano Banana Pro for image generation
    self.image_model = "google/gemini-3-pro-image-preview"
    # Gemini 3 Pro for quality review
    self.review_model = "google/gemini-3-pro"
def _log(self, message: str):
"""Log message if verbose mode is enabled."""
if self.verbose:
print(f"[{time.strftime('%H:%M:%S')}] {message}")
def _make_request(self, model: str, messages: List[Dict[str, Any]],
                  modalities: Optional[List[str]] = None) -> Dict[str, Any]:
    """Make a request to OpenRouter API.

    Args:
        model: OpenRouter model identifier to call.
        messages: Chat messages payload (OpenAI-compatible format).
        modalities: Optional response modalities, e.g. ["image", "text"].

    Returns:
        The decoded JSON response body.

    Raises:
        RuntimeError: On timeout, transport errors, or non-200 responses.
    """
    headers = {
        "Authorization": f"Bearer {self.api_key}",
        "Content-Type": "application/json",
        # OpenRouter app-attribution headers.
        "HTTP-Referer": "https://github.com/scientific-writer",
        "X-Title": "Scientific Slide Generator"
    }
    payload = {
        "model": model,
        "messages": messages
    }
    if modalities:
        payload["modalities"] = modalities
    self._log(f"Making request to {model}...")
    try:
        response = requests.post(
            f"{self.base_url}/chat/completions",
            headers=headers,
            json=payload,
            timeout=120
        )
        try:
            response_json = response.json()
        except json.JSONDecodeError:
            # Non-JSON body: keep a truncated copy for diagnostics.
            response_json = {"raw_text": response.text[:500]}
        if response.status_code != 200:
            error_detail = response_json.get("error", response_json)
            self._log(f"HTTP {response.status_code}: {error_detail}")
            raise RuntimeError(f"API request failed (HTTP {response.status_code}): {error_detail}")
        return response_json
    except requests.exceptions.Timeout:
        raise RuntimeError("API request timed out after 120 seconds")
    except requests.exceptions.RequestException as e:
        raise RuntimeError(f"API request failed: {str(e)}")
def _extract_image_from_response(self, response: Dict[str, Any]) -> Optional[bytes]:
"""Extract base64-encoded image from API response."""
try:
choices = response.get("choices", [])
if not choices:
self._log("No choices in response")
return None
message = choices[0].get("message", {})
# Nano Banana Pro returns images in the 'images' field
images = message.get("images", [])
if images and len(images) > 0:
self._log(f"Found {len(images)} image(s) in 'images' field")
first_image = images[0]
if isinstance(first_image, dict):
if first_image.get("type") == "image_url":
url = first_image.get("image_url", {})
if isinstance(url, dict):
url = url.get("url", "")
if url and url.startswith("data:image"):
if "," in url:
base64_str = url.split(",", 1)[1]
base64_str = base64_str.replace('\n', '').replace('\r', '').replace(' ', '')
self._log(f"Extracted base64 data (length: {len(base64_str)})")
return base64.b64decode(base64_str)
# Fallback: check content field
content = message.get("content", "")
if isinstance(content, str) and "data:image" in content:
import re
match = re.search(r'data:image/[^;]+;base64,([A-Za-z0-9+/=\n\r]+)', content, re.DOTALL)
if match:
base64_str = match.group(1).replace('\n', '').replace('\r', '').replace(' ', '')
self._log(f"Found image in content field (length: {len(base64_str)})")
return base64.b64decode(base64_str)
if isinstance(content, list):
for i, block in enumerate(content):
if isinstance(block, dict) and block.get("type") == "image_url":
url = block.get("image_url", {})
if isinstance(url, dict):
url = url.get("url", "")
if url and url.startswith("data:image") and "," in url:
base64_str = url.split(",", 1)[1].replace('\n', '').replace('\r', '').replace(' ', '')
self._log(f"Found image in content block {i}")
return base64.b64decode(base64_str)
self._log("No image data found in response")
return None
except Exception as e:
self._log(f"Error extracting image: {str(e)}")
return None
def _image_to_base64(self, image_path: str) -> str:
"""Convert image file to base64 data URL."""
with open(image_path, "rb") as f:
image_data = f.read()
ext = Path(image_path).suffix.lower()
mime_type = {
".png": "image/png",
".jpg": "image/jpeg",
".jpeg": "image/jpeg",
".gif": "image/gif",
".webp": "image/webp"
}.get(ext, "image/png")
base64_data = base64.b64encode(image_data).decode("utf-8")
return f"data:{mime_type};base64,{base64_data}"
def generate_image(self, prompt: str, attachments: Optional[List[str]] = None) -> Optional[bytes]:
    """
    Generate an image using Nano Banana Pro.

    Args:
        prompt: Text description of the image to generate
        attachments: Optional list of image file paths to attach as context

    Returns:
        Image bytes or None if generation failed. On failure the reason
        is stored in self._last_error for the caller to inspect.
    """
    self._last_error = None
    # Build content with text and optional image attachments
    content = []
    # Add text prompt
    content.append({
        "type": "text",
        "text": prompt
    })
    # Add attached images as context
    if attachments:
        for img_path in attachments:
            try:
                img_data_url = self._image_to_base64(img_path)
                content.append({
                    "type": "image_url",
                    "image_url": {"url": img_data_url}
                })
                self._log(f"Attached image: {img_path}")
            except Exception as e:
                # Best-effort: a bad attachment is skipped, not fatal.
                self._log(f"Warning: Could not attach {img_path}: {e}")
    messages = [
        {
            "role": "user",
            # Plain string content when there are no attachments; the
            # structured block list is only needed for multimodal input.
            "content": content if attachments else prompt
        }
    ]
    try:
        response = self._make_request(
            model=self.image_model,
            messages=messages,
            modalities=["image", "text"]
        )
        if self.verbose:
            self._log(f"Response keys: {response.keys()}")
            if "error" in response:
                self._log(f"API Error: {response['error']}")
        # A 200 response can still carry an application-level error body.
        if "error" in response:
            error_msg = response["error"]
            if isinstance(error_msg, dict):
                error_msg = error_msg.get("message", str(error_msg))
            self._last_error = f"API Error: {error_msg}"
            print(f"β {self._last_error}")
            return None
        image_data = self._extract_image_from_response(response)
        if image_data:
            self._log(f"β Generated image ({len(image_data)} bytes)")
        else:
            self._last_error = "No image data in API response"
            self._log(f"β {self._last_error}")
        return image_data
    except RuntimeError as e:
        # RuntimeError is what _make_request raises for transport/HTTP errors.
        self._last_error = str(e)
        self._log(f"β Generation failed: {self._last_error}")
        return None
    except Exception as e:
        self._last_error = f"Unexpected error: {str(e)}"
        self._log(f"β Generation failed: {self._last_error}")
        return None
def review_image(self, image_path: str, original_prompt: str,
                 iteration: int, visual_only: bool = False,
                 max_iterations: int = 2) -> Tuple[str, float, bool]:
    """Review generated image using Gemini 3 Pro.

    Sends the image plus a scoring rubric to self.review_model and
    parses a 0-10 score out of the reply text.

    Args:
        image_path: Path of the image file to review.
        original_prompt: The user's original request (shown to the reviewer).
        iteration: Current refinement iteration (1-based, for context).
        visual_only: True when reviewing a bare figure instead of a full slide.
        max_iterations: Iteration budget (shown to the reviewer for context).

    Returns:
        Tuple of (critique_text, score, needs_improvement). Any review
        failure degrades to a neutral (message, 7.0, False) so the
        generation workflow is never blocked by the reviewer.
    """
    image_data_url = self._image_to_base64(image_path)
    threshold = self.QUALITY_THRESHOLD
    image_type = "slide visual/figure" if visual_only else "presentation slide"
    review_prompt = f"""You are an expert reviewer evaluating a {image_type} for presentation quality.
ORIGINAL REQUEST: {original_prompt}
QUALITY THRESHOLD: {threshold}/10
ITERATION: {iteration}/{max_iterations}
Evaluate this {image_type} on these criteria:
1. **Visual Impact** (0-2 points)
- Bold, attention-grabbing design
- Professional appearance
- Suitable for projection
2. **Clarity** (0-2 points)
- Easy to understand at a glance
- Clear visual hierarchy
- Not cluttered or busy
3. **Readability** (0-2 points)
- Text is large and readable (if present)
- High contrast
- Clean typography
4. **Composition** (0-2 points)
- Balanced layout
- Good use of space
- Appropriate margins
5. **Relevance** (0-2 points)
- Matches the requested content
- Appropriate style for presentations
- Professional quality
RESPOND IN THIS EXACT FORMAT:
SCORE: [total score 0-10]
STRENGTHS:
- [strength 1]
- [strength 2]
ISSUES:
- [issue 1 if any]
- [issue 2 if any]
VERDICT: [ACCEPTABLE or NEEDS_IMPROVEMENT]
If score >= {threshold}, the image is ACCEPTABLE.
If score < {threshold}, mark as NEEDS_IMPROVEMENT with specific suggestions."""
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": review_prompt},
                {"type": "image_url", "image_url": {"url": image_data_url}}
            ]
        }
    ]
    try:
        response = self._make_request(model=self.review_model, messages=messages)
        choices = response.get("choices", [])
        if not choices:
            # Empty response: treat as a pass with a neutral score.
            return "Image generated successfully", 7.0, False
        message = choices[0].get("message", {})
        content = message.get("content", "")
        reasoning = message.get("reasoning", "")
        # Some models return the critique in "reasoning" instead of "content".
        if reasoning and not content:
            content = reasoning
        # Content may be a list of typed blocks; keep only the text blocks.
        if isinstance(content, list):
            text_parts = []
            for block in content:
                if isinstance(block, dict) and block.get("type") == "text":
                    text_parts.append(block.get("text", ""))
            content = "\n".join(text_parts)
        # Extract score; falls back to a neutral 7.0 when nothing parses.
        score = 7.0
        import re
        score_match = re.search(r'SCORE:\s*(\d+(?:\.\d+)?)', content, re.IGNORECASE)
        if score_match:
            score = float(score_match.group(1))
        else:
            # Looser fallback: any "score/rating/quality: N" phrasing.
            score_match = re.search(r'(?:score|rating|quality)[:\s]+(\d+(?:\.\d+)?)', content, re.IGNORECASE)
            if score_match:
                score = float(score_match.group(1))
        # Improvement is needed if the reviewer says so explicitly or the
        # parsed score falls below the configured threshold.
        needs_improvement = False
        if "NEEDS_IMPROVEMENT" in content.upper():
            needs_improvement = True
        elif score < threshold:
            needs_improvement = True
        self._log(f"β Review complete (Score: {score}/10, Threshold: {threshold}/10)")
        return (content if content else "Image generated successfully", score, needs_improvement)
    except Exception as e:
        # Review is best-effort: never block generation on a review failure.
        self._log(f"Review skipped: {str(e)}")
        return "Image generated successfully (review skipped)", 7.0, False
def improve_prompt(self, original_prompt: str, critique: str,
                   iteration: int, visual_only: bool = False) -> str:
    """Build a refined generation prompt that folds the reviewer critique
    into the original request, prefixed with the mode-appropriate
    guidelines block."""
    if visual_only:
        base = self.VISUAL_ONLY_GUIDELINES
    else:
        base = self.FULL_SLIDE_GUIDELINES
    return f"""{base}
USER REQUEST: {original_prompt}
ITERATION {iteration}: Based on previous feedback, address these specific improvements:
{critique}
Generate an improved version that addresses all the critique points."""
def generate_slide(self, user_prompt: str, output_path: str,
                   visual_only: bool = False,
                   iterations: int = 2,
                   attachments: Optional[List[str]] = None) -> Dict[str, Any]:
    """
    Generate a slide image or visual with iterative refinement.

    Loop: generate with the image model, review with the review model,
    and regenerate from an improved prompt until the review score meets
    self.QUALITY_THRESHOLD or the iteration budget runs out. Only the
    final accepted image is written to output_path.

    Args:
        user_prompt: Description of the slide/visual to generate
        output_path: Path to save final image
        visual_only: If True, generate just the visual (for PPT workflow)
        iterations: Maximum refinement iterations (default: 2)
        attachments: Optional list of image file paths to attach as context

    Returns:
        Dictionary with generation results and metadata: per-iteration
        critiques/scores, final image path, final score, success flag,
        and whether the loop stopped early on an accepted image.
    """
    output_path = Path(output_path)
    output_dir = output_path.parent
    output_dir.mkdir(parents=True, exist_ok=True)
    base_name = output_path.stem  # NOTE(review): currently unused
    extension = output_path.suffix or ".png"
    mode = "visual_only" if visual_only else "full_slide"
    guidelines = self.VISUAL_ONLY_GUIDELINES if visual_only else self.FULL_SLIDE_GUIDELINES
    results = {
        "user_prompt": user_prompt,
        "mode": mode,
        "quality_threshold": self.QUALITY_THRESHOLD,
        "attachments": attachments or [],
        "iterations": [],
        "final_image": None,
        "final_score": 0.0,
        "success": False,
        "early_stop": False
    }
    current_prompt = f"""{guidelines}
USER REQUEST: {user_prompt}
Generate a high-quality {'visual/figure' if visual_only else 'presentation slide'} that meets all the guidelines above."""
    print(f"\n{'='*60}")
    print(f"Generating Slide {'Visual' if visual_only else 'Image'}")
    print(f"{'='*60}")
    print(f"Description: {user_prompt[:100]}{'...' if len(user_prompt) > 100 else ''}")
    print(f"Mode: {mode}")
    if attachments:
        print(f"Attachments: {len(attachments)} image(s)")
        for att in attachments:
            print(f" - {att}")
    print(f"Quality Threshold: {self.QUALITY_THRESHOLD}/10")
    print(f"Max Iterations: {iterations}")
    print(f"Output: {output_path}")
    print(f"{'='*60}\n")
    # Track temporary files for cleanup
    temp_files = []
    final_image_data = None
    for i in range(1, iterations + 1):
        print(f"\n[Iteration {i}/{iterations}]")
        print("-" * 40)
        print(f"Generating image with Nano Banana Pro...")
        image_data = self.generate_image(current_prompt, attachments=attachments)
        if not image_data:
            # Record the failed attempt and retry with the same prompt.
            error_msg = self._last_error or 'Image generation failed'
            print(f"β Generation failed: {error_msg}")
            results["iterations"].append({
                "iteration": i,
                "success": False,
                "error": error_msg
            })
            continue
        # Save to temporary file for review (will be cleaned up)
        import tempfile
        temp_fd, temp_path = tempfile.mkstemp(suffix=extension)
        os.close(temp_fd)  # mkstemp returns an open fd we don't need
        temp_path = Path(temp_path)
        temp_files.append(temp_path)
        with open(temp_path, "wb") as f:
            f.write(image_data)
        print(f"β Generated image (iteration {i})")
        print(f"Reviewing image with Gemini 3 Pro...")
        critique, score, needs_improvement = self.review_image(
            str(temp_path), user_prompt, i, visual_only, iterations
        )
        print(f"β Score: {score}/10 (threshold: {self.QUALITY_THRESHOLD}/10)")
        results["iterations"].append({
            "iteration": i,
            "critique": critique,
            "score": score,
            "needs_improvement": needs_improvement,
            "success": True
        })
        if not needs_improvement:
            # Reviewer accepted the image: stop early.
            print(f"\nβ Quality meets threshold ({score} >= {self.QUALITY_THRESHOLD})")
            final_image_data = image_data
            results["final_score"] = score
            results["success"] = True
            results["early_stop"] = True
            break
        if i == iterations:
            # Budget exhausted: keep the last (sub-threshold) image.
            print(f"\nβ Maximum iterations reached")
            final_image_data = image_data
            results["final_score"] = score
            results["success"] = True
            break
        print(f"\nβ Quality below threshold ({score} < {self.QUALITY_THRESHOLD})")
        print(f"Improving prompt...")
        current_prompt = self.improve_prompt(user_prompt, critique, i + 1, visual_only)
    # Clean up temporary files
    for temp_file in temp_files:
        try:
            if temp_file.exists():
                temp_file.unlink()
        except Exception:
            pass  # best-effort cleanup; a stale temp file is harmless
    # Save only the final image to output path
    if results["success"] and final_image_data:
        with open(output_path, "wb") as f:
            f.write(final_image_data)
        results["final_image"] = str(output_path)
        print(f"\nβ Final image: {output_path}")
    print(f"\n{'='*60}")
    print(f"Generation Complete!")
    print(f"Final Score: {results['final_score']}/10")
    if results["early_stop"]:
        success_count = len([r for r in results['iterations'] if r.get('success')])
        print(f"Iterations Used: {success_count}/{iterations} (early stop)")
    print(f"{'='*60}\n")
    return results
def main():
    """Command-line interface: parse args, validate inputs, run generation.

    Exits 0 on success, 1 on any failure (missing API key, bad arguments,
    missing attachment files, or failed generation).
    """
    parser = argparse.ArgumentParser(
        description="Generate presentation slides or visuals using Nano Banana Pro AI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Generate a full slide (for PDF workflow)
python generate_slide_image_ai.py "Title: Machine Learning Basics\\nKey points: supervised learning, neural networks, deep learning" -o slide_01.png
# Generate just a visual/figure (for PPT workflow)
python generate_slide_image_ai.py "Neural network architecture diagram with input, hidden, and output layers" -o figure.png --visual-only
# With reference images attached (Nano Banana Pro will see these)
python generate_slide_image_ai.py "Create a slide explaining this chart with key insights" -o slide.png --attach chart.png
python generate_slide_image_ai.py "Combine these images into a comparison slide" -o compare.png --attach before.png --attach after.png
# With custom iterations
python generate_slide_image_ai.py "Title slide for AI Conference 2025" -o title.png --iterations 2
# Verbose output
python generate_slide_image_ai.py "Data flow diagram" -o flow.png -v
Environment:
OPENROUTER_API_KEY OpenRouter API key (required)
"""
    )
    parser.add_argument("prompt", help="Description of the slide or visual to generate")
    parser.add_argument("-o", "--output", required=True, help="Output image path")
    # action="append" lets --attach be given multiple times.
    parser.add_argument("--attach", action="append", dest="attachments", metavar="IMAGE",
                        help="Attach image file(s) as context for generation (can use multiple times)")
    parser.add_argument("--visual-only", action="store_true",
                        help="Generate just the visual/figure (for PPT workflow)")
    parser.add_argument("--iterations", type=int, default=2,
                        help="Maximum refinement iterations (default: 2)")
    parser.add_argument("--api-key", help="OpenRouter API key (or set OPENROUTER_API_KEY)")
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
    args = parser.parse_args()
    # Command-line key takes precedence over the environment variable.
    api_key = args.api_key or os.getenv("OPENROUTER_API_KEY")
    if not api_key:
        print("Error: OPENROUTER_API_KEY environment variable not set")
        print("\nSet it with:")
        print(" export OPENROUTER_API_KEY='your_api_key'")
        sys.exit(1)
    # The refinement loop is capped at 2 iterations by design.
    if args.iterations < 1 or args.iterations > 2:
        print("Error: Iterations must be between 1 and 2")
        sys.exit(1)
    # Validate attachments exist before spending any API calls.
    if args.attachments:
        for att in args.attachments:
            if not Path(att).exists():
                print(f"Error: Attachment file not found: {att}")
                sys.exit(1)
    try:
        generator = SlideImageGenerator(api_key=api_key, verbose=args.verbose)
        results = generator.generate_slide(
            user_prompt=args.prompt,
            output_path=args.output,
            visual_only=args.visual_only,
            iterations=args.iterations,
            attachments=args.attachments
        )
        if results["success"]:
            print(f"\nβ Success! Image saved to: {args.output}")
            sys.exit(0)
        else:
            print(f"\nβ Generation failed. Check review log for details.")
            sys.exit(1)
    except Exception as e:
        print(f"\nβ Error: {str(e)}")
        sys.exit(1)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scientific-slides/scripts/generate_slide_image_ai.py",
"license": "MIT License",
"lines": 621,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scientific-slides/scripts/pdf_to_images.py | #!/usr/bin/env python3
"""
PDF to Images Converter for Presentations
Converts presentation PDFs to images for visual inspection and review.
Supports multiple output formats and resolutions.
Uses PyMuPDF (fitz) as the primary conversion method - no external
dependencies required (no poppler, ghostscript, or ImageMagick needed).
"""
import sys
import argparse
from pathlib import Path
from typing import Optional, List
# Try to import pymupdf (preferred - no external dependencies)
# HAS_PYMUPDF gates the conversion path in PDFToImagesConverter.convert();
# the import error is deferred so `--help` still works without the package.
try:
    import fitz  # PyMuPDF
    HAS_PYMUPDF = True
except ImportError:
    HAS_PYMUPDF = False
class PDFToImagesConverter:
    """Renders the pages of a PDF presentation to image files."""

    def __init__(
        self,
        pdf_path: str,
        output_prefix: str,
        dpi: int = 150,
        format: str = 'jpg',
        first_page: Optional[int] = None,
        last_page: Optional[int] = None
    ):
        # Normalize inputs up front; format is stored lowercased so the
        # rest of the class compares against canonical names only.
        self.pdf_path = Path(pdf_path)
        self.output_prefix = output_prefix
        self.dpi = dpi
        self.format = format.lower()
        self.first_page = first_page
        self.last_page = last_page
        # Reject anything outside the supported raster formats early.
        if self.format not in ('jpg', 'jpeg', 'png'):
            raise ValueError(f"Unsupported format: {format}. Use jpg or png.")

    def convert(self) -> List[Path]:
        """Convert PDF to images using PyMuPDF."""
        if not self.pdf_path.exists():
            raise FileNotFoundError(f"PDF not found: {self.pdf_path}")
        print(f"Converting: {self.pdf_path.name}")
        print(f"Output prefix: {self.output_prefix}")
        print(f"DPI: {self.dpi}")
        print(f"Format: {self.format}")
        if not HAS_PYMUPDF:
            raise RuntimeError(
                "PyMuPDF not installed. Install it with:\n"
                " pip install pymupdf\n\n"
                "PyMuPDF is a self-contained library - no external dependencies needed."
            )
        return self._convert_with_pymupdf()

    def _convert_with_pymupdf(self) -> List[Path]:
        """Render pages with the PyMuPDF library (no external dependencies)."""
        print("Using PyMuPDF (no external dependencies required)...")
        document = fitz.open(self.pdf_path)
        # CLI pages are 1-indexed; fitz pages are 0-indexed.
        first = self.first_page - 1 if self.first_page else 0
        last = self.last_page or document.page_count
        # 72 DPI is the PDF base resolution; scale relative to it.
        scale = self.dpi / 72
        transform = fitz.Matrix(scale, scale)
        Path(self.output_prefix).parent.mkdir(parents=True, exist_ok=True)
        encoder = "jpeg" if self.format in ('jpg', 'jpeg') else "png"
        created: List[Path] = []
        for index in range(first, last):
            rendered = document[index].get_pixmap(matrix=transform)
            target = Path(f"{self.output_prefix}-{index + 1:03d}.{self.format}")
            rendered.save(str(target), output=encoder)
            created.append(target)
            print(f" Created: {target.name}")
        document.close()
        return created
def main():
    """CLI entry point: parse arguments, run the conversion, report results.

    Exits 0 on success, 1 on any conversion error.
    """
    parser = argparse.ArgumentParser(
        description='Convert presentation PDFs to images',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
%(prog)s presentation.pdf slides
β Creates slides-001.jpg, slides-002.jpg, ...
%(prog)s presentation.pdf output/slide --dpi 300 --format png
β Creates output/slide-001.png, slide-002.png, ... at high resolution
%(prog)s presentation.pdf review/s --first 5 --last 10
β Converts only slides 5-10
Output:
Images are named: PREFIX-001.FORMAT, PREFIX-002.FORMAT, etc.
Resolution:
- 150 DPI: Good for screen review (default)
- 200 DPI: Higher quality for detailed inspection
- 300 DPI: Print quality (larger files)
Requirements:
Install PyMuPDF (no external dependencies needed):
pip install pymupdf
"""
    )
    parser.add_argument(
        'pdf_path',
        help='Path to PDF presentation'
    )
    parser.add_argument(
        'output_prefix',
        help='Output filename prefix (e.g., "slides" or "output/slide")'
    )
    parser.add_argument(
        '--dpi', '-r',
        type=int,
        default=150,
        help='Resolution in DPI (default: 150)'
    )
    parser.add_argument(
        '--format', '-f',
        choices=['jpg', 'jpeg', 'png'],
        default='jpg',
        help='Output format (default: jpg)'
    )
    parser.add_argument(
        '--first',
        type=int,
        help='First page to convert (1-indexed)'
    )
    parser.add_argument(
        '--last',
        type=int,
        help='Last page to convert (1-indexed)'
    )
    args = parser.parse_args()
    # Create output directory if needed (prefix may include a directory)
    output_dir = Path(args.output_prefix).parent
    if output_dir != Path('.'):
        output_dir.mkdir(parents=True, exist_ok=True)
    # Convert
    try:
        converter = PDFToImagesConverter(
            pdf_path=args.pdf_path,
            output_prefix=args.output_prefix,
            dpi=args.dpi,
            format=args.format,
            first_page=args.first,
            last_page=args.last
        )
        output_files = converter.convert()
        print()
        print("=" * 60)
        print(f"β Success! Created {len(output_files)} image(s)")
        print("=" * 60)
        if output_files:
            print(f"\nFirst image: {output_files[0]}")
            print(f"Last image: {output_files[-1]}")
            # Calculate total size
            total_size = sum(f.stat().st_size for f in output_files)
            size_mb = total_size / (1024 * 1024)
            print(f"Total size: {size_mb:.2f} MB")
        print("\nNext steps:")
        print(" 1. Review images for layout issues")
        print(" 2. Check for text overflow or element overlap")
        print(" 3. Verify readability from distance")
        print(" 4. Document issues with slide numbers")
        # SystemExit is a BaseException, so it is not swallowed below.
        sys.exit(0)
    except Exception as e:
        print(f"\nβ Error: {str(e)}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scientific-slides/scripts/pdf_to_images.py",
"license": "MIT License",
"lines": 174,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scientific-slides/scripts/slides_to_pdf.py | #!/usr/bin/env python3
"""
Combine slide images into a single PDF presentation.
This script takes multiple slide images (PNG, JPG) and combines them
into a single PDF file, maintaining aspect ratio and quality.
Usage:
# Combine all PNG files in a directory
python slides_to_pdf.py slides/*.png -o presentation.pdf
# Combine specific files in order
python slides_to_pdf.py slide_01.png slide_02.png slide_03.png -o presentation.pdf
# From a directory (sorted by filename)
python slides_to_pdf.py slides/ -o presentation.pdf
"""
import argparse
import sys
from pathlib import Path
from typing import List
# Pillow is a hard requirement for this script: exit immediately with an
# install hint rather than failing later with a NameError on Image.
try:
    from PIL import Image
except ImportError:
    print("Error: Pillow library not found. Install with: pip install Pillow")
    sys.exit(1)
def get_image_files(paths: List[str]) -> List[Path]:
    """
    Collect image files from a mix of file paths, directories, and glob
    patterns.

    Args:
        paths: List of file paths, directory paths, or glob patterns

    Returns:
        Deduplicated list of image file paths, sorted by filename
    """
    allowed = {'.png', '.jpg', '.jpeg', '.gif', '.webp', '.bmp'}
    found = []
    for entry in paths:
        candidate = Path(entry)
        if candidate.is_file():
            # Explicit file: keep it only when it looks like an image.
            if candidate.suffix.lower() in allowed:
                found.append(candidate)
            else:
                print(f"Warning: Skipping non-image file: {candidate}")
        elif candidate.is_dir():
            # Directory: sweep it for every known extension, both cases.
            for ext in allowed:
                found.extend(candidate.glob(f"*{ext}"))
                found.extend(candidate.glob(f"*{ext.upper()}"))
        elif candidate.parent.exists():
            # Neither a file nor a directory: treat the basename as a
            # glob pattern relative to its parent directory.
            found.extend(
                hit for hit in candidate.parent.glob(candidate.name)
                if hit.suffix.lower() in allowed
            )
    # Drop duplicates (a path may match more than one pattern), then
    # order deterministically by filename.
    unique = list(set(found))
    unique.sort(key=lambda p: p.name)
    return unique
def combine_images_to_pdf(image_paths: List[Path], output_path: Path,
                          dpi: int = 150, verbose: bool = False) -> bool:
    """
    Combine multiple images into a single PDF (one page per image, in the
    order given).

    Args:
        image_paths: List of image file paths
        output_path: Output PDF path
        dpi: Resolution for the PDF (default: 150)
        verbose: Print progress information

    Returns:
        True if successful, False otherwise (errors are printed, not raised)
    """
    if not image_paths:
        print("Error: No image files found")
        return False
    if verbose:
        print(f"Combining {len(image_paths)} images into PDF...")
    # Load all images
    images = []
    for i, img_path in enumerate(image_paths):
        try:
            img = Image.open(img_path)
            # Convert to RGB if necessary (PDF doesn't support RGBA)
            if img.mode in ('RGBA', 'P'):
                # Flatten transparency onto a white background; palette
                # ('P') images are promoted to RGBA first so their alpha
                # channel can serve as the paste mask.
                background = Image.new('RGB', img.size, (255, 255, 255))
                if img.mode == 'P':
                    img = img.convert('RGBA')
                background.paste(img, mask=img.split()[-1] if img.mode == 'RGBA' else None)
                img = background
            elif img.mode != 'RGB':
                img = img.convert('RGB')
            images.append(img)
            if verbose:
                print(f" [{i+1}/{len(image_paths)}] Loaded: {img_path.name} ({img.size[0]}x{img.size[1]})")
        except Exception as e:
            # One unreadable image aborts the whole PDF.
            print(f"Error loading {img_path}: {e}")
            return False
    if not images:
        print("Error: No images could be loaded")
        return False
    # Create output directory if needed
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # Save as PDF
    try:
        # Pillow writes a multi-page PDF by saving the first image with
        # save_all=True and passing the rest via append_images.
        first_image = images[0]
        # Remaining images (if any)
        remaining_images = images[1:] if len(images) > 1 else []
        # Save to PDF
        first_image.save(
            output_path,
            "PDF",
            resolution=dpi,
            save_all=True,
            append_images=remaining_images
        )
        if verbose:
            print(f"\nβ PDF created: {output_path}")
            print(f" Total slides: {len(images)}")
            file_size = output_path.stat().st_size
            if file_size > 1024 * 1024:
                print(f" File size: {file_size / (1024 * 1024):.1f} MB")
            else:
                print(f" File size: {file_size / 1024:.1f} KB")
        return True
    except Exception as e:
        print(f"Error creating PDF: {e}")
        return False
    finally:
        # Close all images regardless of outcome to release file handles.
        for img in images:
            img.close()
def main():
    """Command-line interface: collect images, build the PDF, exit 0/1."""
    parser = argparse.ArgumentParser(
        description="Combine slide images into a single PDF presentation",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Combine PNG files using glob pattern
python slides_to_pdf.py slides/*.png -o presentation.pdf
# Combine specific files in order
python slides_to_pdf.py title.png intro.png methods.png results.png -o talk.pdf
# Combine all images from a directory (sorted by filename)
python slides_to_pdf.py slides/ -o presentation.pdf
# With custom DPI and verbose output
python slides_to_pdf.py slides/*.png -o presentation.pdf --dpi 200 -v
Supported formats: PNG, JPG, JPEG, GIF, WEBP, BMP
Tips:
- Name your slide images with numbers for correct ordering:
01_title.png, 02_intro.png, 03_methods.png, etc.
- Use the generate_slide_image.py script to create slides first
- Standard presentation aspect ratio is 16:9 (1920x1080 or 1280x720)
"""
    )
    parser.add_argument("images", nargs="+",
                        help="Image files, directories, or glob patterns")
    parser.add_argument("-o", "--output", required=True,
                        help="Output PDF file path")
    parser.add_argument("--dpi", type=int, default=150,
                        help="PDF resolution in DPI (default: 150)")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Verbose output")
    args = parser.parse_args()
    # Get image files (expands directories and glob patterns, sorts by name)
    image_files = get_image_files(args.images)
    if not image_files:
        print("Error: No image files found matching the specified paths")
        print("\nUsage examples:")
        print(" python slides_to_pdf.py slides/*.png -o presentation.pdf")
        print(" python slides_to_pdf.py slide1.png slide2.png -o presentation.pdf")
        sys.exit(1)
    print(f"Found {len(image_files)} image(s)")
    if args.verbose:
        for f in image_files:
            print(f" - {f}")
    # Combine into PDF
    output_path = Path(args.output)
    success = combine_images_to_pdf(
        image_files,
        output_path,
        dpi=args.dpi,
        verbose=args.verbose
    )
    if success:
        print(f"\nβ PDF created: {output_path}")
        sys.exit(0)
    else:
        print(f"\nβ Failed to create PDF")
        sys.exit(1)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scientific-slides/scripts/slides_to_pdf.py",
"license": "MIT License",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scientific-slides/scripts/validate_presentation.py | #!/usr/bin/env python3
"""
Presentation Validation Script
Validates scientific presentations for common issues:
- Slide count vs. duration
- LaTeX compilation
- File size checks
- Basic format validation
"""
import sys
import os
import argparse
import subprocess
from pathlib import Path
from typing import Dict, List, Tuple, Optional
# Try to import PyPDF2 for PDF analysis
# Both parsers below are optional: the matching validator degrades to a
# warning (with an install hint) when its library is missing.
try:
    import PyPDF2
    HAS_PYPDF2 = True
except ImportError:
    HAS_PYPDF2 = False
# Try to import python-pptx for PowerPoint analysis
try:
    from pptx import Presentation
    HAS_PPTX = True
except ImportError:
    HAS_PPTX = False
class PresentationValidator:
    """Validates presentations for common issues.

    Findings are collected into three lists: ``issues`` (validation
    failures), ``warnings`` (worth reviewing) and ``info`` (neutral
    facts), which validate() returns in a summary dict.
    """
    # Recommended slide counts by duration in minutes:
    # {duration: (min, recommended, max)}
    SLIDE_GUIDELINES = {
        5: (5, 6, 8),
        10: (8, 11, 14),
        15: (13, 16, 20),
        20: (18, 22, 26),
        30: (22, 27, 33),
        45: (32, 40, 50),
        60: (40, 52, 65),
    }

    def __init__(self, filepath: str, duration: Optional[int] = None):
        # duration: planned talk length in minutes; when None the
        # slide-count check is skipped.
        self.filepath = Path(filepath)
        self.duration = duration
        self.file_type = self.filepath.suffix.lower()
        self.issues: List[str] = []
        self.warnings: List[str] = []
        self.info: List[str] = []

    def validate(self) -> Dict:
        """Run all validations and return results.

        Dispatches on the file extension; unknown types only produce a
        warning. Returns the dict described in _format_results().
        """
        print(f"Validating: {self.filepath.name}")
        print(f"File type: {self.file_type}")
        print("=" * 60)
        # Check file exists
        if not self.filepath.exists():
            self.issues.append(f"File not found: {self.filepath}")
            return self._format_results()
        # File size check
        self._check_file_size()
        # Type-specific validation
        if self.file_type == '.pdf':
            self._validate_pdf()
        elif self.file_type in ['.pptx', '.ppt']:
            self._validate_pptx()
        elif self.file_type in ['.tex']:
            self._validate_latex()
        else:
            self.warnings.append(f"Unknown file type: {self.file_type}")
        return self._format_results()

    def _check_file_size(self):
        """Check if file size is reasonable (warn above 50 MB, flag above 100 MB)."""
        size_mb = self.filepath.stat().st_size / (1024 * 1024)
        self.info.append(f"File size: {size_mb:.2f} MB")
        if size_mb > 100:
            self.issues.append(
                f"File is very large ({size_mb:.1f} MB). "
                "Consider compressing images."
            )
        elif size_mb > 50:
            self.warnings.append(
                f"File is large ({size_mb:.1f} MB). "
                "May be slow to email or upload."
            )

    def _validate_pdf(self):
        """Validate PDF presentation: page count, dimensions, aspect ratio."""
        if not HAS_PYPDF2:
            # Optional dependency: degrade to a warning instead of failing.
            self.warnings.append(
                "PyPDF2 not installed. Install with: pip install PyPDF2"
            )
            return
        try:
            with open(self.filepath, 'rb') as f:
                reader = PyPDF2.PdfReader(f)
                num_pages = len(reader.pages)
                self.info.append(f"Number of slides: {num_pages}")
                # Check slide count against duration
                if self.duration:
                    self._check_slide_count(num_pages)
                # Get page size from the first page's media box (in points)
                first_page = reader.pages[0]
                media_box = first_page.mediabox
                width = float(media_box.width)
                height = float(media_box.height)
                # Convert points to inches (72 points = 1 inch)
                width_in = width / 72
                height_in = height / 72
                aspect = width / height
                self.info.append(
                    f"Slide dimensions: {width_in:.1f}\" Γ {height_in:.1f}\" "
                    f"(aspect ratio: {aspect:.2f})"
                )
                # Check common aspect ratios (tolerance 0.01)
                if abs(aspect - 16/9) < 0.01:
                    self.info.append("Aspect ratio: 16:9 (widescreen)")
                elif abs(aspect - 4/3) < 0.01:
                    self.info.append("Aspect ratio: 4:3 (standard)")
                else:
                    self.warnings.append(
                        f"Unusual aspect ratio: {aspect:.2f}. "
                        "Confirm this matches venue requirements."
                    )
        except Exception as e:
            self.issues.append(f"Error reading PDF: {str(e)}")

    def _validate_pptx(self):
        """Validate PowerPoint presentation: slide count, dimensions, content."""
        if not HAS_PPTX:
            self.warnings.append(
                "python-pptx not installed. Install with: pip install python-pptx"
            )
            return
        try:
            prs = Presentation(self.filepath)
            num_slides = len(prs.slides)
            self.info.append(f"Number of slides: {num_slides}")
            # Check slide count against duration
            if self.duration:
                self._check_slide_count(num_slides)
            # Get slide dimensions; python-pptx reports EMU (914400 EMU = 1 inch)
            width_inches = prs.slide_width / 914400  # EMU to inches
            height_inches = prs.slide_height / 914400
            aspect = prs.slide_width / prs.slide_height
            self.info.append(
                f"Slide dimensions: {width_inches:.1f}\" Γ {height_inches:.1f}\" "
                f"(aspect ratio: {aspect:.2f})"
            )
            # Check fonts and text
            self._check_pptx_content(prs)
        except Exception as e:
            self.issues.append(f"Error reading PowerPoint: {str(e)}")

    def _check_pptx_content(self, prs):
        """Check PowerPoint content for small fonts and bullet overload."""
        small_text_slides = []
        many_bullets_slides = []
        for idx, slide in enumerate(prs.slides, start=1):
            for shape in slide.shapes:
                if not shape.has_text_frame:
                    continue
                text_frame = shape.text_frame
                # Check for small fonts; runs without an explicit size
                # (inherited from the theme) are not counted.
                for paragraph in text_frame.paragraphs:
                    for run in paragraph.runs:
                        if run.font.size and run.font.size.pt < 18:
                            small_text_slides.append(idx)
                            break  # exits only the runs loop for this paragraph
                # Check for too many top-level bullets
                bullet_count = sum(1 for p in text_frame.paragraphs if p.level == 0)
                if bullet_count > 6:
                    many_bullets_slides.append(idx)
        # Report issues, showing at most the first five slide numbers
        if small_text_slides:
            unique_slides = sorted(set(small_text_slides))
            self.warnings.append(
                f"Small text (<18pt) found on slides: {unique_slides[:5]}"
                + (" ..." if len(unique_slides) > 5 else "")
            )
        if many_bullets_slides:
            unique_slides = sorted(set(many_bullets_slides))
            self.warnings.append(
                f"Many bullets (>6) on slides: {unique_slides[:5]}"
                + (" ..." if len(unique_slides) > 5 else "")
            )

    def _validate_latex(self):
        """Validate LaTeX Beamer source by compiling it and checking the PDF."""
        self.info.append("LaTeX source file detected")
        # Try to compile
        if self._try_compile_latex():
            self.info.append("LaTeX compilation: SUCCESS")
            # If PDF was generated, validate it with a nested validator
            pdf_path = self.filepath.with_suffix('.pdf')
            if pdf_path.exists():
                pdf_validator = PresentationValidator(str(pdf_path), self.duration)
                pdf_results = pdf_validator.validate()
                # Merge results from the nested run into this one
                self.info.extend(pdf_results['info'])
                self.warnings.extend(pdf_results['warnings'])
                self.issues.extend(pdf_results['issues'])
        else:
            self.issues.append(
                "LaTeX compilation failed. Check .log file for errors."
            )

    def _try_compile_latex(self) -> bool:
        """Try to compile the LaTeX file with pdflatex (60 s timeout).

        Returns False when pdflatex is not installed, times out, or exits
        non-zero.
        """
        try:
            # Run in the source directory so relative \\includegraphics work.
            result = subprocess.run(
                ['pdflatex', '-interaction=nonstopmode', self.filepath.name],
                cwd=self.filepath.parent,
                capture_output=True,
                timeout=60
            )
            return result.returncode == 0
        except (subprocess.TimeoutExpired, FileNotFoundError):
            return False

    def _check_slide_count(self, num_slides: int):
        """Check if slide count is appropriate for the talk duration."""
        if self.duration not in self.SLIDE_GUIDELINES:
            # Find nearest duration that has published guidelines
            durations = sorted(self.SLIDE_GUIDELINES.keys())
            nearest = min(durations, key=lambda x: abs(x - self.duration))
            min_slides, rec_slides, max_slides = self.SLIDE_GUIDELINES[nearest]
            self.info.append(
                f"Using guidelines for {nearest}-minute talk "
                f"(closest to {self.duration} minutes)"
            )
        else:
            min_slides, rec_slides, max_slides = self.SLIDE_GUIDELINES[self.duration]
        self.info.append(
            f"Recommended slides for {self.duration}-minute talk: "
            f"{min_slides}-{max_slides} (optimal: ~{rec_slides})"
        )
        if num_slides < min_slides:
            self.warnings.append(
                f"Fewer slides ({num_slides}) than recommended ({min_slides}-{max_slides}). "
                "May have too much time or too little content."
            )
        elif num_slides > max_slides:
            self.warnings.append(
                f"More slides ({num_slides}) than recommended ({min_slides}-{max_slides}). "
                "Likely to run over time."
            )
        else:
            self.info.append(
                f"Slide count ({num_slides}) is within recommended range."
            )

    def _format_results(self) -> Dict:
        """Bundle the collected findings into the result dict consumed by
        print_results(): valid is True iff no hard issues were recorded."""
        return {
            'filepath': str(self.filepath),
            'file_type': self.file_type,
            'info': self.info,
            'warnings': self.warnings,
            'issues': self.issues,
            'valid': len(self.issues) == 0
        }
def print_results(results: Dict):
    """Render a validation summary: banner, the three finding sections
    (info / warnings / issues), then an overall pass/fail verdict."""
    bar = "=" * 60
    print()
    print(bar)
    print("VALIDATION RESULTS")
    print(bar)
    # Each section is printed only when it has entries.
    sections = (
        ("\nπ Information:", results['info']),
        ("\nβ οΈ Warnings:", results['warnings']),
        ("\nβ Issues:", results['issues']),
    )
    for heading, entries in sections:
        if entries:
            print(heading)
            for entry in entries:
                print(f" β’ {entry}")
    # Overall status
    print("\n" + bar)
    if results['valid']:
        print("β Validation PASSED")
        if results['warnings']:
            print(f" ({len(results['warnings'])} warning(s) found)")
    else:
        print("β Validation FAILED")
        print(f" ({len(results['issues'])} issue(s) found)")
    print(bar)
def main():
    """CLI entry point: validate one presentation file; exit 0 when valid,
    1 when any hard issue was found."""
    parser = argparse.ArgumentParser(
        description='Validate scientific presentations',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
%(prog)s presentation.pdf --duration 15
%(prog)s slides.pptx --duration 45
%(prog)s beamer_talk.tex --duration 20
Supported file types:
- PDF (.pdf)
- PowerPoint (.pptx, .ppt)
- LaTeX Beamer (.tex)
Validation checks:
- Slide count vs. duration
- File size
- Slide dimensions
- Font sizes (PowerPoint)
- LaTeX compilation (Beamer)
"""
    )
    parser.add_argument(
        'filepath',
        help='Path to presentation file (PDF, PPTX, or TEX)'
    )
    parser.add_argument(
        '--duration', '-d',
        type=int,
        help='Presentation duration in minutes'
    )
    parser.add_argument(
        '--quiet', '-q',
        action='store_true',
        help='Only show issues and warnings'
    )
    args = parser.parse_args()
    # Validate
    validator = PresentationValidator(args.filepath, args.duration)
    results = validator.validate()
    # Print results
    if args.quiet:
        # Only show warnings and issues
        if results['warnings'] or results['issues']:
            print_results(results)
        else:
            print("β No issues found")
    else:
        print_results(results)
    # Exit with appropriate code
    sys.exit(0 if results['valid'] else 1)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scientific-slides/scripts/validate_presentation.py",
"license": "MIT License",
"lines": 335,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scientific-visualization/assets/color_palettes.py | """
Colorblind-Friendly Color Palettes for Scientific Visualization
This module provides carefully curated color palettes optimized for
scientific publications and accessibility.
Usage:
from color_palettes import OKABE_ITO, apply_palette
import matplotlib.pyplot as plt
apply_palette('okabe_ito')
plt.plot([1, 2, 3], [1, 4, 9])
"""
# Okabe-Ito Palette (2008)
# The most widely recommended colorblind-friendly palette
OKABE_ITO = {
    'orange': '#E69F00',
    'sky_blue': '#56B4E9',
    'bluish_green': '#009E73',
    'yellow': '#F0E442',
    'blue': '#0072B2',
    'vermillion': '#D55E00',
    'reddish_purple': '#CC79A7',
    'black': '#000000'
}

# Same eight colors as an ordered list, convenient for matplotlib color cycles.
OKABE_ITO_LIST = ['#E69F00', '#56B4E9', '#009E73', '#F0E442',
                  '#0072B2', '#D55E00', '#CC79A7', '#000000']

# Wong Palette (Nature Methods)
WONG = ['#000000', '#E69F00', '#56B4E9', '#009E73',
        '#F0E442', '#0072B2', '#D55E00', '#CC79A7']

# Paul Tol Palettes (https://personal.sron.nl/~pault/)
TOL_BRIGHT = ['#4477AA', '#EE6677', '#228833', '#CCBB44',
              '#66CCEE', '#AA3377', '#BBBBBB']
TOL_MUTED = ['#332288', '#88CCEE', '#44AA99', '#117733',
             '#999933', '#DDCC77', '#CC6677', '#882255', '#AA4499']
TOL_LIGHT = ['#77AADD', '#EE8866', '#EEDD88', '#FFAABB',
             '#99DDFF', '#44BB99', '#BBCC33', '#AAAA00', '#DDDDDD']
TOL_HIGH_CONTRAST = ['#004488', '#DDAA33', '#BB5566']

# Sequential colormaps (for continuous data)
# These are matplotlib colormap *names*, not hex lists.
SEQUENTIAL_COLORMAPS = [
    'viridis',   # Default, perceptually uniform
    'plasma',    # Perceptually uniform
    'inferno',   # Perceptually uniform
    'magma',     # Perceptually uniform
    'cividis',   # Optimized for colorblind viewers
    'YlOrRd',    # Yellow-Orange-Red
    'YlGnBu',    # Yellow-Green-Blue
    'Blues',     # Single hue
    'Greens',    # Single hue
    'Purples',   # Single hue
]

# Diverging colormaps (for data with meaningful center)
DIVERGING_COLORMAPS_SAFE = [
    'RdYlBu',    # Red-Yellow-Blue (reversed is common)
    'RdBu',      # Red-Blue
    'PuOr',      # Purple-Orange (excellent for colorblind)
    'BrBG',      # Brown-Blue-Green (good for colorblind)
    'PRGn',      # Purple-Green (use with caution)
    'PiYG',      # Pink-Yellow-Green (use with caution)
]

# Diverging colormaps to AVOID (red-green combinations)
DIVERGING_COLORMAPS_AVOID = [
    'RdGn',      # Red-Green (problematic!)
    'RdYlGn',    # Red-Yellow-Green (problematic!)
]

# Fluorophore colors (traditional - use with caution)
FLUOROPHORES_TRADITIONAL = {
    'DAPI': '#0000FF',   # Blue
    'GFP': '#00FF00',    # Green (problematic for colorblind)
    'RFP': '#FF0000',    # Red
    'Cy5': '#FF00FF',    # Magenta
    'YFP': '#FFFF00',    # Yellow
}

# Fluorophore colors (colorblind-friendly alternatives)
FLUOROPHORES_ACCESSIBLE = {
    'Channel1': '#0072B2',   # Blue
    'Channel2': '#E69F00',   # Orange (instead of green)
    'Channel3': '#D55E00',   # Vermillion (instead of red)
    'Channel4': '#CC79A7',   # Magenta
    'Channel5': '#F0E442',   # Yellow
}

# Genomics/Bioinformatics
# Conventional base coloring (includes a red/green pair).
DNA_BASES = {
    'A': '#00CC00',   # Green
    'C': '#0000CC',   # Blue
    'G': '#FFB300',   # Orange
    'T': '#CC0000',   # Red
}

# Colorblind-safe base coloring drawn from the Okabe-Ito palette.
DNA_BASES_ACCESSIBLE = {
    'A': '#009E73',   # Bluish Green
    'C': '#0072B2',   # Blue
    'G': '#E69F00',   # Orange
    'T': '#D55E00',   # Vermillion
}
def apply_palette(palette_name='okabe_ito'):
"""
Apply a color palette to matplotlib's default color cycle.
Parameters
----------
palette_name : str
Name of the palette to apply. Options:
'okabe_ito', 'wong', 'tol_bright', 'tol_muted',
'tol_light', 'tol_high_contrast'
Returns
-------
list
List of colors in the palette
Examples
--------
>>> apply_palette('okabe_ito')
>>> plt.plot([1, 2, 3], [1, 4, 9]) # Uses Okabe-Ito colors
"""
try:
import matplotlib.pyplot as plt
except ImportError:
print("matplotlib not installed")
return None
palettes = {
'okabe_ito': OKABE_ITO_LIST,
'wong': WONG,
'tol_bright': TOL_BRIGHT,
'tol_muted': TOL_MUTED,
'tol_light': TOL_LIGHT,
'tol_high_contrast': TOL_HIGH_CONTRAST,
}
if palette_name not in palettes:
available = ', '.join(palettes.keys())
raise ValueError(f"Palette '{palette_name}' not found. Available: {available}")
colors = palettes[palette_name]
plt.rcParams['axes.prop_cycle'] = plt.cycler(color=colors)
return colors
def get_palette(palette_name='okabe_ito'):
    """
    Get a color palette as a list.

    Parameters
    ----------
    palette_name : str
        Name of the palette. Options: 'okabe_ito', 'wong', 'tol_bright',
        'tol_muted', 'tol_light', 'tol_high_contrast'

    Returns
    -------
    list
        A fresh list of color hex codes. A copy is returned so that
        callers mutating the result cannot corrupt the shared
        module-level palette constants.

    Raises
    ------
    ValueError
        If `palette_name` is not one of the known palettes.
    """
    palettes = {
        'okabe_ito': OKABE_ITO_LIST,
        'wong': WONG,
        'tol_bright': TOL_BRIGHT,
        'tol_muted': TOL_MUTED,
        'tol_light': TOL_LIGHT,
        'tol_high_contrast': TOL_HIGH_CONTRAST,
    }
    if palette_name not in palettes:
        available = ', '.join(palettes.keys())
        raise ValueError(f"Palette '{palette_name}' not found. Available: {available}")
    # FIX: return a copy rather than the module-level list object itself.
    return list(palettes[palette_name])
if __name__ == "__main__":
    # Demo mode: summarize the bundled palettes and dump the Okabe-Ito
    # colors so the module can be sanity-checked from the command line.
    print("Available colorblind-friendly palettes:")
    print(f" - Okabe-Ito: {len(OKABE_ITO_LIST)} colors")
    print(f" - Wong: {len(WONG)} colors")
    print(f" - Tol Bright: {len(TOL_BRIGHT)} colors")
    print(f" - Tol Muted: {len(TOL_MUTED)} colors")
    print(f" - Tol Light: {len(TOL_LIGHT)} colors")
    print(f" - Tol High Contrast: {len(TOL_HIGH_CONTRAST)} colors")
    print("\nOkabe-Ito palette (most recommended):")
    # Name/hex pairs, left-aligned names for a tidy column.
    for name, color in OKABE_ITO.items():
        print(f" {name:15s}: {color}")
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scientific-visualization/assets/color_palettes.py",
"license": "MIT License",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scientific-visualization/scripts/figure_export.py | #!/usr/bin/env python3
"""
Figure Export Utilities for Publication-Ready Scientific Figures
This module provides utilities to export matplotlib figures in publication-ready
formats with appropriate settings for various journals.
"""
import matplotlib.pyplot as plt
from pathlib import Path
from typing import List, Optional, Union
def save_publication_figure(
    fig: plt.Figure,
    filename: Union[str, Path],
    formats: List[str] = ['pdf', 'png'],
    dpi: int = 300,
    transparent: bool = False,
    bbox_inches: str = 'tight',
    pad_inches: float = 0.1,
    facecolor: str = 'white',
    **kwargs
) -> List[Path]:
    """
    Write one figure to disk in several publication-quality formats.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        Figure to export.
    filename : str or Path
        Base filename; the extension for each format is appended.
    formats : list of str, default ['pdf', 'png']
        Output formats: 'pdf', 'png', 'eps', 'svg', 'tiff'.
    dpi : int, default 300
        Resolution for raster formats; 300 DPI is the usual journal minimum.
    transparent : bool, default False
        Save with a transparent background instead of `facecolor`.
    bbox_inches : str, default 'tight'
        Bounding box; 'tight' trims surrounding whitespace.
    pad_inches : float, default 0.1
        Padding added around the figure when bbox_inches='tight'.
    facecolor : str, default 'white'
        Background color (ignored when transparent=True).
    **kwargs
        Forwarded to ``fig.savefig()`` and may override the defaults above.

    Returns
    -------
    list of Path
        Paths of the files that were written successfully.

    Examples
    --------
    >>> fig, ax = plt.subplots()
    >>> ax.plot([1, 2, 3], [1, 4, 9])
    >>> save_publication_figure(fig, 'my_plot', formats=['pdf', 'png'], dpi=600)
    ['my_plot.pdf', 'my_plot.png']
    """
    target = Path(filename)
    stem = target.stem
    # Fall back to the working directory when the requested parent is absent.
    dest_dir = target.parent if target.parent.exists() else Path.cwd()

    written: List[Path] = []
    for fmt in formats:
        output_file = dest_dir / f"{stem}.{fmt}"

        # Base savefig options; caller kwargs may override any of them.
        opts = {
            'dpi': dpi,
            'bbox_inches': bbox_inches,
            'pad_inches': pad_inches,
            'facecolor': 'none' if transparent else facecolor,
            'edgecolor': 'none',
            'transparent': transparent,
            'format': fmt,
        }
        opts.update(kwargs)

        # Vector formats embed rasters at most at 300 DPI; higher is wasted.
        if fmt in ('pdf', 'eps', 'svg'):
            opts['dpi'] = min(dpi, 300)

        try:
            fig.savefig(output_file, **opts)
        except Exception as e:
            print(f"β Failed to save {output_file}: {e}")
        else:
            written.append(output_file)
            print(f"β Saved: {output_file}")

    return written
def save_for_journal(
    fig: plt.Figure,
    filename: Union[str, Path],
    journal: str,
    figure_type: str = 'combination'
) -> List[Path]:
    """
    Export a figure using a specific journal's format and DPI rules.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        Figure to export.
    filename : str or Path
        Base filename (without extension).
    journal : str
        One of 'nature', 'science', 'cell', 'plos', 'acs', 'ieee'
        (case-insensitive).
    figure_type : str, default 'combination'
        'line_art', 'photo', or 'combination'; selects the format/DPI rule.

    Returns
    -------
    list of Path
        Files written by ``save_publication_figure``.

    Raises
    ------
    ValueError
        If the journal or figure type is not recognized.

    Examples
    --------
    >>> fig, ax = plt.subplots()
    >>> ax.plot([1, 2, 3], [1, 4, 9])
    >>> save_for_journal(fig, 'figure1', journal='nature', figure_type='line_art')
    """
    journal = journal.lower()

    # Per-journal export rules: accepted formats plus minimum DPI by figure type.
    journal_specs = {
        'nature': {
            'line_art': {'formats': ['pdf', 'eps'], 'dpi': 1000},
            'photo': {'formats': ['tiff'], 'dpi': 300},
            'combination': {'formats': ['pdf'], 'dpi': 600},
        },
        'science': {
            'line_art': {'formats': ['eps', 'pdf'], 'dpi': 1000},
            'photo': {'formats': ['tiff'], 'dpi': 300},
            'combination': {'formats': ['eps'], 'dpi': 600},
        },
        'cell': {
            'line_art': {'formats': ['pdf', 'eps'], 'dpi': 1000},
            'photo': {'formats': ['tiff'], 'dpi': 300},
            'combination': {'formats': ['pdf'], 'dpi': 600},
        },
        'plos': {
            'line_art': {'formats': ['pdf', 'eps'], 'dpi': 600},
            'photo': {'formats': ['tiff', 'png'], 'dpi': 300},
            'combination': {'formats': ['tiff'], 'dpi': 300},
        },
        'acs': {
            'line_art': {'formats': ['tiff', 'pdf'], 'dpi': 600},
            'photo': {'formats': ['tiff'], 'dpi': 300},
            'combination': {'formats': ['tiff'], 'dpi': 600},
        },
        'ieee': {
            'line_art': {'formats': ['pdf', 'eps'], 'dpi': 600},
            'photo': {'formats': ['tiff'], 'dpi': 300},
            'combination': {'formats': ['pdf'], 'dpi': 300},
        },
    }

    # Validate both lookup keys before touching the filesystem.
    if journal not in journal_specs:
        available = ', '.join(journal_specs.keys())
        raise ValueError(f"Journal '{journal}' not recognized. Available: {available}")
    if figure_type not in journal_specs[journal]:
        available = ', '.join(journal_specs[journal].keys())
        raise ValueError(f"Figure type '{figure_type}' not valid. Available: {available}")

    rules = journal_specs[journal][figure_type]
    print(f"Saving for {journal.upper()} ({figure_type}):")
    print(f" Formats: {', '.join(rules['formats'])}")
    print(f" DPI: {rules['dpi']}")

    # Delegate the actual writing to the generic exporter.
    return save_publication_figure(
        fig=fig,
        filename=filename,
        formats=rules['formats'],
        dpi=rules['dpi']
    )
def check_figure_size(fig: plt.Figure, journal: str = 'nature') -> dict:
    """
    Verify a figure's dimensions against a journal's column-width rules.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        Figure whose size is read via ``fig.get_size_inches()``.
    journal : str, default 'nature'
        Target journal: 'nature', 'science', 'cell', 'plos', or 'acs'.
        Unknown names fall back to Nature's specification with a warning.

    Returns
    -------
    dict
        Dimensions in inches and mm, detected column type ('single',
        'double', or None), per-axis compliance flags, an overall
        'compliant' flag, and the journal's recommended sizes.

    Examples
    --------
    >>> fig = plt.figure(figsize=(3.5, 3))
    >>> info = check_figure_size(fig, journal='nature')
    >>> print(info)
    """
    journal = journal.lower()

    # Figure size in inches, converted to millimetres (1 in = 25.4 mm).
    w_in, h_in = fig.get_size_inches()
    w_mm = w_in * 25.4
    h_mm = h_in * 25.4

    # Journal specifications (widths in mm)
    specs = {
        'nature': {'single': 89, 'double': 183, 'max_height': 247},
        'science': {'single': 55, 'double': 175, 'max_height': 233},
        'cell': {'single': 85, 'double': 178, 'max_height': 230},
        'plos': {'single': 83, 'double': 173, 'max_height': 233},
        'acs': {'single': 82.5, 'double': 178, 'max_height': 247},
    }

    spec = specs.get(journal)
    if spec is None:
        spec = specs['nature']
        print(f"Warning: Journal '{journal}' not found, using Nature specifications")

    # A width within 5 mm of a column spec counts as that column type.
    tolerance = 5  # mm tolerance
    column_type = None
    for candidate in ('single', 'double'):
        if abs(w_mm - spec[candidate]) < tolerance:
            column_type = candidate
            break
    width_ok = column_type is not None
    height_ok = h_mm <= spec['max_height']

    result = {
        'width_inches': w_in,
        'height_inches': h_in,
        'width_mm': w_mm,
        'height_mm': h_mm,
        'journal': journal,
        'column_type': column_type,
        'width_ok': width_ok,
        'height_ok': height_ok,
        'compliant': width_ok and height_ok,
        'recommendations': {
            'single_column_mm': spec['single'],
            'double_column_mm': spec['double'],
            'max_height_mm': spec['max_height'],
        }
    }

    # Human-readable report mirroring the returned dictionary.
    print(f"\n{'='*60}")
    print(f"Figure Size Check for {journal.upper()}")
    print(f"{'='*60}")
    print(f"Current size: {w_mm:.1f} Γ {h_mm:.1f} mm")
    print(f" ({w_in:.2f} Γ {h_in:.2f} inches)")
    print(f"\n{journal.upper()} specifications:")
    print(f" Single column: {spec['single']} mm")
    print(f" Double column: {spec['double']} mm")
    print(f" Max height: {spec['max_height']} mm")
    print(f"\nCompliance:")
    print(f" Width: {'β OK' if width_ok else 'β Non-standard'} ({column_type or 'custom'})")
    print(f" Height: {'β OK' if height_ok else 'β Too tall'}")
    print(f" Overall: {'β COMPLIANT' if result['compliant'] else 'β NEEDS ADJUSTMENT'}")
    print(f"{'='*60}\n")

    return result
def verify_font_embedding(pdf_path: Union[str, Path]) -> Optional[bool]:
    """
    Check whether a PDF file can be opened for font-embedding inspection.

    Note: This requires PyPDF2 or a similar library to be installed, and
    the check itself is only a shallow open-and-count of pages rather
    than a full per-font embedding audit.

    Parameters
    ----------
    pdf_path : str or Path
        Path to PDF file

    Returns
    -------
    bool or None
        True if the PDF was opened successfully, False if it could not
        be read, or None when PyPDF2 is not installed and the check
        could not be performed at all.
    """
    try:
        from PyPDF2 import PdfReader
    except ImportError:
        # FIX: this branch returns None, so the return annotation is now
        # Optional[bool] instead of the previously promised plain bool.
        print("Warning: PyPDF2 not installed. Cannot verify font embedding.")
        print("Install with: pip install PyPDF2")
        return None
    pdf_path = Path(pdf_path)
    try:
        reader = PdfReader(pdf_path)
        # This is a simplified check; full verification is complex
        print(f"PDF has {len(reader.pages)} page(s)")
        print("Note: Full font embedding verification requires detailed PDF inspection.")
        return True
    except Exception as e:
        print(f"Error reading PDF: {e}")
        return False
if __name__ == "__main__":
    # Example usage: build a small demo figure, check it against Nature's
    # size rules, then export it both generically and journal-specifically.
    import numpy as np
    # Create example figure (3.5 in is roughly a single-column width)
    fig, ax = plt.subplots(figsize=(3.5, 2.5))
    x = np.linspace(0, 10, 100)
    ax.plot(x, np.sin(x), label='sin(x)')
    ax.plot(x, np.cos(x), label='cos(x)')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.legend()
    # Hide top/right spines for a cleaner publication look
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    # Check size
    check_figure_size(fig, journal='nature')
    # Save in multiple formats
    print("\nSaving figure...")
    save_publication_figure(fig, 'example_figure', formats=['pdf', 'png'], dpi=300)
    # Save with journal-specific requirements
    print("\nSaving for Nature...")
    save_for_journal(fig, 'example_figure_nature', journal='nature', figure_type='line_art')
    plt.close(fig)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scientific-visualization/scripts/figure_export.py",
"license": "MIT License",
"lines": 290,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scientific-visualization/scripts/style_presets.py | #!/usr/bin/env python3
"""
Matplotlib Style Presets for Publication-Ready Scientific Figures
This module provides pre-configured matplotlib styles optimized for
different journals and use cases.
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
from typing import Optional, Dict, Any
# Okabe-Ito colorblind-friendly palette
# Used as the default axes.prop_cycle in get_base_style().
OKABE_ITO_COLORS = [
    '#E69F00',  # Orange
    '#56B4E9',  # Sky Blue
    '#009E73',  # Bluish Green
    '#F0E442',  # Yellow
    '#0072B2',  # Blue
    '#D55E00',  # Vermillion
    '#CC79A7',  # Reddish Purple
    '#000000'   # Black
]

# Paul Tol palettes (alternatives selectable via set_color_palette())
TOL_BRIGHT = ['#4477AA', '#EE6677', '#228833', '#CCBB44', '#66CCEE', '#AA3377', '#BBBBBB']
TOL_MUTED = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77', '#CC6677', '#882255', '#AA4499']
TOL_HIGH_CONTRAST = ['#004488', '#DDAA33', '#BB5566']

# Wong palette
WONG_COLORS = ['#000000', '#E69F00', '#56B4E9', '#009E73', '#F0E442', '#0072B2', '#D55E00', '#CC79A7']
def get_base_style() -> Dict[str, Any]:
    """
    Get base publication-quality style settings.

    The returned mapping is the shared foundation for every named style in
    apply_publication_style(); journal-specific styles update a copy of it.

    Returns
    -------
    dict
        Dictionary of matplotlib rcParams
    """
    return {
        # Figure
        'figure.dpi': 100,  # Display DPI (changed on save)
        'figure.facecolor': 'white',
        'figure.autolayout': False,
        'figure.constrained_layout.use': True,
        # Font
        'font.size': 8,
        'font.family': 'sans-serif',
        'font.sans-serif': ['Arial', 'Helvetica', 'DejaVu Sans'],
        # Axes
        'axes.linewidth': 0.5,
        'axes.labelsize': 9,
        'axes.titlesize': 9,
        'axes.labelweight': 'normal',
        # Hide top/right spines for the conventional "open" publication frame.
        'axes.spines.top': False,
        'axes.spines.right': False,
        'axes.spines.left': True,
        'axes.spines.bottom': True,
        'axes.edgecolor': 'black',
        'axes.labelcolor': 'black',
        'axes.axisbelow': True,
        # Colorblind-friendly default color cycle (a cycler.Cycler instance).
        'axes.prop_cycle': mpl.cycler(color=OKABE_ITO_COLORS),
        # Grid
        'axes.grid': False,
        # Ticks
        'xtick.major.size': 3,
        'xtick.minor.size': 2,
        'xtick.major.width': 0.5,
        'xtick.minor.width': 0.5,
        'xtick.labelsize': 7,
        'xtick.direction': 'out',
        'ytick.major.size': 3,
        'ytick.minor.size': 2,
        'ytick.major.width': 0.5,
        'ytick.minor.width': 0.5,
        'ytick.labelsize': 7,
        'ytick.direction': 'out',
        # Lines
        'lines.linewidth': 1.5,
        'lines.markersize': 4,
        'lines.markeredgewidth': 0.5,
        # Legend
        'legend.fontsize': 7,
        'legend.frameon': False,
        'legend.loc': 'best',
        # Savefig
        'savefig.dpi': 300,
        'savefig.format': 'pdf',
        'savefig.bbox': 'tight',
        'savefig.pad_inches': 0.05,
        'savefig.transparent': False,
        'savefig.facecolor': 'white',
        # Image
        'image.cmap': 'viridis',
        'image.aspect': 'auto',
    }
def apply_publication_style(style_name: str = 'default') -> None:
    """
    Apply a pre-configured publication style to matplotlib's rcParams.

    Parameters
    ----------
    style_name : str, default 'default'
        Name of the style to apply. Options:
        - 'default': General publication style
        - 'nature': Nature journal style
        - 'science': Science journal style
        - 'cell': Cell Press style
        - 'minimal': Minimal clean style
        - 'presentation': Larger fonts for presentations
        Unrecognized names fall back to 'default' with a warning.

    Examples
    --------
    >>> apply_publication_style('nature')
    >>> fig, ax = plt.subplots()
    >>> ax.plot([1, 2, 3], [1, 4, 9])
    """
    # Per-style overrides layered on top of the shared base style.
    overrides = {
        'nature': {
            'font.size': 7,
            'axes.labelsize': 8,
            'axes.titlesize': 8,
            'xtick.labelsize': 6,
            'ytick.labelsize': 6,
            'legend.fontsize': 6,
            'savefig.dpi': 600,
        },
        'science': {
            'font.size': 7,
            'axes.labelsize': 8,
            'xtick.labelsize': 6,
            'ytick.labelsize': 6,
            'legend.fontsize': 6,
            'savefig.dpi': 600,
        },
        'cell': {
            'font.size': 8,
            'axes.labelsize': 9,
            'xtick.labelsize': 7,
            'ytick.labelsize': 7,
            'legend.fontsize': 7,
            'savefig.dpi': 600,
        },
        'minimal': {
            'axes.linewidth': 0.8,
            'xtick.major.width': 0.8,
            'ytick.major.width': 0.8,
            'lines.linewidth': 2,
        },
        'presentation': {
            'font.size': 14,
            'axes.labelsize': 16,
            'axes.titlesize': 18,
            'xtick.labelsize': 12,
            'ytick.labelsize': 12,
            'legend.fontsize': 12,
            'axes.linewidth': 1.5,
            'lines.linewidth': 2.5,
            'lines.markersize': 8,
        },
    }

    style = get_base_style()
    if style_name in overrides:
        style.update(overrides[style_name])
    elif style_name != 'default':
        # Unknown name: warn, then proceed with the untouched base style.
        print(f"Warning: Style '{style_name}' not recognized. Using 'default'.")

    plt.rcParams.update(style)
    print(f"β Applied '{style_name}' publication style")
def set_color_palette(palette_name: str = 'okabe_ito') -> None:
    """
    Set a colorblind-friendly color palette as matplotlib's color cycle.

    Parameters
    ----------
    palette_name : str, default 'okabe_ito'
        Name of the palette. Options:
        - 'okabe_ito': Okabe-Ito palette (8 colors)
        - 'wong': Wong palette (8 colors)
        - 'tol_bright': Paul Tol bright palette (7 colors)
        - 'tol_muted': Paul Tol muted palette (9 colors)
        - 'tol_high_contrast': Paul Tol high contrast (3 colors)
        Unknown names fall back to 'okabe_ito' with a warning.

    Examples
    --------
    >>> set_color_palette('tol_muted')
    >>> fig, ax = plt.subplots()
    >>> for i in range(5):
    ...     ax.plot([1, 2, 3], [i, i+1, i+2])
    """
    registry = {
        'okabe_ito': OKABE_ITO_COLORS,
        'wong': WONG_COLORS,
        'tol_bright': TOL_BRIGHT,
        'tol_muted': TOL_MUTED,
        'tol_high_contrast': TOL_HIGH_CONTRAST,
    }

    colors = registry.get(palette_name)
    if colors is None:
        # Warn and fall back rather than raising, so plotting keeps working.
        available = ', '.join(registry.keys())
        print(f"Warning: Palette '{palette_name}' not found. Available: {available}")
        palette_name = 'okabe_ito'
        colors = registry[palette_name]

    plt.rcParams['axes.prop_cycle'] = plt.cycler(color=colors)
    print(f"β Applied '{palette_name}' color palette ({len(colors)} colors)")
def configure_for_journal(journal: str, figure_width: str = 'single') -> None:
    """
    Configure matplotlib's style and default figure size for a journal.

    Parameters
    ----------
    journal : str
        Journal name: 'nature', 'science', 'cell', 'plos', 'acs', 'ieee'
        (case-insensitive).
    figure_width : str, default 'single'
        Column width to target: 'single' or 'double'. Any value other
        than 'single' selects the double-column width.

    Raises
    ------
    ValueError
        If the journal name is not recognized.

    Examples
    --------
    >>> configure_for_journal('nature', figure_width='single')
    >>> fig, ax = plt.subplots()  # Will have correct size for Nature
    """
    journal = journal.lower()

    # Column widths (mm) and the named style each journal maps onto.
    journal_configs = {
        'nature': {
            'single_width': 89,  # mm
            'double_width': 183,
            'style': 'nature',
        },
        'science': {
            'single_width': 55,
            'double_width': 175,
            'style': 'science',
        },
        'cell': {
            'single_width': 85,
            'double_width': 178,
            'style': 'cell',
        },
        'plos': {
            'single_width': 83,
            'double_width': 173,
            'style': 'default',
        },
        'acs': {
            'single_width': 82.5,
            'double_width': 178,
            'style': 'default',
        },
        'ieee': {
            'single_width': 89,
            'double_width': 182,
            'style': 'default',
        },
    }

    if journal not in journal_configs:
        available = ', '.join(journal_configs.keys())
        raise ValueError(f"Journal '{journal}' not recognized. Available: {available}")
    config = journal_configs[journal]

    # Apply the journal's base style first, then size the default canvas.
    apply_publication_style(config['style'])

    widths = {'single': config['single_width']}
    width_mm = widths.get(figure_width, config['double_width'])
    width_inches = width_mm / 25.4
    plt.rcParams['figure.figsize'] = (width_inches, width_inches * 0.75)  # 4:3 aspect ratio

    print(f"β Configured for {journal.upper()} ({figure_width} column: {width_mm} mm)")
def create_style_template(output_file: str = 'publication.mplstyle') -> None:
    """
    Create a matplotlib style file that can be used with plt.style.use().

    Parameters
    ----------
    output_file : str, default 'publication.mplstyle'
        Output filename for the style file

    Examples
    --------
    >>> create_style_template('my_style.mplstyle')
    >>> plt.style.use('my_style.mplstyle')
    """
    style = get_base_style()
    with open(output_file, 'w') as f:
        f.write("# Publication-quality matplotlib style\n")
        f.write("# Usage: plt.style.use('publication.mplstyle')\n\n")
        for key, value in style.items():
            if key == 'axes.prop_cycle':
                # BUG FIX: the cycler value was detected with
                # isinstance(value, mpl.cycler), but mpl.cycler is a factory
                # *function*, not a type, so isinstance raised TypeError at
                # runtime. Dispatch on the rcParams key instead.
                colors = [c['color'] for c in value]
                f.write(f"axes.prop_cycle : cycler('color', {colors})\n")
            else:
                f.write(f"{key} : {value}\n")
    print(f"β Created style template: {output_file}")
    print(f" Use with: plt.style.use('{output_file}')")
def show_color_palettes() -> None:
    """
    Render a swatch chart of every bundled colorblind-friendly palette.

    Opens an interactive matplotlib window with one row per palette; each
    swatch is labelled with its hex code.
    """
    palettes = {
        'Okabe-Ito': OKABE_ITO_COLORS,
        'Wong': WONG_COLORS,
        'Tol Bright': TOL_BRIGHT,
        'Tol Muted': TOL_MUTED,
        'Tol High Contrast': TOL_HIGH_CONTRAST,
    }

    # One half-inch-tall row per palette.
    fig, axes = plt.subplots(len(palettes), 1, figsize=(8, len(palettes) * 0.5))
    for ax, (name, colors) in zip(axes, palettes.items()):
        ax.set_xlim(0, len(colors))
        ax.set_ylim(0, 1)
        ax.set_yticks([])
        ax.set_xticks([])
        ax.set_ylabel(name, fontsize=10)
        for idx, hex_code in enumerate(colors):
            ax.add_patch(plt.Rectangle((idx, 0), 1, 1, facecolor=hex_code, edgecolor='black', linewidth=0.5))
            # Label each swatch; the final swatch gets white text.
            label_color = 'white' if idx >= len(colors) - 1 else 'black'
            ax.text(idx + 0.5, 0.5, hex_code, ha='center', va='center',
                    fontsize=7, color=label_color)

    fig.suptitle('Colorblind-Friendly Palettes', fontsize=12, fontweight='bold')
    plt.tight_layout()
    plt.show()
def reset_to_default() -> None:
    """
    Reset matplotlib to default settings.

    Undoes any changes made by apply_publication_style(),
    set_color_palette(), or configure_for_journal() by restoring
    matplotlib's built-in rcParams defaults.
    """
    mpl.rcdefaults()
    print("β Reset to matplotlib defaults")
if __name__ == "__main__":
    # Demo mode: list the available styles/palettes, then render an
    # example figure and the palette swatch chart.
    print("Matplotlib Style Presets for Scientific Figures")
    print("=" * 50)
    # Show available styles
    print("\nAvailable publication styles:")
    print(" - default")
    print(" - nature")
    print(" - science")
    print(" - cell")
    print(" - minimal")
    print(" - presentation")
    print("\nAvailable color palettes:")
    print(" - okabe_ito (recommended)")
    print(" - wong")
    print(" - tol_bright")
    print(" - tol_muted")
    print(" - tol_high_contrast")
    print("\nExample usage:")
    print(" from style_presets import apply_publication_style, set_color_palette")
    print(" apply_publication_style('nature')")
    print(" set_color_palette('okabe_ito')")
    # Create example figure using the default style and 5-series demo data
    print("\nGenerating example figure with 'default' style...")
    apply_publication_style('default')
    fig, ax = plt.subplots(figsize=(3.5, 2.5))
    for i in range(5):
        ax.plot([1, 2, 3, 4], [i, i+1, i+0.5, i+2], marker='o', label=f'Series {i+1}')
    ax.set_xlabel('Time (hours)')
    ax.set_ylabel('Response (AU)')
    ax.legend()
    fig.suptitle('Example with Publication Style')
    plt.tight_layout()
    plt.show()
    # Show color palettes
    print("\nDisplaying color palettes...")
    show_color_palettes()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scientific-visualization/scripts/style_presets.py",
"license": "MIT License",
"lines": 349,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scikit-learn/scripts/classification_pipeline.py | """
Complete classification pipeline example with preprocessing, model training,
hyperparameter tuning, and evaluation.
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
classification_report, confusion_matrix, roc_auc_score,
accuracy_score, precision_score, recall_score, f1_score
)
import warnings
warnings.filterwarnings('ignore')
def create_preprocessing_pipeline(numeric_features, categorical_features):
    """
    Create a preprocessing pipeline for mixed data types.

    Numeric columns are median-imputed and standardized; categorical
    columns are imputed with the constant 'missing' and one-hot encoded
    (unknown categories at transform time are ignored).

    Parameters:
    -----------
    numeric_features : list
        List of numeric feature column names
    categorical_features : list
        List of categorical feature column names

    Returns:
    --------
    ColumnTransformer
        Preprocessing pipeline
    """
    # Numeric branch: impute then scale.
    continuous = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler())
    ])

    # Categorical branch: impute then one-hot encode.
    discrete = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
        ('onehot', OneHotEncoder(handle_unknown='ignore', sparse_output=False))
    ])

    # Route each column list through its matching branch.
    return ColumnTransformer(
        transformers=[
            ('num', continuous, numeric_features),
            ('cat', discrete, categorical_features)
        ]
    )
def train_and_evaluate_model(X, y, numeric_features, categorical_features,
                             test_size=0.2, random_state=42):
    """
    Complete pipeline: preprocess, train, tune, and evaluate a classifier.

    Compares three candidate models via 5-fold cross-validation, tunes
    the winner with a grid search, then reports test-set metrics.

    Parameters:
    -----------
    X : DataFrame or array
        Feature matrix
    y : Series or array
        Target variable
    numeric_features : list
        List of numeric feature names
    categorical_features : list
        List of categorical feature names
    test_size : float
        Proportion of data for testing
    random_state : int
        Random seed
    Returns:
    --------
    dict
        Dictionary containing trained model, predictions, and metrics
    """
    # Split data with stratification (keeps class proportions in both splits)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, stratify=y, random_state=random_state
    )
    print(f"Training set size: {len(X_train)}")
    print(f"Test set size: {len(X_test)}")
    print(f"Class distribution in training: {pd.Series(y_train).value_counts().to_dict()}")
    # Create preprocessor
    preprocessor = create_preprocessing_pipeline(numeric_features, categorical_features)
    # Define models to compare; each pipeline bundles preprocessing with the
    # classifier so CV folds are preprocessed independently (no leakage).
    models = {
        'Logistic Regression': Pipeline([
            ('preprocessor', preprocessor),
            ('classifier', LogisticRegression(max_iter=1000, random_state=random_state))
        ]),
        'Random Forest': Pipeline([
            ('preprocessor', preprocessor),
            ('classifier', RandomForestClassifier(n_estimators=100, random_state=random_state))
        ]),
        'Gradient Boosting': Pipeline([
            ('preprocessor', preprocessor),
            ('classifier', GradientBoostingClassifier(n_estimators=100, random_state=random_state))
        ])
    }
    # Compare models using cross-validation
    print("\n" + "="*60)
    print("Model Comparison (5-Fold Cross-Validation)")
    print("="*60)
    # cv_results maps model name -> mean CV accuracy
    cv_results = {}
    for name, model in models.items():
        scores = cross_val_score(model, X_train, y_train, cv=5, scoring='accuracy')
        cv_results[name] = scores.mean()
        print(f"{name:20s}: {scores.mean():.4f} (+/- {scores.std() * 2:.4f})")
    # Select best model based on CV (highest mean accuracy)
    best_model_name = max(cv_results, key=cv_results.get)
    best_model = models[best_model_name]
    print(f"\nBest model: {best_model_name}")
    # Hyperparameter tuning for best model; 'classifier__' prefixes target
    # the classifier step inside the pipeline.
    if best_model_name == 'Random Forest':
        param_grid = {
            'classifier__n_estimators': [100, 200],
            'classifier__max_depth': [10, 20, None],
            'classifier__min_samples_split': [2, 5]
        }
    elif best_model_name == 'Gradient Boosting':
        param_grid = {
            'classifier__n_estimators': [100, 200],
            'classifier__learning_rate': [0.01, 0.1],
            'classifier__max_depth': [3, 5]
        }
    else:  # Logistic Regression
        param_grid = {
            'classifier__C': [0.1, 1.0, 10.0],
            'classifier__penalty': ['l2']
        }
    print("\n" + "="*60)
    print("Hyperparameter Tuning")
    print("="*60)
    # n_jobs=-1 uses every available core for the grid search
    grid_search = GridSearchCV(
        best_model, param_grid, cv=5, scoring='accuracy',
        n_jobs=-1, verbose=0
    )
    grid_search.fit(X_train, y_train)
    print(f"Best parameters: {grid_search.best_params_}")
    print(f"Best CV score: {grid_search.best_score_:.4f}")
    # Evaluate on test set (held out until now)
    tuned_model = grid_search.best_estimator_
    y_pred = tuned_model.predict(X_test)
    y_pred_proba = tuned_model.predict_proba(X_test)
    print("\n" + "="*60)
    print("Test Set Evaluation")
    print("="*60)
    # Calculate metrics; 'weighted' averages account for class imbalance
    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred, average='weighted')
    recall = recall_score(y_test, y_pred, average='weighted')
    f1 = f1_score(y_test, y_pred, average='weighted')
    print(f"Accuracy: {accuracy:.4f}")
    print(f"Precision: {precision:.4f}")
    print(f"Recall: {recall:.4f}")
    print(f"F1-Score: {f1:.4f}")
    # ROC AUC (if binary classification); uses positive-class probabilities
    if len(np.unique(y)) == 2:
        roc_auc = roc_auc_score(y_test, y_pred_proba[:, 1])
        print(f"ROC AUC: {roc_auc:.4f}")
    print("\n" + "="*60)
    print("Classification Report")
    print("="*60)
    print(classification_report(y_test, y_pred))
    print("\n" + "="*60)
    print("Confusion Matrix")
    print("="*60)
    print(confusion_matrix(y_test, y_pred))
    # Feature importance (if available, e.g. tree-based models)
    if hasattr(tuned_model.named_steps['classifier'], 'feature_importances_'):
        print("\n" + "="*60)
        print("Top 10 Most Important Features")
        print("="*60)
        # Names come from the fitted preprocessor (post one-hot expansion)
        feature_names = tuned_model.named_steps['preprocessor'].get_feature_names_out()
        importances = tuned_model.named_steps['classifier'].feature_importances_
        feature_importance_df = pd.DataFrame({
            'feature': feature_names,
            'importance': importances
        }).sort_values('importance', ascending=False).head(10)
        print(feature_importance_df.to_string(index=False))
    return {
        'model': tuned_model,
        'y_test': y_test,
        'y_pred': y_pred,
        'y_pred_proba': y_pred_proba,
        'metrics': {
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1': f1
        }
    }
# Example usage
if __name__ == "__main__":
    # Load example dataset (imported here so the module stays importable
    # without pulling in the demo data)
    from sklearn.datasets import load_breast_cancer
    # Load data
    data = load_breast_cancer()
    X = pd.DataFrame(data.data, columns=data.feature_names)
    y = data.target
    # For demonstration, treat all features as numeric
    numeric_features = X.columns.tolist()
    categorical_features = []
    print("="*60)
    print("Classification Pipeline Example")
    print("Dataset: Breast Cancer Wisconsin")
    print("="*60)
    # Run complete pipeline (compare, tune, and evaluate)
    results = train_and_evaluate_model(
        X, y, numeric_features, categorical_features,
        test_size=0.2, random_state=42
    )
    print("\n" + "="*60)
    print("Pipeline Complete!")
    print("="*60)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scikit-learn/scripts/classification_pipeline.py",
"license": "MIT License",
"lines": 214,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/scikit-learn/scripts/clustering_analysis.py | """
Clustering analysis example with multiple algorithms, evaluation, and visualization.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans, DBSCAN, AgglomerativeClustering
from sklearn.mixture import GaussianMixture
from sklearn.metrics import (
silhouette_score, calinski_harabasz_score, davies_bouldin_score
)
import warnings
warnings.filterwarnings('ignore')
def preprocess_for_clustering(X, scale=True, pca_components=None):
    """Prepare a feature matrix for clustering.

    Optionally standardizes the features and/or projects them onto a
    reduced set of principal components. The input is never modified
    in place; a transformed copy is returned.

    Parameters:
    -----------
    X : array-like
        Feature matrix
    scale : bool
        Whether to standardize features
    pca_components : int or None
        Number of PCA components (None to skip PCA)

    Returns:
    --------
    array
        Preprocessed data
    """
    transformed = X.copy()

    if scale:
        # z-score standardization so all features contribute equally
        transformed = StandardScaler().fit_transform(transformed)

    if pca_components is not None:
        pca = PCA(n_components=pca_components)
        transformed = pca.fit_transform(transformed)
        print(f"PCA: Explained variance ratio = {pca.explained_variance_ratio_.sum():.3f}")

    return transformed
def find_optimal_k_kmeans(X, k_range=range(2, 11)):
    """Search for a good K-Means cluster count.

    Fits K-Means once per candidate K, recording inertia (elbow method)
    and silhouette score, saves a two-panel diagnostic figure, and
    recommends the K with the highest silhouette score.

    Parameters:
    -----------
    X : array-like
        Feature matrix (should be scaled)
    k_range : range
        Range of K values to test

    Returns:
    --------
    dict
        Dictionary with inertia and silhouette scores for each K
    """
    inertias = []
    silhouette_scores = []
    for candidate_k in k_range:
        model = KMeans(n_clusters=candidate_k, random_state=42, n_init=10)
        assignments = model.fit_predict(X)
        inertias.append(model.inertia_)
        silhouette_scores.append(silhouette_score(X, assignments))

    # Side-by-side diagnostics: elbow curve on the left, silhouette on the right.
    fig, (elbow_ax, sil_ax) = plt.subplots(1, 2, figsize=(12, 4))

    elbow_ax.plot(k_range, inertias, 'bo-')
    elbow_ax.set_xlabel('Number of clusters (K)')
    elbow_ax.set_ylabel('Inertia')
    elbow_ax.set_title('Elbow Method')
    elbow_ax.grid(True)

    sil_ax.plot(k_range, silhouette_scores, 'ro-')
    sil_ax.set_xlabel('Number of clusters (K)')
    sil_ax.set_ylabel('Silhouette Score')
    sil_ax.set_title('Silhouette Analysis')
    sil_ax.grid(True)

    plt.tight_layout()
    plt.savefig('clustering_optimization.png', dpi=300, bbox_inches='tight')
    print("Saved: clustering_optimization.png")
    plt.close()

    # Silhouette score is the selection criterion; inertia is plotted only.
    best_k = k_range[np.argmax(silhouette_scores)]
    print(f"\nRecommended K based on silhouette score: {best_k}")

    return {
        'k_values': list(k_range),
        'inertias': inertias,
        'silhouette_scores': silhouette_scores,
        'best_k': best_k
    }
def compare_clustering_algorithms(X, n_clusters=3):
    """Fit several clustering algorithms and print internal validity metrics.

    Runs K-Means, agglomerative clustering, and a Gaussian mixture at a
    fixed cluster count, plus DBSCAN (which infers its own count), and
    reports silhouette, Calinski-Harabasz, and Davies-Bouldin scores.

    Parameters:
    -----------
    X : array-like
        Feature matrix (should be scaled)
    n_clusters : int
        Number of clusters

    Returns:
    --------
    dict
        Dictionary with results for each algorithm
    """
    print("="*60)
    print(f"Comparing Clustering Algorithms (n_clusters={n_clusters})")
    print("="*60)

    fixed_k_algorithms = {
        'K-Means': KMeans(n_clusters=n_clusters, random_state=42, n_init=10),
        'Agglomerative': AgglomerativeClustering(n_clusters=n_clusters, linkage='ward'),
        'Gaussian Mixture': GaussianMixture(n_components=n_clusters, random_state=42)
    }

    # DBSCAN is density based and takes no cluster count; handled separately.
    dbscan_labels = DBSCAN(eps=0.5, min_samples=5).fit_predict(X)

    results = {}
    for name, estimator in fixed_k_algorithms.items():
        labels = estimator.fit_predict(X)

        silhouette = silhouette_score(X, labels)
        calinski = calinski_harabasz_score(X, labels)
        davies = davies_bouldin_score(X, labels)

        results[name] = {
            'labels': labels,
            'n_clusters': n_clusters,
            'silhouette': silhouette,
            'calinski_harabasz': calinski,
            'davies_bouldin': davies
        }
        print(f"\n{name}:")
        print(f" Silhouette Score: {silhouette:.4f} (higher is better)")
        print(f" Calinski-Harabasz: {calinski:.4f} (higher is better)")
        print(f" Davies-Bouldin: {davies:.4f} (lower is better)")

    # DBSCAN marks noise as -1; exclude it from cluster count and metrics.
    n_clusters_dbscan = len(set(dbscan_labels)) - (1 if -1 in dbscan_labels else 0)
    n_noise = list(dbscan_labels).count(-1)

    if n_clusters_dbscan > 1:
        mask = dbscan_labels != -1
        if mask.sum() > 0:
            silhouette = silhouette_score(X[mask], dbscan_labels[mask])
            calinski = calinski_harabasz_score(X[mask], dbscan_labels[mask])
            davies = davies_bouldin_score(X[mask], dbscan_labels[mask])
            results['DBSCAN'] = {
                'labels': dbscan_labels,
                'n_clusters': n_clusters_dbscan,
                'n_noise': n_noise,
                'silhouette': silhouette,
                'calinski_harabasz': calinski,
                'davies_bouldin': davies
            }
            print(f"\nDBSCAN:")
            print(f" Clusters found: {n_clusters_dbscan}")
            print(f" Noise points: {n_noise}")
            print(f" Silhouette Score: {silhouette:.4f} (higher is better)")
            print(f" Calinski-Harabasz: {calinski:.4f} (higher is better)")
            print(f" Davies-Bouldin: {davies:.4f} (lower is better)")
    else:
        print(f"\nDBSCAN:")
        print(f" Clusters found: {n_clusters_dbscan}")
        print(f" Noise points: {n_noise}")
        print(" Note: Insufficient clusters for metric calculation")

    return results
def visualize_clusters(X, results, true_labels=None):
    """
    Visualize clustering results using PCA for 2D projection.

    Saves a multi-panel scatter figure ('clustering_results.png'):
    one panel per algorithm in *results*, plus an extra panel for the
    true labels when provided. Points are colored by cluster id.

    Parameters:
    -----------
    X : array-like
        Feature matrix
    results : dict
        Dictionary with clustering results (shape as produced by
        compare_clustering_algorithms: each value has 'labels',
        'n_clusters', and optionally 'silhouette')
    true_labels : array-like or None
        True labels (if available) for comparison
    """
    # Reduce to 2D using PCA (projection is for plotting only)
    pca = PCA(n_components=2)
    X_2d = pca.fit_transform(X)
    # Determine number of subplots
    n_plots = len(results)
    if true_labels is not None:
        n_plots += 1
    # Grid layout: at most 3 columns, rows via ceiling division
    n_cols = min(3, n_plots)
    n_rows = (n_plots + n_cols - 1) // n_cols
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(5*n_cols, 4*n_rows))
    if n_plots == 1:
        # subplots() returns a bare Axes for a 1x1 grid; wrap for uniform handling
        axes = np.array([axes])
    axes = axes.flatten()
    plot_idx = 0
    # Plot true labels if available
    if true_labels is not None:
        ax = axes[plot_idx]
        scatter = ax.scatter(X_2d[:, 0], X_2d[:, 1], c=true_labels, cmap='viridis', alpha=0.6)
        ax.set_title('True Labels')
        ax.set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%})')
        ax.set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%})')
        plt.colorbar(scatter, ax=ax)
        plot_idx += 1
    # Plot clustering results
    for name, result in results.items():
        ax = axes[plot_idx]
        labels = result['labels']
        scatter = ax.scatter(X_2d[:, 0], X_2d[:, 1], c=labels, cmap='viridis', alpha=0.6)
        # Highlight noise points for DBSCAN (label -1)
        if name == 'DBSCAN' and -1 in labels:
            noise_mask = labels == -1
            ax.scatter(X_2d[noise_mask, 0], X_2d[noise_mask, 1],
                       c='red', marker='x', s=100, label='Noise', alpha=0.8)
            ax.legend()
        title = f"{name} (K={result['n_clusters']})"
        if 'silhouette' in result:
            title += f"\nSilhouette: {result['silhouette']:.3f}"
        ax.set_title(title)
        ax.set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%})')
        ax.set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%})')
        plt.colorbar(scatter, ax=ax)
        plot_idx += 1
    # Hide unused subplots
    for idx in range(plot_idx, len(axes)):
        axes[idx].axis('off')
    plt.tight_layout()
    plt.savefig('clustering_results.png', dpi=300, bbox_inches='tight')
    print("\nSaved: clustering_results.png")
    plt.close()
def complete_clustering_analysis(X, true_labels=None, scale=True,
                                 find_k=True, k_range=range(2, 11), n_clusters=3):
    """End-to-end clustering workflow: preprocess, pick K, compare, plot.

    Parameters:
    -----------
    X : array-like
        Feature matrix
    true_labels : array-like or None
        True labels (for comparison only, not used in clustering)
    scale : bool
        Whether to scale features
    find_k : bool
        Whether to search for optimal K
    k_range : range
        Range of K values to test
    n_clusters : int
        Number of clusters to use in comparison

    Returns:
    --------
    dict
        Dictionary with all analysis results
    """
    print("="*60)
    print("Clustering Analysis")
    print("="*60)
    print(f"Data shape: {X.shape}")

    X_processed = preprocess_for_clustering(X, scale=scale)

    optimization_results = None
    if find_k:
        print("\n" + "="*60)
        print("Finding Optimal Number of Clusters")
        print("="*60)
        optimization_results = find_optimal_k_kmeans(X_processed, k_range=k_range)
        # The recommended K overrides the caller-supplied cluster count.
        n_clusters = optimization_results['best_k']

    comparison_results = compare_clustering_algorithms(X_processed, n_clusters=n_clusters)

    print("\n" + "="*60)
    print("Visualizing Results")
    print("="*60)
    visualize_clusters(X_processed, comparison_results, true_labels=true_labels)

    return {
        'X_processed': X_processed,
        'optimization': optimization_results,
        'comparison': comparison_results
    }
# Example usage
if __name__ == "__main__":
    from sklearn.datasets import load_iris, make_blobs

    print("="*60)
    print("Example 1: Iris Dataset")
    print("="*60)

    # Demo 1: the classic Iris data, with ground-truth species labels.
    iris_bunch = load_iris()
    iris_analysis = complete_clustering_analysis(
        iris_bunch.data,
        true_labels=iris_bunch.target,
        scale=True,
        find_k=True,
        k_range=range(2, 8),
        n_clusters=3
    )

    print("\n" + "="*60)
    print("Example 2: Synthetic Dataset with Noise")
    print("="*60)

    # Demo 2: well-separated blobs plus injected outliers, so DBSCAN's
    # noise handling has something to find.
    blob_X, blob_y = make_blobs(
        n_samples=500, n_features=2, centers=4,
        cluster_std=0.5, random_state=42
    )
    outliers = np.random.randn(50, 2) * 3
    blob_X = np.vstack([blob_X, outliers])
    blob_y_with_noise = np.concatenate([blob_y, np.full(50, -1)])

    synth_analysis = complete_clustering_analysis(
        blob_X,
        true_labels=blob_y_with_noise,
        scale=True,
        find_k=True,
        k_range=range(2, 8),
        n_clusters=4
    )

    print("\n" + "="*60)
    print("Analysis Complete!")
    print("="*60)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/scikit-learn/scripts/clustering_analysis.py",
"license": "MIT License",
"lines": 315,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/simpy/scripts/basic_simulation_template.py | #!/usr/bin/env python3
"""
Basic SimPy Simulation Template
This template provides a starting point for building SimPy simulations.
Customize the process functions and parameters for your specific use case.
"""
import simpy
import random
class SimulationConfig:
    """Tunable parameters for the queueing simulation."""

    def __init__(self):
        # Reproducibility and sizing
        self.random_seed = 42
        self.num_resources = 2
        self.num_processes = 10
        self.sim_time = 100
        # Timing: mean time between arrivals, and the Gaussian service-time
        # distribution's mean and standard deviation.
        self.arrival_rate = 5.0
        self.service_time_mean = 3.0
        self.service_time_std = 1.0
class SimulationStats:
    """Accumulate per-customer timing data and print a summary."""

    def __init__(self):
        # One entry appended per recorded event of each kind.
        self.arrival_times = []
        self.service_start_times = []
        self.departure_times = []
        self.wait_times = []
        self.service_times = []

    def record_arrival(self, time):
        self.arrival_times.append(time)

    def record_service_start(self, time):
        self.service_start_times.append(time)

    def record_departure(self, time):
        self.departure_times.append(time)

    def record_wait_time(self, wait_time):
        self.wait_times.append(wait_time)

    def record_service_time(self, service_time):
        self.service_times.append(service_time)

    def report(self):
        """Print aggregate wait/service/throughput statistics."""
        print("\n" + "=" * 50)
        print("SIMULATION STATISTICS")
        print("=" * 50)
        if self.wait_times:
            count = len(self.wait_times)
            print(f"Total customers: {count}")
            print(f"Average wait time: {sum(self.wait_times) / count:.2f}")
            print(f"Max wait time: {max(self.wait_times):.2f}")
            print(f"Min wait time: {min(self.wait_times):.2f}")
        if self.service_times:
            print(f"Average service time: {sum(self.service_times) / len(self.service_times):.2f}")
        if self.arrival_times and self.departure_times:
            # Completed customers per unit of simulated time.
            throughput = len(self.departure_times) / max(self.departure_times)
            print(f"Throughput: {throughput:.2f} customers/time unit")
        print("=" * 50)
def customer_process(env, name, resource, stats, config):
    """
    Model one customer: arrive, queue for the resource, get served, depart.

    Args:
        env: SimPy environment
        name: Customer identifier (used in log output)
        resource: Shared resource (e.g., server, machine)
        stats: Statistics collector
        config: Simulation configuration
    """
    arrival_time = env.now
    stats.record_arrival(arrival_time)
    print(f"{name} arrived at {arrival_time:.2f}")

    with resource.request() as request:
        # Block until one of the resource's slots frees up.
        yield request

        service_start = env.now
        wait_time = service_start - arrival_time
        stats.record_service_start(service_start)
        stats.record_wait_time(wait_time)
        print(f"{name} started service at {service_start:.2f} (waited {wait_time:.2f})")

        # Gaussian service time, clamped at 0.1 so it is always positive.
        service_time = max(0.1, random.gauss(
            config.service_time_mean,
            config.service_time_std
        ))
        stats.record_service_time(service_time)
        yield env.timeout(service_time)

        departure_time = env.now
        stats.record_departure(departure_time)
        print(f"{name} departed at {departure_time:.2f}")
def customer_generator(env, resource, stats, config):
    """
    Spawn customers forever with exponentially distributed inter-arrival gaps.

    Args:
        env: SimPy environment
        resource: Shared resource
        stats: Statistics collector
        config: Simulation configuration
    """
    customer_count = 0
    while True:
        # expovariate takes a rate, so the mean gap is config.arrival_rate.
        yield env.timeout(random.expovariate(1.0 / config.arrival_rate))

        customer_count += 1
        env.process(customer_process(
            env, f"Customer {customer_count}", resource, stats, config
        ))
def run_simulation(config):
    """
    Build the SimPy world from *config* and run it to completion.

    Args:
        config: SimulationConfig object with simulation parameters

    Returns:
        SimulationStats object with collected statistics
    """
    # Seed the RNG so repeated runs produce identical traces.
    random.seed(config.random_seed)

    # Wire up environment, shared resource, stats, and the arrival source.
    env = simpy.Environment()
    resource = simpy.Resource(env, capacity=config.num_resources)
    stats = SimulationStats()
    env.process(customer_generator(env, resource, stats, config))

    print(f"Starting simulation for {config.sim_time} time units...")
    print(f"Resources: {config.num_resources}")
    print(f"Average arrival rate: {config.arrival_rate:.2f}")
    print(f"Average service time: {config.service_time_mean:.2f}")
    print("-" * 50)

    env.run(until=config.sim_time)
    return stats
def main():
    """Configure, run, and summarize one simulation."""
    config = SimulationConfig()

    # Override a few defaults for this demo run.
    config.num_resources = 2
    config.sim_time = 50
    config.arrival_rate = 2.0
    config.service_time_mean = 3.0

    stats = run_simulation(config)
    stats.report()


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/simpy/scripts/basic_simulation_template.py",
"license": "MIT License",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/simpy/scripts/resource_monitor.py | #!/usr/bin/env python3
"""
SimPy Resource Monitoring Utilities
This module provides reusable classes and functions for monitoring
SimPy resources during simulation. Includes utilities for tracking
queue lengths, utilization, wait times, and generating reports.
"""
import simpy
from collections import defaultdict
from typing import List, Tuple, Dict, Any
class ResourceMonitor:
    """
    Monitor resource usage with detailed statistics tracking.

    Works by monkey-patching the resource's ``request``/``release``
    methods, so the monitor must be attached before any process uses
    the resource; earlier events are not seen.

    Tracks:
    - Queue lengths over time
    - Resource utilization
    - Wait times for requests
    - Request and release events
    """

    def __init__(self, env: simpy.Environment, resource: simpy.Resource, name: str = "Resource"):
        """
        Initialize the resource monitor.

        Args:
            env: SimPy environment
            resource: Resource to monitor
            name: Name for the resource (for reporting)
        """
        self.env = env
        self.resource = resource
        self.name = name

        # Time series of (time, value) samples, one appended per event.
        self.queue_data: List[Tuple[float, int]] = [(0, 0)]
        self.utilization_data: List[Tuple[float, float]] = [(0, 0.0)]
        # Outstanding requests awaiting a grant, keyed by the request event.
        self.request_times: Dict[Any, float] = {}
        self.wait_times: List[float] = []
        self.events: List[Tuple[float, str, Dict]] = []

        # Patch the resource so every request/release is recorded.
        self._patch_resource()

    def _patch_resource(self):
        """Patch resource methods to intercept requests and releases."""
        original_request = self.resource.request
        original_release = self.resource.release

        def monitored_request(*args, **kwargs):
            req = original_request(*args, **kwargs)

            # Snapshot queue length and utilization at request time.
            queue_length = len(self.resource.queue)
            utilization = self.resource.count / self.resource.capacity
            self.queue_data.append((self.env.now, queue_length))
            self.utilization_data.append((self.env.now, utilization))
            self.events.append((self.env.now, 'request', {
                'queue_length': queue_length,
                'utilization': utilization
            }))

            # Store request time for wait time calculation
            self.request_times[req] = self.env.now

            # Record the wait once the request event is granted.
            def on_granted(event):
                if req in self.request_times:
                    wait_time = self.env.now - self.request_times[req]
                    self.wait_times.append(wait_time)
                    del self.request_times[req]

            req.callbacks.append(on_granted)
            return req

        def monitored_release(*args, **kwargs):
            result = original_release(*args, **kwargs)

            # Snapshot queue length and utilization after the release.
            queue_length = len(self.resource.queue)
            utilization = self.resource.count / self.resource.capacity
            self.queue_data.append((self.env.now, queue_length))
            self.utilization_data.append((self.env.now, utilization))
            self.events.append((self.env.now, 'release', {
                'queue_length': queue_length,
                'utilization': utilization
            }))
            return result

        self.resource.request = monitored_request
        self.resource.release = monitored_release

    def average_queue_length(self) -> float:
        """Calculate time-weighted average queue length."""
        if len(self.queue_data) < 2:
            return 0.0
        total_time = 0.0
        weighted_sum = 0.0
        for i in range(len(self.queue_data) - 1):
            time1, length1 = self.queue_data[i]
            time2, length2 = self.queue_data[i + 1]
            duration = time2 - time1
            total_time += duration
            # The queue held `length1` entries for this whole interval.
            weighted_sum += length1 * duration
        return weighted_sum / total_time if total_time > 0 else 0.0

    def average_utilization(self) -> float:
        """Calculate time-weighted average utilization (0.0 to 1.0)."""
        if len(self.utilization_data) < 2:
            return 0.0
        total_time = 0.0
        weighted_sum = 0.0
        for i in range(len(self.utilization_data) - 1):
            time1, util1 = self.utilization_data[i]
            time2, util2 = self.utilization_data[i + 1]
            duration = time2 - time1
            total_time += duration
            weighted_sum += util1 * duration
        return weighted_sum / total_time if total_time > 0 else 0.0

    def average_wait_time(self) -> float:
        """Calculate average wait time for granted requests."""
        return sum(self.wait_times) / len(self.wait_times) if self.wait_times else 0.0

    def max_queue_length(self) -> int:
        """Get maximum queue length observed."""
        return max(length for _, length in self.queue_data) if self.queue_data else 0

    def report(self):
        """Print detailed statistics report."""
        print(f"\n{'=' * 60}")
        print(f"RESOURCE MONITOR REPORT: {self.name}")
        print(f"{'=' * 60}")
        print(f"Simulation time: 0.00 to {self.env.now:.2f}")
        print(f"Capacity: {self.resource.capacity}")
        print(f"\nUtilization:")
        print(f" Average: {self.average_utilization():.2%}")
        print(f" Final: {self.resource.count / self.resource.capacity:.2%}")
        print(f"\nQueue Statistics:")
        print(f" Average length: {self.average_queue_length():.2f}")
        print(f" Max length: {self.max_queue_length()}")
        print(f" Final length: {len(self.resource.queue)}")
        print(f"\nWait Time Statistics:")
        print(f" Total requests: {len(self.wait_times)}")
        if self.wait_times:
            print(f" Average wait: {self.average_wait_time():.2f}")
            print(f" Max wait: {max(self.wait_times):.2f}")
            print(f" Min wait: {min(self.wait_times):.2f}")
        print(f"\nEvent Summary:")
        print(f" Total events: {len(self.events)}")
        request_count = sum(1 for _, event_type, _ in self.events if event_type == 'request')
        release_count = sum(1 for _, event_type, _ in self.events if event_type == 'release')
        print(f" Requests: {request_count}")
        print(f" Releases: {release_count}")
        print(f"{'=' * 60}")

    def export_csv(self, filename: str):
        """
        Export monitoring data to CSV file.

        Args:
            filename: Output CSV filename
        """
        import csv
        with open(filename, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['Time', 'Event', 'Queue Length', 'Utilization'])
            for time, event_type, data in self.events:
                writer.writerow([
                    time,
                    event_type,
                    data['queue_length'],
                    data['utilization']
                ])
        # BUG FIX: previously printed a hard-coded "(unknown)" placeholder
        # instead of the actual output path.
        print(f"Data exported to {filename}")
class MultiResourceMonitor:
    """Attach ResourceMonitor instances to several resources and report on all."""

    def __init__(self, env: simpy.Environment):
        """
        Initialize multi-resource monitor.

        Args:
            env: SimPy environment
        """
        self.env = env
        # Monitors keyed by the user-supplied resource name.
        self.monitors: Dict[str, ResourceMonitor] = {}

    def add_resource(self, resource: simpy.Resource, name: str):
        """
        Start monitoring a resource and return its monitor.

        Args:
            resource: SimPy resource to monitor
            name: Name for the resource
        """
        self.monitors[name] = monitor = ResourceMonitor(self.env, resource, name)
        return monitor

    def report_all(self):
        """Print the full report of every registered monitor."""
        for monitor in self.monitors.values():
            monitor.report()

    def summary(self):
        """Print a one-line-per-resource comparison table."""
        print(f"\n{'=' * 60}")
        print("MULTI-RESOURCE SUMMARY")
        print(f"{'=' * 60}")
        print(f"{'Resource':<20} {'Avg Util':<12} {'Avg Queue':<12} {'Avg Wait':<12}")
        print(f"{'-' * 20} {'-' * 12} {'-' * 12} {'-' * 12}")
        for name, monitor in self.monitors.items():
            print(f"{name:<20} {monitor.average_utilization():<12.2%} "
                  f"{monitor.average_queue_length():<12.2f} "
                  f"{monitor.average_wait_time():<12.2f}")
        print(f"{'=' * 60}")
class ContainerMonitor:
    """Track level changes of a simpy.Container over time."""

    def __init__(self, env: simpy.Environment, container: simpy.Container, name: str = "Container"):
        """
        Initialize container monitor.

        Args:
            env: SimPy environment
            container: Container to monitor
            name: Name for the container
        """
        self.env = env
        self.container = container
        self.name = name
        # (time, level) samples, seeded with the starting level.
        self.level_data: List[Tuple[float, float]] = [(0, container.level)]
        self._patch_container()

    def _patch_container(self):
        """Wrap put/get so every completed transfer records the new level."""
        original_put = self.container.put
        original_get = self.container.get

        def monitored_put(amount):
            result = original_put(amount)

            def on_put(event):
                self.level_data.append((self.env.now, self.container.level))

            result.callbacks.append(on_put)
            return result

        def monitored_get(amount):
            result = original_get(amount)

            def on_get(event):
                self.level_data.append((self.env.now, self.container.level))

            result.callbacks.append(on_get)
            return result

        self.container.put = monitored_put
        self.container.get = monitored_get

    def average_level(self) -> float:
        """Calculate time-weighted average level."""
        if len(self.level_data) < 2:
            return self.level_data[0][1] if self.level_data else 0.0
        elapsed = 0.0
        area = 0.0
        # Each sample's level holds until the next sample's timestamp.
        for (t_start, level), (t_end, _) in zip(self.level_data, self.level_data[1:]):
            span = t_end - t_start
            elapsed += span
            area += level * span
        return area / elapsed if elapsed > 0 else 0.0

    def report(self):
        """Print container statistics."""
        print(f"\n{'=' * 60}")
        print(f"CONTAINER MONITOR REPORT: {self.name}")
        print(f"{'=' * 60}")
        print(f"Capacity: {self.container.capacity}")
        print(f"Current level: {self.container.level:.2f}")
        print(f"Average level: {self.average_level():.2f}")
        print(f"Utilization: {self.average_level() / self.container.capacity:.2%}")
        if self.level_data:
            levels = [level for _, level in self.level_data]
            print(f"Max level: {max(levels):.2f}")
            print(f"Min level: {min(levels):.2f}")
        print(f"{'=' * 60}")
# Example usage
if __name__ == "__main__":
    def example_process(env, name, resource, duration):
        """Example process using a resource."""
        with resource.request() as req:
            yield req
            print(f"{name} started at {env.now}")
            yield env.timeout(duration)
            print(f"{name} finished at {env.now}")

    # Two-slot resource shared by five staggered-duration processes.
    env = simpy.Environment()
    resource = simpy.Resource(env, capacity=2)

    # Attach the monitor BEFORE any process touches the resource.
    monitor = ResourceMonitor(env, resource, "Example Resource")

    for i in range(5):
        env.process(example_process(env, f"Process {i}", resource, 3 + i))

    # Run until no more events remain, then summarize.
    env.run()
    monitor.report()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/simpy/scripts/resource_monitor.py",
"license": "MIT License",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/stable-baselines3/scripts/custom_env_template.py | """
Template for creating custom Gymnasium environments compatible with Stable Baselines3.
This template demonstrates:
- Proper Gymnasium environment structure
- Observation and action space definition
- Step and reset implementation
- Validation with SB3's env_checker
- Registration with Gymnasium
"""
import gymnasium as gym
from gymnasium import spaces
import numpy as np
class CustomEnv(gym.Env):
    """
    Custom Gymnasium Environment Template.

    This is a template for creating custom environments that work with
    Stable Baselines3. Modify the observation space, action space, reward
    function, and state transitions to match your specific problem.

    Example:
        A simple grid world where the agent tries to reach a goal position.
    """

    # Optional: Provide metadata for rendering modes
    metadata = {"render_modes": ["human", "rgb_array"], "render_fps": 30}

    def __init__(self, grid_size=5, render_mode=None):
        """
        Initialize the environment.

        Args:
            grid_size: Size of the grid world (grid_size x grid_size)
            render_mode: How to render ('human', 'rgb_array', or None)
        """
        super().__init__()

        self.grid_size = grid_size
        self.render_mode = render_mode

        # Define action space
        # Example: 4 discrete actions (up, down, left, right)
        self.action_space = spaces.Discrete(4)

        # Define observation space
        # Example: 2D position [x, y] in continuous space
        # Note: Use np.float32 for observations (SB3 recommendation)
        self.observation_space = spaces.Box(
            low=0,
            high=grid_size - 1,
            shape=(2,),
            dtype=np.float32,
        )

        # Alternative observation spaces:
        # 1. Discrete: spaces.Discrete(n)
        # 2. Multi-discrete: spaces.MultiDiscrete([n1, n2, ...])
        # 3. Multi-binary: spaces.MultiBinary(n)
        # 4. Box (continuous): spaces.Box(low=, high=, shape=, dtype=np.float32)
        # 5. Dict: spaces.Dict({"key1": space1, "key2": space2})

        # For image observations (e.g., 84x84 RGB image):
        # self.observation_space = spaces.Box(
        #     low=0,
        #     high=255,
        #     shape=(3, 84, 84),  # (channels, height, width) - channel-first
        #     dtype=np.uint8,
        # )

        # Initialize state; actual positions are assigned in reset()
        self._agent_position = None
        self._goal_position = None

    def reset(self, seed=None, options=None):
        """
        Reset the environment to initial state.

        Args:
            seed: Random seed for reproducibility
            options: Additional options (optional)

        Returns:
            observation: Initial observation
            info: Additional information dictionary
        """
        # Set seed for reproducibility (seeds self.np_random)
        super().reset(seed=seed)

        # Initialize agent position randomly
        self._agent_position = self.np_random.integers(0, self.grid_size, size=2)

        # Initialize goal position (different from agent)
        self._goal_position = self.np_random.integers(0, self.grid_size, size=2)
        while np.array_equal(self._agent_position, self._goal_position):
            # Re-sample until the goal differs from the agent's start cell
            self._goal_position = self.np_random.integers(0, self.grid_size, size=2)

        observation = self._get_obs()
        info = self._get_info()

        return observation, info

    def step(self, action):
        """
        Execute one step in the environment.

        Args:
            action: Action to take

        Returns:
            observation: New observation
            reward: Reward for this step
            terminated: Whether episode has ended (goal reached)
            truncated: Whether episode was truncated (time limit, etc.)
            info: Additional information dictionary
        """
        # Map action to direction (0: up, 1: down, 2: left, 3: right)
        direction = np.array([
            [-1, 0],  # up
            [1, 0],   # down
            [0, -1],  # left
            [0, 1],   # right
        ])[action]

        # Update agent position (clip to stay within grid)
        self._agent_position = np.clip(
            self._agent_position + direction,
            0,
            self.grid_size - 1,
        )

        # Check if goal is reached
        terminated = np.array_equal(self._agent_position, self._goal_position)

        # Calculate reward
        if terminated:
            reward = 1.0  # Goal reached
        else:
            # Negative reward based on distance to goal (encourages efficiency)
            distance = np.linalg.norm(self._agent_position - self._goal_position)
            reward = -0.1 * distance

        # Episode not truncated in this example (no time limit inside the
        # env itself; registration elsewhere may wrap it in a TimeLimit)
        truncated = False

        observation = self._get_obs()
        info = self._get_info()

        return observation, reward, terminated, truncated, info

    def _get_obs(self):
        """
        Get current observation.

        Returns:
            observation: Current state as defined by observation_space
        """
        # Return agent position as observation (cast to match the Box dtype)
        return self._agent_position.astype(np.float32)

        # For dict observations:
        # return {
        #     "agent": self._agent_position.astype(np.float32),
        #     "goal": self._goal_position.astype(np.float32),
        # }

    def _get_info(self):
        """
        Get additional information (for debugging/logging).

        Returns:
            info: Dictionary with additional information
        """
        return {
            "agent_position": self._agent_position,
            "goal_position": self._goal_position,
            "distance_to_goal": np.linalg.norm(
                self._agent_position - self._goal_position
            ),
        }

    def render(self):
        """
        Render the environment.

        Returns:
            Rendered frame (if render_mode is 'rgb_array')
        """
        if self.render_mode == "human":
            # Print simple text-based rendering: '.' empty, 'A' agent, 'G' goal
            grid = np.zeros((self.grid_size, self.grid_size), dtype=str)
            grid[:, :] = "."
            grid[tuple(self._agent_position)] = "A"
            grid[tuple(self._goal_position)] = "G"

            print("\n" + "=" * (self.grid_size * 2 + 1))
            for row in grid:
                print(" ".join(row))
            print("=" * (self.grid_size * 2 + 1) + "\n")

        elif self.render_mode == "rgb_array":
            # Return RGB array for video recording
            # This is a placeholder - implement proper rendering as needed
            canvas = np.zeros((
                self.grid_size * 50,
                self.grid_size * 50,
                3
            ), dtype=np.uint8)
            # Draw agent and goal on canvas
            # ... (implement visual rendering)
            return canvas

    def close(self):
        """
        Clean up environment resources.
        """
        pass
# Optional: Register the environment with Gymnasium
# This allows creating the environment with gym.make("CustomEnv-v0").
# NOTE: registration runs at import time, and max_episode_steps wraps the
# env in a TimeLimit so episodes truncate after 100 steps.
gym.register(
    id="CustomEnv-v0",
    entry_point=__name__ + ":CustomEnv",
    max_episode_steps=100,
)
def validate_environment():
    """Run SB3's env_checker against CustomEnv and report the outcome."""
    from stable_baselines3.common.env_checker import check_env

    print("Validating custom environment...")
    # check_env raises on API violations; warn=True also surfaces soft issues.
    check_env(CustomEnv(), warn=True)
    print("Environment validation passed!")
def test_environment():
    """Drive CustomEnv for up to 10 random steps, logging each transition."""
    print("Testing environment with random actions...")

    env = CustomEnv(render_mode="human")
    obs, info = env.reset()
    print(f"Initial observation: {obs}")
    print(f"Initial info: {info}")

    for step in range(10):
        # Sample uniformly from the action space.
        action = env.action_space.sample()
        obs, reward, terminated, truncated, info = env.step(action)

        print(f"\nStep {step + 1}:")
        print(f" Action: {action}")
        print(f" Observation: {obs}")
        print(f" Reward: {reward:.3f}")
        print(f" Terminated: {terminated}")
        print(f" Info: {info}")
        env.render()

        if terminated or truncated:
            print("Episode finished!")
            break

    env.close()
def train_on_custom_env():
    """Validate CustomEnv, train PPO on it briefly, then roll out the policy."""
    from stable_baselines3 import PPO

    print("Training PPO agent on custom environment...")

    env = CustomEnv()

    # Always validate before training.
    from stable_baselines3.common.env_checker import check_env
    check_env(env, warn=True)

    # Short PPO run with the default MLP policy.
    model = PPO("MlpPolicy", env, verbose=1)
    model.learn(total_timesteps=10000)

    # Greedy rollout with the trained policy.
    obs, info = env.reset()
    for _ in range(20):
        action, _states = model.predict(obs, deterministic=True)
        obs, reward, terminated, truncated, info = env.step(action)
        if terminated or truncated:
            print(f"Goal reached! Final reward: {reward}")
            break

    env.close()
if __name__ == "__main__":
    # Validate the environment (runs SB3's check_env)
    validate_environment()

    # Test with random actions (uncomment to run)
    # test_environment()

    # Train an agent (uncomment to run; performs a full PPO training loop)
    # train_on_custom_env()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/stable-baselines3/scripts/custom_env_template.py",
"license": "MIT License",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/stable-baselines3/scripts/evaluate_agent.py | """
Template script for evaluating trained RL agents with Stable Baselines3.
This template demonstrates:
- Loading trained models
- Evaluating performance with statistics
- Recording videos of agent behavior
- Visualizing agent performance
"""
import gymnasium as gym
import numpy as np
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.vec_env import DummyVecEnv, VecVideoRecorder, VecNormalize
import os
def evaluate_agent(
    model_path,
    env_id="CartPole-v1",
    n_eval_episodes=10,
    deterministic=True,
    render=False,
    record_video=False,
    video_folder="./videos/",
    vec_normalize_path=None,
):
    """Evaluate a trained PPO agent and report mean/std episode reward.

    Args:
        model_path: Path to the saved model.
        env_id: Gymnasium environment ID.
        n_eval_episodes: Number of episodes to evaluate.
        deterministic: Use deterministic actions.
        render: Create the environment with human rendering.
        record_video: Record videos of the agent.
        video_folder: Folder to save videos.
        vec_normalize_path: Path to VecNormalize statistics (if used during training).

    Returns:
        Tuple of (mean_reward, std_reward) over the evaluation episodes.
    """
    print(f"Loading model from {model_path}...")
    model = PPO.load(model_path)

    # Build the evaluation environment, optionally with on-screen rendering,
    # then wrap it in a DummyVecEnv for API consistency.
    if render:
        base_env = gym.make(env_id, render_mode="human")
    else:
        base_env = gym.make(env_id)
    env = DummyVecEnv([lambda: base_env])

    # Restore normalization statistics when training used VecNormalize.
    if vec_normalize_path and os.path.exists(vec_normalize_path):
        print(f"Loading VecNormalize statistics from {vec_normalize_path}...")
        env = VecNormalize.load(vec_normalize_path, env)
        env.training = False     # freeze running statistics during evaluation
        env.norm_reward = False  # report raw (unnormalized) rewards

    # Optionally wrap with a video recorder.
    if record_video:
        os.makedirs(video_folder, exist_ok=True)
        env = VecVideoRecorder(
            env,
            video_folder,
            record_video_trigger=lambda step: step == 0,  # start recording at step 0
            video_length=1000,  # max video length
            name_prefix=f"eval-{env_id}",
        )
        print(f"Recording videos to {video_folder}...")

    print(f"Evaluating for {n_eval_episodes} episodes...")
    mean_reward, std_reward = evaluate_policy(
        model,
        env,
        n_eval_episodes=n_eval_episodes,
        deterministic=deterministic,
        render=False,  # VecEnv doesn't accept a render flag here
        return_episode_rewards=False,
    )
    print(f"Mean reward: {mean_reward:.2f} +/- {std_reward:.2f}")

    env.close()
    return mean_reward, std_reward
def watch_agent(
    model_path,
    env_id="CartPole-v1",
    n_episodes=5,
    deterministic=True,
    vec_normalize_path=None,
):
    """Watch a trained agent play with on-screen rendering.

    Args:
        model_path: Path to the saved model.
        env_id: Gymnasium environment ID.
        n_episodes: Number of episodes to watch.
        deterministic: Use deterministic actions.
        vec_normalize_path: Path to VecNormalize statistics (if used during training).
    """
    print(f"Loading model from {model_path}...")
    model = PPO.load(model_path)

    env = gym.make(env_id, render_mode="human")

    # If training used VecNormalize, load its statistics so observations can
    # be normalized manually before each predict() call (the rendering env
    # itself is not vectorized).
    obs_normalization = None
    if vec_normalize_path and os.path.exists(vec_normalize_path):
        print(f"Loading VecNormalize statistics from {vec_normalize_path}...")
        dummy_env = DummyVecEnv([lambda: gym.make(env_id)])
        obs_normalization = VecNormalize.load(vec_normalize_path, dummy_env)
        dummy_env.close()

    for episode in range(n_episodes):
        obs, info = env.reset()
        episode_reward = 0
        step = 0
        print(f"\nEpisode {episode + 1}/{n_episodes}")

        done = False
        while not done:
            # Normalize the observation only when statistics are available.
            model_input = obs_normalization.normalize_obs(obs) if obs_normalization else obs
            action, _states = model.predict(model_input, deterministic=deterministic)
            obs, reward, terminated, truncated, info = env.step(action)
            episode_reward += reward
            step += 1
            done = terminated or truncated

        print(f"Episode reward: {episode_reward:.2f} ({step} steps)")
    env.close()
def compare_models(
    model_paths,
    env_id="CartPole-v1",
    n_eval_episodes=10,
    deterministic=True,
):
    """Evaluate several saved models on the same environment and print a summary.

    Args:
        model_paths: List of paths to saved models.
        env_id: Gymnasium environment ID.
        n_eval_episodes: Number of episodes to evaluate each model.
        deterministic: Use deterministic actions.

    Returns:
        Dict mapping each model path to ``{"mean": ..., "std": ...}``.
    """
    results = {}
    for path in model_paths:
        print(f"\nEvaluating {path}...")
        mean, std = evaluate_agent(
            path,
            env_id=env_id,
            n_eval_episodes=n_eval_episodes,
            deterministic=deterministic,
        )
        results[path] = {"mean": mean, "std": std}

    # Summary table
    separator = "=" * 60
    print("\n" + separator)
    print("Model Comparison Results")
    print(separator)
    for path, perf in results.items():
        print(f"{path}: {perf['mean']:.2f} +/- {perf['std']:.2f}")
    print(separator)
    return results
# Script entry point: usage examples. Only Example 1 runs by default; the
# commented alternatives show the other evaluation modes of this module.
if __name__ == "__main__":
    # Example 1: Evaluate a trained model
    # NOTE: expects a checkpoint previously written by the training script.
    model_path = "./models/best_model/best_model.zip"
    evaluate_agent(
        model_path=model_path,
        env_id="CartPole-v1",
        n_eval_episodes=10,
        deterministic=True,
    )
    # Example 2: Record videos of agent behavior
    # evaluate_agent(
    #     model_path=model_path,
    #     env_id="CartPole-v1",
    #     n_eval_episodes=5,
    #     deterministic=True,
    #     record_video=True,
    #     video_folder="./videos/",
    # )
    # Example 3: Watch agent play with rendering
    # watch_agent(
    #     model_path=model_path,
    #     env_id="CartPole-v1",
    #     n_episodes=3,
    #     deterministic=True,
    # )
    # Example 4: Compare multiple models
    # compare_models(
    #     model_paths=[
    #         "./models/model_100k.zip",
    #         "./models/model_200k.zip",
    #         "./models/best_model/best_model.zip",
    #     ],
    #     env_id="CartPole-v1",
    #     n_eval_episodes=10,
    # )
    # Example 5: Evaluate with VecNormalize statistics
    # evaluate_agent(
    #     model_path="./models/best_model/best_model.zip",
    #     env_id="Pendulum-v1",
    #     n_eval_episodes=10,
    #     vec_normalize_path="./models/vec_normalize.pkl",
    # )
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/stable-baselines3/scripts/evaluate_agent.py",
"license": "MIT License",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/stable-baselines3/scripts/train_rl_agent.py | """
Template script for training RL agents with Stable Baselines3.
This template demonstrates best practices for:
- Setting up training with proper monitoring
- Using callbacks for evaluation and checkpointing
- Vectorized environments for efficiency
- TensorBoard integration
- Model saving and loading
"""
import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.callbacks import (
EvalCallback,
CheckpointCallback,
CallbackList,
)
from stable_baselines3.common.vec_env import SubprocVecEnv, VecNormalize
import os
def train_agent(
    env_id="CartPole-v1",
    algorithm=PPO,
    policy="MlpPolicy",
    n_envs=4,
    total_timesteps=100000,
    eval_freq=10000,
    save_freq=10000,
    log_dir="./logs/",
    save_path="./models/",
):
    """
    Train an RL agent with best practices.

    Sets up a vectorized training environment, periodic evaluation and
    checkpointing callbacks, TensorBoard logging, and saves the final model.

    Args:
        env_id: Gymnasium environment ID
        algorithm: SB3 algorithm class (PPO, SAC, DQN, etc.)
        policy: Policy type ("MlpPolicy", "CnnPolicy", "MultiInputPolicy")
        n_envs: Number of parallel environments
        total_timesteps: Total training timesteps
        eval_freq: Frequency of evaluation (in timesteps)
        save_freq: Frequency of model checkpoints (in timesteps)
        log_dir: Directory for logs and TensorBoard
        save_path: Directory for model checkpoints

    Returns:
        The trained SB3 model.
    """
    # Create directories
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(save_path, exist_ok=True)
    eval_log_dir = os.path.join(log_dir, "eval")
    os.makedirs(eval_log_dir, exist_ok=True)

    # Create training environment (vectorized for efficiency)
    print(f"Creating {n_envs} parallel training environments...")
    env = make_vec_env(
        env_id,
        n_envs=n_envs,
        vec_env_cls=SubprocVecEnv,  # parallel subprocesses; use DummyVecEnv for lightweight envs
    )

    # Optional: normalization wrapper for better performance.
    # Uncomment for continuous control tasks.
    # env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.0)

    # Create separate evaluation environment
    print("Creating evaluation environment...")
    eval_env = make_vec_env(env_id, n_envs=1)
    # If using VecNormalize, wrap eval env but set training=False
    # eval_env = VecNormalize(eval_env, training=False, norm_reward=False)

    # Callback frequencies are counted per environment step, so divide by
    # n_envs. Clamp to at least 1: integer division can yield 0 when
    # n_envs > freq, and a frequency of 0 silently disables the callback.
    eval_callback = EvalCallback(
        eval_env,
        best_model_save_path=os.path.join(save_path, "best_model"),
        log_path=eval_log_dir,
        eval_freq=max(1, eval_freq // n_envs),
        n_eval_episodes=10,
        deterministic=True,
        render=False,
    )
    checkpoint_callback = CheckpointCallback(
        save_freq=max(1, save_freq // n_envs),
        save_path=save_path,
        name_prefix="rl_model",
        save_replay_buffer=False,  # Set True for off-policy algorithms if needed
    )
    callback = CallbackList([eval_callback, checkpoint_callback])

    # Initialize the agent
    print(f"Initializing {algorithm.__name__} agent...")
    model = algorithm(
        policy,
        env,
        verbose=1,
        tensorboard_log=log_dir,
        # Algorithm-specific hyperparameters can be added here, e.g.:
        # learning_rate=3e-4, n_steps=2048, batch_size=64, gamma=0.99,
    )

    # Train the agent
    print(f"Training for {total_timesteps} timesteps...")
    model.learn(
        total_timesteps=total_timesteps,
        callback=callback,
        tb_log_name=f"{algorithm.__name__}_{env_id}",
    )

    # Save final model
    final_model_path = os.path.join(save_path, "final_model")
    print(f"Saving final model to {final_model_path}...")
    model.save(final_model_path)
    # Save VecNormalize statistics if used
    # env.save(os.path.join(save_path, "vec_normalize.pkl"))

    print("Training complete!")
    print(f"Best model saved at: {os.path.join(save_path, 'best_model')}")
    print(f"Final model saved at: {final_model_path}")
    print(f"TensorBoard logs: {log_dir}")
    print(f"Run 'tensorboard --logdir {log_dir}' to view training progress")

    # Cleanup
    env.close()
    eval_env.close()
    return model
# Script entry point: train PPO on CartPole by default. The commented
# examples show how to swap in other algorithms/environments.
if __name__ == "__main__":
    # Example: Train PPO on CartPole
    train_agent(
        env_id="CartPole-v1",
        algorithm=PPO,
        policy="MlpPolicy",
        n_envs=4,
        total_timesteps=100000,
    )
    # Example: Train SAC on continuous control task
    # from stable_baselines3 import SAC
    # train_agent(
    #     env_id="Pendulum-v1",
    #     algorithm=SAC,
    #     policy="MlpPolicy",
    #     n_envs=4,
    #     total_timesteps=50000,
    # )
    # Example: Train DQN on discrete task
    # from stable_baselines3 import DQN
    # train_agent(
    #     env_id="LunarLander-v2",
    #     algorithm=DQN,
    #     policy="MlpPolicy",
    #     n_envs=1,  # DQN typically uses single env
    #     total_timesteps=100000,
    # )
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/stable-baselines3/scripts/train_rl_agent.py",
"license": "MIT License",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/statistical-analysis/scripts/assumption_checks.py | """
Comprehensive statistical assumption checking utilities.
This module provides functions to check common statistical assumptions:
- Normality
- Homogeneity of variance
- Independence
- Linearity
- Outliers
"""
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
from typing import Dict, List, Tuple, Optional, Union
def check_normality(
    data: Union[np.ndarray, pd.Series, List],
    name: str = "data",
    alpha: float = 0.05,
    plot: bool = True
) -> Dict:
    """
    Assess normality of a sample via the Shapiro-Wilk test, with optional plots.

    Parameters
    ----------
    data : array-like
        Sample to test (NaNs are dropped before testing).
    name : str
        Variable name used in plot titles.
    alpha : float
        Significance level for the Shapiro-Wilk test.
    plot : bool
        If True, show a Q-Q plot and a histogram with a fitted normal curve.

    Returns
    -------
    dict
        Test name, statistic, p-value, normality flag, interpretation,
        sample size, and a recommendation.
    """
    sample = np.asarray(data)
    sample = sample[~np.isnan(sample)]

    # Shapiro-Wilk test on the NaN-free sample
    w_stat, p_val = stats.shapiro(sample)
    normal = p_val > alpha
    verdict = (
        f"Data {'appear' if normal else 'do not appear'} normally distributed "
        f"(W = {w_stat:.3f}, p = {p_val:.3f})"
    )

    if plot:
        fig, (qq_ax, hist_ax) = plt.subplots(1, 2, figsize=(12, 4))

        # Q-Q plot against a normal distribution
        stats.probplot(sample, dist="norm", plot=qq_ax)
        qq_ax.set_title(f"Q-Q Plot: {name}")
        qq_ax.grid(alpha=0.3)

        # Histogram overlaid with the fitted normal density
        hist_ax.hist(sample, bins='auto', density=True, alpha=0.7,
                     color='steelblue', edgecolor='black')
        mu, sigma = sample.mean(), sample.std()
        grid = np.linspace(sample.min(), sample.max(), 100)
        hist_ax.plot(grid, stats.norm.pdf(grid, mu, sigma), 'r-',
                     linewidth=2, label='Normal curve')
        hist_ax.set_xlabel('Value')
        hist_ax.set_ylabel('Density')
        hist_ax.set_title(f'Histogram: {name}')
        hist_ax.legend()
        hist_ax.grid(alpha=0.3)

        plt.tight_layout()
        plt.show()

    return {
        'test': 'Shapiro-Wilk',
        'statistic': w_stat,
        'p_value': p_val,
        'is_normal': normal,
        'interpretation': verdict,
        'n': len(sample),
        'recommendation': (
            "Proceed with parametric test" if normal
            else "Consider non-parametric alternative or transformation"
        )
    }
def check_normality_per_group(
    data: pd.DataFrame,
    value_col: str,
    group_col: str,
    alpha: float = 0.05,
    plot: bool = True
) -> pd.DataFrame:
    """
    Run a Shapiro-Wilk normality test on each group separately.

    Parameters
    ----------
    data : pd.DataFrame
        Data containing the values and group labels.
    value_col : str
        Column with the values to test.
    group_col : str
        Column with the group labels.
    alpha : float
        Significance level.
    plot : bool
        If True, draw one Q-Q plot per group.

    Returns
    -------
    pd.DataFrame
        One row per group: N, W statistic, p-value, and a Yes/No normal flag.
    """
    labels = data[group_col].unique()
    rows = []

    axes = None
    if plot:
        fig, axes = plt.subplots(1, len(labels), figsize=(5 * len(labels), 4))
        # A single subplot is returned bare, not in an array
        if len(labels) == 1:
            axes = [axes]

    for position, label in enumerate(labels):
        sample = data.loc[data[group_col] == label, value_col].dropna()
        w_stat, p_val = stats.shapiro(sample)
        rows.append({
            'Group': label,
            'N': len(sample),
            'W': w_stat,
            'p-value': p_val,
            'Normal': 'Yes' if p_val > alpha else 'No'
        })
        if plot:
            stats.probplot(sample, dist="norm", plot=axes[position])
            axes[position].set_title(f"Q-Q Plot: {label}")
            axes[position].grid(alpha=0.3)

    if plot:
        plt.tight_layout()
        plt.show()
    return pd.DataFrame(rows)
def check_homogeneity_of_variance(
    data: pd.DataFrame,
    value_col: str,
    group_col: str,
    alpha: float = 0.05,
    plot: bool = True
) -> Dict:
    """
    Test equality of group variances with Levene's test.

    Parameters
    ----------
    data : pd.DataFrame
        Data containing the values and group labels.
    value_col : str
        Column with the values.
    group_col : str
        Column with the group labels.
    alpha : float
        Significance level.
    plot : bool
        If True, draw box plots and a per-group variance bar chart.

    Returns
    -------
    dict
        Test statistic, p-value, homogeneity flag, variance ratio,
        interpretation, and a recommendation.
    """
    samples = [grp[value_col].values for _, grp in data.groupby(group_col)]

    # Levene's test is robust to departures from normality
    lev_stat, p_val = stats.levene(*samples)

    # Max/min sample-variance ratio as a rule-of-thumb diagnostic
    variances = [np.var(s, ddof=1) for s in samples]
    var_ratio = max(variances) / min(variances)

    homogeneous = p_val > alpha
    verdict = (
        f"Variances {'appear' if homogeneous else 'do not appear'} homogeneous "
        f"(F = {lev_stat:.3f}, p = {p_val:.3f}, variance ratio = {var_ratio:.2f})"
    )

    if plot:
        fig, (box_ax, var_ax) = plt.subplots(1, 2, figsize=(12, 4))

        # Box plot per group
        data.boxplot(column=value_col, by=group_col, ax=box_ax)
        box_ax.set_title('Box Plots by Group')
        box_ax.set_xlabel(group_col)
        box_ax.set_ylabel(value_col)
        plt.sca(box_ax)
        plt.xticks(rotation=45)

        # Bar chart of per-group variances
        names = data[group_col].unique()
        var_ax.bar(range(len(variances)), variances, color='steelblue', edgecolor='black')
        var_ax.set_xticks(range(len(variances)))
        var_ax.set_xticklabels(names, rotation=45)
        var_ax.set_ylabel('Variance')
        var_ax.set_title('Variance by Group')
        var_ax.grid(alpha=0.3, axis='y')

        plt.tight_layout()
        plt.show()

    return {
        'test': 'Levene',
        'statistic': lev_stat,
        'p_value': p_val,
        'is_homogeneous': homogeneous,
        'variance_ratio': var_ratio,
        'interpretation': verdict,
        'recommendation': (
            "Proceed with standard test" if homogeneous
            else "Consider Welch's correction or transformation"
        )
    }
def check_linearity(
    x: Union[np.ndarray, pd.Series],
    y: Union[np.ndarray, pd.Series],
    x_name: str = "X",
    y_name: str = "Y",
    plot: bool = True
) -> Dict:
    """
    Check the linearity assumption for simple linear regression.

    Fits an OLS line and (optionally) shows a scatter plot with the fitted
    line plus a residuals-vs-fitted diagnostic plot.

    Parameters
    ----------
    x : array-like
        Predictor variable.
    y : array-like
        Outcome variable.
    x_name : str
        Name of predictor (for plot labels).
    y_name : str
        Name of outcome (for plot labels).
    plot : bool
        If True (default), draw the diagnostic plots. Added for consistency
        with the other check_* functions in this module, which all accept a
        ``plot`` flag; default preserves previous behavior.

    Returns
    -------
    dict
        Correlation coefficient, R-squared, interpretation, and recommendation.
    """
    x = np.asarray(x)
    y = np.asarray(y)

    # Fit simple linear regression
    slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
    y_pred = intercept + slope * x

    # Residuals drive the diagnostic plot
    residuals = y - y_pred

    if plot:
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))

        # Scatter plot with regression line
        ax1.scatter(x, y, alpha=0.6, s=50, edgecolors='black', linewidths=0.5)
        ax1.plot(x, y_pred, 'r-', linewidth=2, label=f'y = {intercept:.2f} + {slope:.2f}x')
        ax1.set_xlabel(x_name)
        ax1.set_ylabel(y_name)
        ax1.set_title('Scatter Plot with Regression Line')
        ax1.legend()
        ax1.grid(alpha=0.3)

        # Residuals vs fitted: systematic patterns indicate non-linearity
        ax2.scatter(y_pred, residuals, alpha=0.6, s=50, edgecolors='black', linewidths=0.5)
        ax2.axhline(y=0, color='r', linestyle='--', linewidth=2)
        ax2.set_xlabel('Fitted values')
        ax2.set_ylabel('Residuals')
        ax2.set_title('Residuals vs Fitted Values')
        ax2.grid(alpha=0.3)

        plt.tight_layout()
        plt.show()

    return {
        'r': r_value,
        'r_squared': r_value ** 2,
        'interpretation': (
            "Examine residual plot. Points should be randomly scattered around zero. "
            "Patterns (curves, funnels) suggest non-linearity or heteroscedasticity."
        ),
        'recommendation': (
            "If non-linear pattern detected: Consider polynomial terms, "
            "transformations, or non-linear models"
        )
    }
def detect_outliers(
    data: Union[np.ndarray, pd.Series, List],
    name: str = "data",
    method: str = "iqr",
    threshold: float = 1.5,
    plot: bool = True
) -> Dict:
    """
    Flag outliers with either Tukey's IQR fences or a z-score cutoff.

    Parameters
    ----------
    data : array-like
        Sample to screen (NaNs are dropped first).
    name : str
        Variable name for plot titles.
    method : str
        'iqr' (Tukey fences) or 'zscore' (standard-score cutoff).
    threshold : float
        Fence multiplier: 1.5 (mild) or 3 (extreme) for IQR; typically 3
        for z-scores.
    plot : bool
        If True, draw a box plot and an index plot highlighting outliers.

    Returns
    -------
    dict
        Counts, indices, values, bounds, interpretation, and recommendation.

    Raises
    ------
    ValueError
        If ``method`` is not 'iqr' or 'zscore'.
    """
    values = np.asarray(data)
    values = values[~np.isnan(values)]

    if method == "iqr":
        # Tukey fences: quartiles +/- threshold * IQR
        q1, q3 = np.percentile(values, 25), np.percentile(values, 75)
        spread = q3 - q1
        lower_bound = q1 - threshold * spread
        upper_bound = q3 + threshold * spread
        flagged = (values < lower_bound) | (values > upper_bound)
    elif method == "zscore":
        # Standard scores against the (population) mean/std
        flagged = np.abs(stats.zscore(values)) > threshold
        lower_bound = values.mean() - threshold * values.std()
        upper_bound = values.mean() + threshold * values.std()
    else:
        raise ValueError("method must be 'iqr' or 'zscore'")

    flagged_idx = np.where(flagged)[0]
    flagged_vals = values[flagged]
    count = len(flagged_idx)
    pct = (count / len(values)) * 100

    if plot:
        fig, (box_ax, idx_ax) = plt.subplots(1, 2, figsize=(12, 4))

        # Box plot
        bp = box_ax.boxplot(values, vert=True, patch_artist=True)
        bp['boxes'][0].set_facecolor('steelblue')
        box_ax.set_ylabel('Value')
        box_ax.set_title(f'Box Plot: {name}')
        box_ax.grid(alpha=0.3, axis='y')

        # Index plot with outliers highlighted
        positions = np.arange(len(values))
        idx_ax.scatter(positions[~flagged], values[~flagged],
                       alpha=0.6, s=50, color='steelblue', label='Normal',
                       edgecolors='black', linewidths=0.5)
        if count > 0:
            idx_ax.scatter(positions[flagged], values[flagged],
                           alpha=0.8, s=100, color='red', label='Outliers',
                           marker='D', edgecolors='black', linewidths=0.5)
        idx_ax.axhline(y=lower_bound, color='orange', linestyle='--', linewidth=1.5, label='Bounds')
        idx_ax.axhline(y=upper_bound, color='orange', linestyle='--', linewidth=1.5)
        idx_ax.set_xlabel('Index')
        idx_ax.set_ylabel('Value')
        idx_ax.set_title(f'Outlier Detection: {name}')
        idx_ax.legend()
        idx_ax.grid(alpha=0.3)

        plt.tight_layout()
        plt.show()

    return {
        'method': method,
        'threshold': threshold,
        'n_outliers': count,
        'pct_outliers': pct,
        'outlier_indices': flagged_idx,
        'outlier_values': flagged_vals,
        'lower_bound': lower_bound,
        'upper_bound': upper_bound,
        'interpretation': f"Found {count} outliers ({pct:.1f}% of data)",
        'recommendation': (
            "Investigate outliers for data entry errors. "
            "Consider: (1) removing if errors, (2) winsorizing, "
            "(3) keeping if legitimate, (4) using robust methods"
        )
    }
def comprehensive_assumption_check(
    data: pd.DataFrame,
    value_col: str,
    group_col: Optional[str] = None,
    alpha: float = 0.05
) -> Dict:
    """
    Perform comprehensive assumption checking for common statistical tests.

    Runs, in order: IQR outlier detection, normality checks (per group when
    ``group_col`` is given, otherwise overall), and — for grouped data only —
    Levene's homogeneity-of-variance test. Prints a formatted report with a
    final recommendation and shows each check's diagnostic plots.

    Parameters
    ----------
    data : pd.DataFrame
        Data to check
    value_col : str
        Column name for dependent variable
    group_col : str, optional
        Column name for grouping variable (if applicable)
    alpha : float
        Significance level

    Returns
    -------
    dict
        Summary of all assumption checks; keys are 'outliers' plus either
        'normality_per_group' and 'homogeneity' (grouped case) or 'normality'.
    """
    print("=" * 70)
    print("COMPREHENSIVE ASSUMPTION CHECK")
    print("=" * 70)
    results = {}
    # Outlier detection — always runs, on the pooled sample
    print("\n1. OUTLIER DETECTION")
    print("-" * 70)
    outlier_results = detect_outliers(
        data[value_col].dropna(),
        name=value_col,
        method='iqr',
        plot=True
    )
    results['outliers'] = outlier_results
    print(f"   {outlier_results['interpretation']}")
    print(f"   {outlier_results['recommendation']}")
    # Check if grouped data
    if group_col is not None:
        # Normality per group (Shapiro-Wilk on each group separately)
        print(f"\n2. NORMALITY CHECK (by {group_col})")
        print("-" * 70)
        normality_results = check_normality_per_group(
            data, value_col, group_col, alpha=alpha, plot=True
        )
        results['normality_per_group'] = normality_results
        print(normality_results.to_string(index=False))
        all_normal = normality_results['Normal'].eq('Yes').all()
        print(f"\n   All groups normal: {'Yes' if all_normal else 'No'}")
        if not all_normal:
            print("   β Consider non-parametric alternative (Mann-Whitney, Kruskal-Wallis)")
        # Homogeneity of variance (Levene's test across groups)
        print(f"\n3. HOMOGENEITY OF VARIANCE")
        print("-" * 70)
        homogeneity_results = check_homogeneity_of_variance(
            data, value_col, group_col, alpha=alpha, plot=True
        )
        results['homogeneity'] = homogeneity_results
        print(f"   {homogeneity_results['interpretation']}")
        print(f"   {homogeneity_results['recommendation']}")
    else:
        # Overall normality (single ungrouped sample)
        print(f"\n2. NORMALITY CHECK")
        print("-" * 70)
        normality_results = check_normality(
            data[value_col].dropna(),
            name=value_col,
            alpha=alpha,
            plot=True
        )
        results['normality'] = normality_results
        print(f"   {normality_results['interpretation']}")
        print(f"   {normality_results['recommendation']}")
    # Summary: combine the individual verdicts into one recommendation.
    # NOTE(review): the 'β' in some messages looks like a mojibake'd symbol
    # (possibly '→' or '✗') — confirm against the original source encoding.
    print("\n" + "=" * 70)
    print("SUMMARY")
    print("=" * 70)
    if group_col is not None:
        # Defensive .get() chain: tolerates a missing/failed earlier step
        all_normal = results.get('normality_per_group', pd.DataFrame()).get('Normal', pd.Series()).eq('Yes').all()
        is_homogeneous = results.get('homogeneity', {}).get('is_homogeneous', False)
        if all_normal and is_homogeneous:
            print("β All assumptions met. Proceed with parametric test (t-test, ANOVA).")
        elif not all_normal:
            print("β Normality violated. Use non-parametric alternative.")
        elif not is_homogeneous:
            print("β Homogeneity violated. Use Welch's correction or transformation.")
    else:
        is_normal = results.get('normality', {}).get('is_normal', False)
        if is_normal:
            print("β Normality assumption met.")
        else:
            print("β Normality violated. Consider transformation or non-parametric method.")
    print("=" * 70)
    return results
# Script entry point: demonstrate the full assumption-check pipeline on
# simulated two-group data.
if __name__ == "__main__":
    # Example usage
    np.random.seed(42)  # reproducible simulation
    # Simulate two groups with different means and spreads
    group_a = np.random.normal(75, 8, 50)
    group_b = np.random.normal(68, 10, 50)
    df = pd.DataFrame({
        'score': np.concatenate([group_a, group_b]),
        'group': ['A'] * 50 + ['B'] * 50
    })
    # Run comprehensive check (outliers, per-group normality, homogeneity)
    results = comprehensive_assumption_check(
        df,
        value_col='score',
        group_col='group',
        alpha=0.05
    )
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/statistical-analysis/scripts/assumption_checks.py",
"license": "MIT License",
"lines": 460,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/string-database/scripts/string_api.py | """
STRING Database REST API Helper Functions
This module provides Python functions for interacting with the STRING database API.
All functions return raw response text or JSON which can be parsed as needed.
API Base URL: https://string-db.org/api
Documentation: https://string-db.org/help/api/
STRING provides protein-protein interaction data from over 40 sources covering
5000+ genomes with ~59.3 million proteins and 20+ billion interactions.
"""
import urllib.request
import urllib.parse
import urllib.error
import json
from typing import Optional, List, Union, Dict
STRING_BASE_URL = "https://string-db.org/api"
def string_map_ids(identifiers: Union[str, List[str]],
                   species: int = 9606,
                   limit: int = 1,
                   echo_query: int = 1,
                   caller_identity: str = "claude_scientific_skills") -> str:
    """
    Resolve protein names, synonyms, and identifiers to STRING IDs.

    Args:
        identifiers: One identifier, or a list of identifiers.
        species: NCBI taxon ID (default: 9606 for human).
        limit: Matches returned per query term (default: 1).
        echo_query: 1 to echo the query term in the output, 0 otherwise.
        caller_identity: Application identifier for API usage tracking.

    Returns:
        str: TSV text with the mapping results, or an error description.

    Examples:
        # Map single protein
        result = string_map_ids('TP53', species=9606)
        # Map multiple proteins
        result = string_map_ids(['TP53', 'BRCA1', 'EGFR'], species=9606)
    """
    # The POST body takes one query term per line.
    query = '\n'.join(identifiers) if isinstance(identifiers, list) else identifiers
    payload = urllib.parse.urlencode({
        'identifiers': query,
        'species': species,
        'limit': limit,
        'echo_query': echo_query,
        'caller_identity': caller_identity
    }).encode('utf-8')
    try:
        with urllib.request.urlopen(f"{STRING_BASE_URL}/tsv/get_string_ids", data=payload) as response:
            return response.read().decode('utf-8')
    except urllib.error.HTTPError as e:
        return f"Error: {e.code} - {e.reason}"
def string_network(identifiers: Union[str, List[str]],
                   species: int = 9606,
                   required_score: int = 400,
                   network_type: str = "functional",
                   add_nodes: int = 0,
                   caller_identity: str = "claude_scientific_skills") -> str:
    """
    Fetch protein-protein interaction network edges as TSV.

    Args:
        identifiers: Protein identifier(s); STRING IDs give the best matches.
        species: NCBI taxon ID (default: 9606 for human).
        required_score: Confidence cutoff 0-1000 (default: 400 = medium).
        network_type: 'functional' or 'physical' (default: functional).
        add_nodes: Extra interacting nodes to pull into the network (0-10).
        caller_identity: Application identifier for API usage tracking.

    Returns:
        str: TSV text with one interaction per row, or an error description.

    Examples:
        network = string_network('9606.ENSP00000269305')
        network = string_network('TP53', add_nodes=5, required_score=700)
    """
    # NOTE(review): lists are joined with the literal '%0d' before urlencode(),
    # which re-encodes '%' as '%25'. STRING's own client examples join with '\r'
    # and let the encoder produce '%0d' — verify multi-identifier queries.
    query = '%0d'.join(identifiers) if isinstance(identifiers, list) else identifiers
    query_string = urllib.parse.urlencode({
        'identifiers': query,
        'species': species,
        'required_score': required_score,
        'network_type': network_type,
        'add_nodes': add_nodes,
        'caller_identity': caller_identity
    })
    try:
        with urllib.request.urlopen(f"{STRING_BASE_URL}/tsv/network?{query_string}") as response:
            return response.read().decode('utf-8')
    except urllib.error.HTTPError as e:
        return f"Error: {e.code} - {e.reason}"
def string_network_image(identifiers: Union[str, List[str]],
                         species: int = 9606,
                         required_score: int = 400,
                         network_flavor: str = "evidence",
                         add_nodes: int = 0,
                         caller_identity: str = "claude_scientific_skills") -> bytes:
    """
    Download a network visualization from STRING as a PNG.

    Args:
        identifiers: Protein identifier(s).
        species: NCBI taxon ID (default: 9606 for human).
        required_score: Confidence cutoff 0-1000 (default: 400).
        network_flavor: 'evidence', 'confidence', or 'actions'.
        add_nodes: Extra interacting nodes to include (0-10).
        caller_identity: Application identifier for API usage tracking.

    Returns:
        bytes: PNG image data (or a UTF-8 encoded error description).

    Example:
        img_data = string_network_image(['TP53', 'MDM2', 'ATM'])
        with open('network.png', 'wb') as f:
            f.write(img_data)
    """
    # NOTE(review): '%0d'.join before urlencode() double-encodes the separator
    # ('%' -> '%25'); STRING's examples join with '\r' instead — verify.
    query = '%0d'.join(identifiers) if isinstance(identifiers, list) else identifiers
    query_string = urllib.parse.urlencode({
        'identifiers': query,
        'species': species,
        'required_score': required_score,
        'network_flavor': network_flavor,
        'add_nodes': add_nodes,
        'caller_identity': caller_identity
    })
    try:
        with urllib.request.urlopen(f"{STRING_BASE_URL}/image/network?{query_string}") as response:
            return response.read()
    except urllib.error.HTTPError as e:
        return f"Error: {e.code} - {e.reason}".encode()
def string_interaction_partners(identifiers: Union[str, List[str]],
                                species: int = 9606,
                                required_score: int = 400,
                                limit: int = 10,
                                caller_identity: str = "claude_scientific_skills") -> str:
    """
    List interaction partners for the given protein(s).

    Args:
        identifiers: Protein identifier(s).
        species: NCBI taxon ID (default: 9606 for human).
        required_score: Confidence cutoff 0-1000 (default: 400).
        limit: Maximum number of partners to return (default: 10).
        caller_identity: Application identifier for API usage tracking.

    Returns:
        str: TSV text with interaction partners, or an error description.

    Example:
        partners = string_interaction_partners('TP53', limit=20, required_score=700)
    """
    # NOTE(review): '%0d'.join before urlencode() double-encodes the separator
    # ('%' -> '%25'); STRING's examples join with '\r' instead — verify.
    query = '%0d'.join(identifiers) if isinstance(identifiers, list) else identifiers
    query_string = urllib.parse.urlencode({
        'identifiers': query,
        'species': species,
        'required_score': required_score,
        'limit': limit,
        'caller_identity': caller_identity
    })
    try:
        with urllib.request.urlopen(f"{STRING_BASE_URL}/tsv/interaction_partners?{query_string}") as response:
            return response.read().decode('utf-8')
    except urllib.error.HTTPError as e:
        return f"Error: {e.code} - {e.reason}"
def string_enrichment(identifiers: Union[str, List[str]],
                      species: int = 9606,
                      caller_identity: str = "claude_scientific_skills") -> str:
    """
    Run functional enrichment analysis (GO, KEGG, Pfam, etc.) on a protein set.

    Args:
        identifiers: List of protein identifiers (or a single identifier).
        species: NCBI taxon ID (default: 9606 for human).
        caller_identity: Application identifier for API usage tracking.

    Returns:
        str: TSV text with enrichment results, or an error description.

    Example:
        proteins = ['TP53', 'MDM2', 'ATM', 'CHEK2', 'BRCA1']
        enrichment = string_enrichment(proteins, species=9606)
    """
    # NOTE(review): '%0d'.join before urlencode() double-encodes the separator
    # ('%' -> '%25'); STRING's examples join with '\r' instead — verify.
    query = '%0d'.join(identifiers) if isinstance(identifiers, list) else identifiers
    query_string = urllib.parse.urlencode({
        'identifiers': query,
        'species': species,
        'caller_identity': caller_identity
    })
    try:
        with urllib.request.urlopen(f"{STRING_BASE_URL}/tsv/enrichment?{query_string}") as response:
            return response.read().decode('utf-8')
    except urllib.error.HTTPError as e:
        return f"Error: {e.code} - {e.reason}"
def string_ppi_enrichment(identifiers: Union[str, List[str]],
                          species: int = 9606,
                          required_score: int = 400,
                          caller_identity: str = "claude_scientific_skills") -> str:
    """
    Test if network has more interactions than expected by chance.

    Args:
        identifiers: Protein identifier(s) -- a single string or a list
        species: NCBI taxon ID (default: 9606 for human)
        required_score: Confidence threshold 0-1000 (default: 400)
        caller_identity: Application identifier for tracking

    Returns:
        str: JSON with PPI enrichment p-value, or an "Error: ..." string
             on HTTP failure

    Example:
        # Test if proteins are more connected than random
        proteins = ['TP53', 'MDM2', 'ATM', 'CHEK2']
        ppi_result = string_ppi_enrichment(proteins)
    """
    # Join with '\r' (STRING's documented "%0d" separator) so urlencode
    # percent-encodes it exactly once; a literal '%0d' would be
    # double-encoded to '%250d' and break multi-protein queries.
    if isinstance(identifiers, list):
        identifiers_str = '\r'.join(identifiers)
    else:
        identifiers_str = identifiers
    params = {
        'identifiers': identifiers_str,
        'species': species,
        'required_score': required_score,
        'caller_identity': caller_identity
    }
    url = f"{STRING_BASE_URL}/json/ppi_enrichment?" + urllib.parse.urlencode(params)
    try:
        with urllib.request.urlopen(url) as response:
            return response.read().decode('utf-8')
    except urllib.error.HTTPError as e:
        return f"Error: {e.code} - {e.reason}"
def string_homology(identifiers: Union[str, List[str]],
                    species: int = 9606,
                    caller_identity: str = "claude_scientific_skills") -> str:
    """
    Get homology/similarity scores between proteins.

    Args:
        identifiers: Protein identifier(s) -- a single string or a list
        species: NCBI taxon ID (default: 9606 for human)
        caller_identity: Application identifier for tracking

    Returns:
        str: TSV format with homology scores, or an "Error: ..." string
             on HTTP failure

    Example:
        # Get homology data
        homology = string_homology(['TP53', 'TP63', 'TP73'])
    """
    # Join with '\r' (STRING's documented "%0d" separator) so urlencode
    # percent-encodes it exactly once; a literal '%0d' would be
    # double-encoded to '%250d' and break multi-protein queries.
    if isinstance(identifiers, list):
        identifiers_str = '\r'.join(identifiers)
    else:
        identifiers_str = identifiers
    params = {
        'identifiers': identifiers_str,
        'species': species,
        'caller_identity': caller_identity
    }
    url = f"{STRING_BASE_URL}/tsv/homology?" + urllib.parse.urlencode(params)
    try:
        with urllib.request.urlopen(url) as response:
            return response.read().decode('utf-8')
    except urllib.error.HTTPError as e:
        return f"Error: {e.code} - {e.reason}"
def string_version() -> str:
    """
    Fetch the current STRING database version.

    Returns:
        str: Version information as TSV, or an "Error: ..." string on
             HTTP failure

    Example:
        version = string_version()
    """
    endpoint = f"{STRING_BASE_URL}/tsv/version"
    try:
        response = urllib.request.urlopen(endpoint)
    except urllib.error.HTTPError as e:
        return f"Error: {e.code} - {e.reason}"
    with response:
        return response.read().decode('utf-8')
if __name__ == "__main__":
    # Smoke-test the API wrappers against the live STRING service.
    print("STRING Version:")
    print(string_version())
    print()

    print("Mapping protein names to STRING IDs:")
    id_mapping = string_map_ids(['TP53', 'BRCA1'], species=9606)
    print(id_mapping)
    print()

    print("Getting interaction network:")
    tp53_network = string_network('TP53', species=9606, add_nodes=3)
    print(tp53_network[:500] + "...")
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/string-database/scripts/string_api.py",
"license": "MIT License",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/scientific/torch_geometric/scripts/benchmark_model.py | #!/usr/bin/env python3
"""
Benchmark GNN models on standard datasets.
This script provides a simple way to benchmark different GNN architectures
on common datasets and compare their performance.
Usage:
python benchmark_model.py --models gcn gat --dataset Cora
python benchmark_model.py --models gcn --dataset Cora --epochs 200 --runs 10
"""
import argparse
import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, GATConv, SAGEConv, GINConv
from torch_geometric.datasets import Planetoid, TUDataset
from torch_geometric.loader import DataLoader
from torch_geometric.nn import global_mean_pool
import time
import numpy as np
class GCN(torch.nn.Module):
    """Two-layer graph convolutional network for node- or graph-level tasks."""

    def __init__(self, num_features, hidden_channels, num_classes, dropout=0.5):
        super().__init__()
        self.conv1 = GCNConv(num_features, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, num_classes)
        self.dropout = dropout

    def forward(self, x, edge_index, batch=None):
        # Hidden layer: convolve, activate, regularize.
        hidden = F.relu(self.conv1(x, edge_index))
        hidden = F.dropout(hidden, p=self.dropout, training=self.training)
        logits = self.conv2(hidden, edge_index)
        # For graph-level tasks a batch vector is supplied: average node
        # embeddings per graph.
        if batch is not None:
            logits = global_mean_pool(logits, batch)
        return F.log_softmax(logits, dim=1)
class GAT(torch.nn.Module):
    """Two-layer graph attention network with a multi-head first layer."""

    def __init__(self, num_features, hidden_channels, num_classes, heads=8, dropout=0.6):
        super().__init__()
        self.conv1 = GATConv(num_features, hidden_channels, heads=heads, dropout=dropout)
        # Layer 1 concatenates its heads, so layer 2 consumes
        # hidden_channels * heads inputs and averages a single head.
        self.conv2 = GATConv(hidden_channels * heads, num_classes, heads=1,
                             concat=False, dropout=dropout)
        self.dropout = dropout

    def forward(self, x, edge_index, batch=None):
        h = F.dropout(x, p=self.dropout, training=self.training)
        h = F.elu(self.conv1(h, edge_index))
        h = F.dropout(h, p=self.dropout, training=self.training)
        h = self.conv2(h, edge_index)
        # Graph-level readout when a batch assignment vector is given.
        if batch is not None:
            h = global_mean_pool(h, batch)
        return F.log_softmax(h, dim=1)
class GraphSAGE(torch.nn.Module):
    """Two-layer GraphSAGE network for node- or graph-level tasks."""

    def __init__(self, num_features, hidden_channels, num_classes, dropout=0.5):
        super().__init__()
        self.conv1 = SAGEConv(num_features, hidden_channels)
        self.conv2 = SAGEConv(hidden_channels, num_classes)
        self.dropout = dropout

    def forward(self, x, edge_index, batch=None):
        # Hidden layer: aggregate neighbours, activate, regularize.
        h = F.relu(self.conv1(x, edge_index))
        h = F.dropout(h, p=self.dropout, training=self.training)
        h = self.conv2(h, edge_index)
        # Graph-level readout when a batch assignment vector is given.
        if batch is not None:
            h = global_mean_pool(h, batch)
        return F.log_softmax(h, dim=1)
# Registry mapping CLI model names to their implementing classes.
MODELS = {
    'gcn': GCN,
    'gat': GAT,
    'graphsage': GraphSAGE,
}
def train_node_classification(model, data, optimizer):
    """Run one full-batch training step; return the scalar training loss."""
    model.train()
    optimizer.zero_grad()
    logits = model(data.x, data.edge_index)
    mask = data.train_mask
    loss = F.nll_loss(logits[mask], data.y[mask])
    loss.backward()
    optimizer.step()
    return loss.item()
@torch.no_grad()
def test_node_classification(model, data):
"""Test for node classification."""
model.eval()
out = model(data.x, data.edge_index)
pred = out.argmax(dim=1)
accs = []
for mask in [data.train_mask, data.val_mask, data.test_mask]:
correct = (pred[mask] == data.y[mask]).sum()
accs.append(float(correct) / int(mask.sum()))
return accs
def train_graph_classification(model, loader, optimizer, device):
    """Run one epoch over loader; return the mean per-graph training loss."""
    model.train()
    running = 0.0
    for batch in loader:
        batch = batch.to(device)
        optimizer.zero_grad()
        out = model(batch.x, batch.edge_index, batch.batch)
        loss = F.nll_loss(out, batch.y)
        loss.backward()
        optimizer.step()
        # Weight by graphs per batch so the average is per-graph.
        running += loss.item() * batch.num_graphs
    return running / len(loader.dataset)
@torch.no_grad()
def test_graph_classification(model, loader, device):
"""Test for graph classification."""
model.eval()
correct = 0
for data in loader:
data = data.to(device)
out = model(data.x, data.edge_index, data.batch)
pred = out.argmax(dim=1)
correct += (pred == data.y).sum().item()
return correct / len(loader.dataset)
def benchmark_node_classification(model_name, dataset_name, epochs, lr, weight_decay, device):
    """Benchmark a model on node classification.

    Args:
        model_name: Key into MODELS ('gcn', 'gat', 'graphsage').
        dataset_name: Planetoid dataset name (Cora, CiteSeer, PubMed).
        epochs: Number of training epochs.
        lr: Learning rate.
        weight_decay: L2 regularization strength.
        device: torch.device to train on.

    Returns:
        dict with 'train_acc' (from the final epoch), 'val_acc'/'test_acc'
        (from the epoch with the best validation accuracy), and
        'train_time' in seconds.
    """
    # Load dataset (downloaded to /tmp on first use).
    dataset = Planetoid(root=f'/tmp/{dataset_name}', name=dataset_name)
    data = dataset[0].to(device)
    # Create model
    model_class = MODELS[model_name]
    model = model_class(
        num_features=dataset.num_features,
        hidden_channels=64,
        num_classes=dataset.num_classes
    ).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    # Training loop with model selection on validation accuracy.
    start_time = time.time()
    # Initialize up front so the return dict is well-defined even when
    # epochs < 1 (train_acc was previously unbound in that case).
    train_acc = 0.0
    best_val_acc = 0.0
    best_test_acc = 0.0
    for epoch in range(1, epochs + 1):
        train_node_classification(model, data, optimizer)
        train_acc, val_acc, test_acc = test_node_classification(model, data)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_test_acc = test_acc
    train_time = time.time() - start_time
    return {
        'train_acc': train_acc,
        'val_acc': best_val_acc,
        'test_acc': best_test_acc,
        'train_time': train_time,
    }
def benchmark_graph_classification(model_name, dataset_name, epochs, lr, device):
    """Benchmark a model on graph classification (TUDataset, 80/20 split)."""
    # Load, shuffle, and split the dataset 80/20 into train/test.
    dataset = TUDataset(root=f'/tmp/{dataset_name}', name=dataset_name)
    dataset = dataset.shuffle()
    split = int(len(dataset) * 0.8)
    train_loader = DataLoader(dataset[:split], batch_size=32, shuffle=True)
    test_loader = DataLoader(dataset[split:], batch_size=32)

    # Instantiate the requested architecture.
    model = MODELS[model_name](
        num_features=dataset.num_features,
        hidden_channels=64,
        num_classes=dataset.num_classes
    ).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # Train for the requested number of epochs, then evaluate once.
    # (Timing intentionally spans training plus the final evaluation,
    # matching the original measurement.)
    start_time = time.time()
    for _ in range(1, epochs + 1):
        train_graph_classification(model, train_loader, optimizer, device)
    train_accuracy = test_graph_classification(model, train_loader, device)
    test_accuracy = test_graph_classification(model, test_loader, device)
    elapsed = time.time() - start_time

    return {
        'train_acc': train_accuracy,
        'test_acc': test_accuracy,
        'train_time': elapsed,
    }
def run_benchmark(args):
    """Run benchmark experiments and print a mean/std summary.

    Args:
        args: argparse.Namespace with `models`, `dataset`, `epochs`,
            `runs`, `lr`, and `weight_decay` attributes.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")
    # Planetoid citation graphs are node-level tasks; everything else is
    # treated as a TUDataset-style graph-level task.
    if args.dataset in ['Cora', 'CiteSeer', 'PubMed']:
        task = 'node_classification'
    else:
        task = 'graph_classification'
    # NOTE: these f-strings previously used "\\n", which printed a
    # literal backslash-n instead of a newline.
    print(f"\nDataset: {args.dataset}")
    print(f"Task: {task}")
    print(f"Models: {', '.join(args.models)}")
    print(f"Epochs: {args.epochs}")
    print(f"Runs: {args.runs}")
    print("=" * 60)
    results = {model: [] for model in args.models}
    # Run experiments
    for run in range(args.runs):
        print(f"\nRun {run + 1}/{args.runs}")
        print("-" * 60)
        for model_name in args.models:
            if model_name not in MODELS:
                print(f"Unknown model: {model_name}")
                continue
            print(f" Training {model_name.upper()}...", end=" ")
            try:
                if task == 'node_classification':
                    result = benchmark_node_classification(
                        model_name, args.dataset, args.epochs,
                        args.lr, args.weight_decay, device
                    )
                else:
                    result = benchmark_graph_classification(
                        model_name, args.dataset, args.epochs, args.lr, device
                    )
                print(f"Test Acc: {result['test_acc']:.4f}, "
                      f"Time: {result['train_time']:.2f}s")
                results[model_name].append(result)
            except Exception as e:
                # Keep the benchmark running even if one model fails.
                print(f"Error: {e}")
    # Print summary
    print("\n" + "=" * 60)
    print("BENCHMARK RESULTS")
    print("=" * 60)
    for model_name in args.models:
        if not results[model_name]:
            continue
        test_accs = [r['test_acc'] for r in results[model_name]]
        times = [r['train_time'] for r in results[model_name]]
        print(f"\n{model_name.upper()}")
        print(f" Test Accuracy: {np.mean(test_accs):.4f} ± {np.std(test_accs):.4f}")
        print(f" Training Time: {np.mean(times):.2f} ± {np.std(times):.2f}s")
def main():
    """Parse CLI arguments and launch the benchmark."""
    parser = argparse.ArgumentParser(description="Benchmark GNN models")
    parser.add_argument('--models', nargs='+', default=['gcn'],
                        help='Model types to benchmark (gcn, gat, graphsage)')
    parser.add_argument('--dataset', type=str, default='Cora',
                        help='Dataset name (Cora, CiteSeer, PubMed, ENZYMES, PROTEINS)')
    parser.add_argument('--epochs', type=int, default=200,
                        help='Number of training epochs')
    parser.add_argument('--runs', type=int, default=5,
                        help='Number of runs to average over')
    parser.add_argument('--lr', type=float, default=0.01,
                        help='Learning rate')
    parser.add_argument('--weight-decay', type=float, default=5e-4,
                        help='Weight decay for node classification')
    run_benchmark(parser.parse_args())


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/torch_geometric/scripts/benchmark_model.py",
"license": "MIT License",
"lines": 243,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/torch_geometric/scripts/create_gnn_template.py | #!/usr/bin/env python3
"""
Generate boilerplate code for common GNN architectures in PyTorch Geometric.
This script creates ready-to-use GNN model templates with training loops,
evaluation metrics, and proper data handling.
Usage:
python create_gnn_template.py --model gcn --task node_classification --output my_model.py
python create_gnn_template.py --model gat --task graph_classification --output graph_classifier.py
"""
import argparse
from pathlib import Path
# Source-code templates, keyed first by task and then by architecture.
# Each value is a complete, runnable Python script that generate_template()
# writes out verbatim. Inside these strings "\\n" is intentional: it
# becomes the escape sequence "\n" in the generated file.
# Fix: the node-classification templates now initialize best_test_acc
# alongside best_val_acc, so the final print cannot raise NameError when
# validation accuracy never improves past 0.
TEMPLATES = {
    'node_classification': {
        'gcn': '''import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from torch_geometric.datasets import Planetoid


class GCN(torch.nn.Module):
    """Graph Convolutional Network for node classification."""

    def __init__(self, num_features, hidden_channels, num_classes, num_layers=2, dropout=0.5):
        super().__init__()
        self.convs = torch.nn.ModuleList()
        # First layer
        self.convs.append(GCNConv(num_features, hidden_channels))
        # Hidden layers
        for _ in range(num_layers - 2):
            self.convs.append(GCNConv(hidden_channels, hidden_channels))
        # Output layer
        self.convs.append(GCNConv(hidden_channels, num_classes))
        self.dropout = dropout

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        # Apply conv layers with ReLU and dropout
        for conv in self.convs[:-1]:
            x = conv(x, edge_index)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
        # Final layer without activation
        x = self.convs[-1](x, edge_index)
        return F.log_softmax(x, dim=1)


def train(model, data, optimizer):
    """Train the model for one epoch."""
    model.train()
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss.item()


@torch.no_grad()
def test(model, data):
    """Evaluate the model."""
    model.eval()
    out = model(data)
    pred = out.argmax(dim=1)
    accs = []
    for mask in [data.train_mask, data.val_mask, data.test_mask]:
        correct = (pred[mask] == data.y[mask]).sum()
        accs.append(int(correct) / int(mask.sum()))
    return accs


def main():
    # Load dataset
    dataset = Planetoid(root='/tmp/Cora', name='Cora')
    data = dataset[0]

    # Create model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GCN(
        num_features=dataset.num_features,
        hidden_channels=64,
        num_classes=dataset.num_classes,
        num_layers=3,
        dropout=0.5
    ).to(device)
    data = data.to(device)

    # Setup optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

    # Training loop
    print("Training GCN model...")
    best_val_acc = 0
    best_test_acc = 0
    for epoch in range(1, 201):
        loss = train(model, data, optimizer)
        train_acc, val_acc, test_acc = test(model, data)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_test_acc = test_acc
        if epoch % 10 == 0:
            print(f'Epoch {epoch:03d}, Loss: {loss:.4f}, '
                  f'Train: {train_acc:.4f}, Val: {val_acc:.4f}, Test: {test_acc:.4f}')

    print(f'\\nBest Test Accuracy: {best_test_acc:.4f}')


if __name__ == '__main__':
    main()
''',
        'gat': '''import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv
from torch_geometric.datasets import Planetoid


class GAT(torch.nn.Module):
    """Graph Attention Network for node classification."""

    def __init__(self, num_features, hidden_channels, num_classes, heads=8, dropout=0.6):
        super().__init__()
        self.conv1 = GATConv(num_features, hidden_channels, heads=heads, dropout=dropout)
        self.conv2 = GATConv(hidden_channels * heads, num_classes, heads=1,
                             concat=False, dropout=dropout)
        self.dropout = dropout

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.elu(self.conv1(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=1)


def train(model, data, optimizer):
    """Train the model for one epoch."""
    model.train()
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss.item()


@torch.no_grad()
def test(model, data):
    """Evaluate the model."""
    model.eval()
    out = model(data)
    pred = out.argmax(dim=1)
    accs = []
    for mask in [data.train_mask, data.val_mask, data.test_mask]:
        correct = (pred[mask] == data.y[mask]).sum()
        accs.append(int(correct) / int(mask.sum()))
    return accs


def main():
    # Load dataset
    dataset = Planetoid(root='/tmp/Cora', name='Cora')
    data = dataset[0]

    # Create model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GAT(
        num_features=dataset.num_features,
        hidden_channels=8,
        num_classes=dataset.num_classes,
        heads=8,
        dropout=0.6
    ).to(device)
    data = data.to(device)

    # Setup optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4)

    # Training loop
    print("Training GAT model...")
    best_val_acc = 0
    best_test_acc = 0
    for epoch in range(1, 201):
        loss = train(model, data, optimizer)
        train_acc, val_acc, test_acc = test(model, data)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_test_acc = test_acc
        if epoch % 10 == 0:
            print(f'Epoch {epoch:03d}, Loss: {loss:.4f}, '
                  f'Train: {train_acc:.4f}, Val: {val_acc:.4f}, Test: {test_acc:.4f}')

    print(f'\\nBest Test Accuracy: {best_test_acc:.4f}')


if __name__ == '__main__':
    main()
''',
        'graphsage': '''import torch
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv
from torch_geometric.datasets import Planetoid


class GraphSAGE(torch.nn.Module):
    """GraphSAGE for node classification."""

    def __init__(self, num_features, hidden_channels, num_classes, num_layers=2, dropout=0.5):
        super().__init__()
        self.convs = torch.nn.ModuleList()
        self.convs.append(SAGEConv(num_features, hidden_channels))
        for _ in range(num_layers - 2):
            self.convs.append(SAGEConv(hidden_channels, hidden_channels))
        self.convs.append(SAGEConv(hidden_channels, num_classes))
        self.dropout = dropout

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        for conv in self.convs[:-1]:
            x = conv(x, edge_index)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.convs[-1](x, edge_index)
        return F.log_softmax(x, dim=1)


def train(model, data, optimizer):
    model.train()
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss.item()


@torch.no_grad()
def test(model, data):
    model.eval()
    out = model(data)
    pred = out.argmax(dim=1)
    accs = []
    for mask in [data.train_mask, data.val_mask, data.test_mask]:
        correct = (pred[mask] == data.y[mask]).sum()
        accs.append(int(correct) / int(mask.sum()))
    return accs


def main():
    dataset = Planetoid(root='/tmp/Cora', name='Cora')
    data = dataset[0]

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GraphSAGE(
        num_features=dataset.num_features,
        hidden_channels=64,
        num_classes=dataset.num_classes,
        num_layers=2,
        dropout=0.5
    ).to(device)
    data = data.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

    print("Training GraphSAGE model...")
    best_val_acc = 0
    best_test_acc = 0
    for epoch in range(1, 201):
        loss = train(model, data, optimizer)
        train_acc, val_acc, test_acc = test(model, data)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_test_acc = test_acc
        if epoch % 10 == 0:
            print(f'Epoch {epoch:03d}, Loss: {loss:.4f}, '
                  f'Train: {train_acc:.4f}, Val: {val_acc:.4f}, Test: {test_acc:.4f}')

    print(f'\\nBest Test Accuracy: {best_test_acc:.4f}')


if __name__ == '__main__':
    main()
''',
    },
    'graph_classification': {
        'gin': '''import torch
import torch.nn.functional as F
from torch_geometric.nn import GINConv, global_add_pool
from torch_geometric.datasets import TUDataset
from torch_geometric.loader import DataLoader


class GIN(torch.nn.Module):
    """Graph Isomorphism Network for graph classification."""

    def __init__(self, num_features, hidden_channels, num_classes, num_layers=3, dropout=0.5):
        super().__init__()
        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        # Create MLP for first layer
        nn = torch.nn.Sequential(
            torch.nn.Linear(num_features, hidden_channels),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_channels, hidden_channels)
        )
        self.convs.append(GINConv(nn))
        self.batch_norms.append(torch.nn.BatchNorm1d(hidden_channels))

        # Hidden layers
        for _ in range(num_layers - 2):
            nn = torch.nn.Sequential(
                torch.nn.Linear(hidden_channels, hidden_channels),
                torch.nn.ReLU(),
                torch.nn.Linear(hidden_channels, hidden_channels)
            )
            self.convs.append(GINConv(nn))
            self.batch_norms.append(torch.nn.BatchNorm1d(hidden_channels))

        # Output MLP
        self.lin = torch.nn.Linear(hidden_channels, num_classes)
        self.dropout = dropout

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        for conv, batch_norm in zip(self.convs, self.batch_norms):
            x = conv(x, edge_index)
            x = batch_norm(x)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)

        # Global pooling
        x = global_add_pool(x, batch)

        # Output layer
        x = self.lin(x)
        return F.log_softmax(x, dim=1)


def train(model, loader, optimizer, device):
    """Train the model for one epoch."""
    model.train()
    total_loss = 0
    for data in loader:
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data)
        loss = F.nll_loss(out, data.y)
        loss.backward()
        optimizer.step()
        total_loss += loss.item() * data.num_graphs
    return total_loss / len(loader.dataset)


@torch.no_grad()
def test(model, loader, device):
    """Evaluate the model."""
    model.eval()
    correct = 0
    for data in loader:
        data = data.to(device)
        out = model(data)
        pred = out.argmax(dim=1)
        correct += (pred == data.y).sum().item()
    return correct / len(loader.dataset)


def main():
    # Load dataset
    dataset = TUDataset(root='/tmp/ENZYMES', name='ENZYMES')
    print(f"Dataset: {dataset}")
    print(f"Number of graphs: {len(dataset)}")
    print(f"Number of features: {dataset.num_features}")
    print(f"Number of classes: {dataset.num_classes}")

    # Shuffle and split
    dataset = dataset.shuffle()
    train_dataset = dataset[:int(len(dataset) * 0.8)]
    test_dataset = dataset[int(len(dataset) * 0.8):]

    # Create data loaders
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=32)

    # Create model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GIN(
        num_features=dataset.num_features,
        hidden_channels=64,
        num_classes=dataset.num_classes,
        num_layers=3,
        dropout=0.5
    ).to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    # Training loop
    print("\\nTraining GIN model...")
    for epoch in range(1, 101):
        loss = train(model, train_loader, optimizer, device)
        train_acc = test(model, train_loader, device)
        test_acc = test(model, test_loader, device)
        if epoch % 10 == 0:
            print(f'Epoch {epoch:03d}, Loss: {loss:.4f}, '
                  f'Train Acc: {train_acc:.4f}, Test Acc: {test_acc:.4f}')


if __name__ == '__main__':
    main()
''',
    },
}
def generate_template(model_type: str, task: str, output_path: str):
    """Generate a GNN template file.

    Args:
        model_type: Architecture key, e.g. 'gcn', 'gat', 'graphsage', 'gin'.
        task: 'node_classification' or 'graph_classification'.
        output_path: Destination file path (parent directories are created).

    Raises:
        ValueError: If the task or the model is not present in TEMPLATES.
    """
    if task not in TEMPLATES:
        raise ValueError(f"Unknown task: {task}. Available: {list(TEMPLATES.keys())}")
    if model_type not in TEMPLATES[task]:
        raise ValueError(f"Model {model_type} not available for task {task}. "
                         f"Available: {list(TEMPLATES[task].keys())}")
    template = TEMPLATES[task][model_type]
    # Write to file, creating parent directories if needed.
    output_file = Path(output_path)
    output_file.parent.mkdir(parents=True, exist_ok=True)
    with open(output_file, 'w') as f:
        f.write(template)
    print(f"✓ Generated {model_type.upper()} template for {task}")
    print(f" Saved to: {output_path}")
    # Was "\\n..." in the original, which printed a literal backslash-n.
    print(f"\nTo run the template:")
    print(f" python {output_path}")
def list_templates():
    """Print all available templates, grouped by task."""
    print("Available GNN Templates")
    print("=" * 50)
    for task, models in TEMPLATES.items():
        # Was "\\n..." in the original, which printed a literal backslash-n.
        print(f"\n{task.upper()}")
        print("-" * 50)
        for model in models.keys():
            print(f" - {model}")
    print()
def main():
    """CLI entry point: parse arguments and generate or list templates."""
    parser = argparse.ArgumentParser(
        description="Generate GNN model templates",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python create_gnn_template.py --model gcn --task node_classification --output gcn_model.py
  python create_gnn_template.py --model gin --task graph_classification --output gin_model.py
  python create_gnn_template.py --list
        """
    )
    parser.add_argument('--model', type=str,
                        help='Model type (gcn, gat, graphsage, gin)')
    parser.add_argument('--task', type=str,
                        help='Task type (node_classification, graph_classification)')
    parser.add_argument('--output', type=str, default='gnn_model.py',
                        help='Output file path (default: gnn_model.py)')
    parser.add_argument('--list', action='store_true',
                        help='List all available templates')
    args = parser.parse_args()
    if args.list:
        list_templates()
        return
    if not args.model or not args.task:
        # Not enough arguments: show usage plus the template catalogue.
        parser.print_help()
        # Was "\\n" in the original, which printed a literal backslash-n.
        print("\n" + "=" * 50)
        list_templates()
        return
    try:
        generate_template(args.model, args.task, args.output)
    except ValueError as e:
        print(f"Error: {e}")
        print("\nUse --list to see available templates")


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/torch_geometric/scripts/create_gnn_template.py",
"license": "MIT License",
"lines": 400,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/scientific/torch_geometric/scripts/visualize_graph.py | #!/usr/bin/env python3
"""
Visualize PyTorch Geometric graph structures using networkx and matplotlib.
This script provides utilities to visualize Data objects, including:
- Graph structure (nodes and edges)
- Node features (as colors)
- Edge attributes (as edge colors/widths)
- Community/cluster assignments
Usage:
python visualize_graph.py --dataset Cora --output graph.png
Or import and use:
from scripts.visualize_graph import visualize_data
visualize_data(data, title="My Graph", show_labels=True)
"""
import argparse
import matplotlib.pyplot as plt
import networkx as nx
import torch
from typing import Optional, Union
import numpy as np
def visualize_data(
    data,
    title: str = "Graph Visualization",
    node_color_attr: Optional[str] = None,
    edge_color_attr: Optional[str] = None,
    show_labels: bool = False,
    node_size: int = 300,
    figsize: tuple = (12, 10),
    layout: str = "spring",
    output_path: Optional[str] = None,
    max_nodes: Optional[int] = None,
):
    """
    Visualize a PyTorch Geometric Data object.

    Args:
        data: PyTorch Geometric Data object
        title: Plot title
        node_color_attr: Data attribute to use for node colors (e.g., 'y', 'train_mask')
        edge_color_attr: Data attribute to use for edge colors
        show_labels: Whether to show node labels
        node_size: Size of nodes in visualization
        figsize: Figure size (width, height)
        layout: Graph layout algorithm ('spring', 'circular', 'kamada_kawai', 'spectral')
        output_path: Path to save figure (if None, displays interactively)
        max_nodes: Maximum number of nodes to visualize (samples if exceeded)

    Raises:
        ValueError: If `layout` is not one of the supported algorithms.
    """
    # Sample nodes if graph is too large to draw legibly.
    # NOTE(review): sampling is uniform-random via torch.randperm, so the
    # rendered subgraph changes between calls unless the caller seeds torch.
    if max_nodes and data.num_nodes > max_nodes:
        print(f"Graph has {data.num_nodes} nodes. Sampling {max_nodes} nodes for visualization.")
        node_indices = torch.randperm(data.num_nodes)[:max_nodes]
        data = data.subgraph(node_indices)
    # Convert to networkx: an undirected Graph only when every edge has its
    # reverse (per the is_undirected helper in this module), else a DiGraph.
    G = nx.Graph() if is_undirected(data.edge_index) else nx.DiGraph()
    # Add nodes
    G.add_nodes_from(range(data.num_nodes))
    # Add edges
    edge_index = data.edge_index.cpu().numpy()
    edges = list(zip(edge_index[0], edge_index[1]))
    G.add_edges_from(edges)
    # Setup figure
    fig, ax = plt.subplots(figsize=figsize)
    # Choose layout
    if layout == "spring":
        pos = nx.spring_layout(G, k=0.5, iterations=50)
    elif layout == "circular":
        pos = nx.circular_layout(G)
    elif layout == "kamada_kawai":
        pos = nx.kamada_kawai_layout(G)
    elif layout == "spectral":
        pos = nx.spectral_layout(G)
    else:
        raise ValueError(f"Unknown layout: {layout}")
    # Determine node colors: use the requested attribute when present,
    # otherwise fall back to a uniform color.
    if node_color_attr and hasattr(data, node_color_attr):
        node_colors = getattr(data, node_color_attr).cpu().numpy()
        if node_colors.dtype == bool:
            # Boolean masks (e.g. train_mask) map to 0/1 color values.
            node_colors = node_colors.astype(int)
        if len(node_colors.shape) > 1:
            # Multi-dimensional features - use first dimension
            node_colors = node_colors[:, 0]
    else:
        node_colors = 'skyblue'
    # Determine edge colors (same fallback scheme as node colors).
    if edge_color_attr and hasattr(data, edge_color_attr):
        edge_colors = getattr(data, edge_color_attr).cpu().numpy()
        if len(edge_colors.shape) > 1:
            edge_colors = edge_colors[:, 0]
    else:
        edge_colors = 'gray'
    # Draw graph
    nx.draw_networkx_nodes(
        G, pos,
        node_color=node_colors,
        node_size=node_size,
        cmap=plt.cm.viridis,
        ax=ax
    )
    nx.draw_networkx_edges(
        G, pos,
        edge_color=edge_colors,
        alpha=0.3,
        arrows=isinstance(G, nx.DiGraph),  # arrowheads only for directed graphs
        arrowsize=10,
        ax=ax
    )
    if show_labels:
        nx.draw_networkx_labels(G, pos, font_size=8, ax=ax)
    ax.set_title(title, fontsize=16, fontweight='bold')
    ax.axis('off')
    # Add colorbar if using numeric node colors (skipped when the
    # 'skyblue' string fallback was used).
    if node_color_attr and isinstance(node_colors, np.ndarray):
        sm = plt.cm.ScalarMappable(
            cmap=plt.cm.viridis,
            norm=plt.Normalize(vmin=node_colors.min(), vmax=node_colors.max())
        )
        sm.set_array([])
        cbar = plt.colorbar(sm, ax=ax, fraction=0.046, pad=0.04)
        cbar.set_label(node_color_attr, rotation=270, labelpad=20)
    plt.tight_layout()
    if output_path:
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"Figure saved to {output_path}")
    else:
        plt.show()
    plt.close()
def is_undirected(edge_index):
    """Check if graph is undirected.

    Args:
        edge_index: LongTensor of shape [2, num_edges] (source row, target row).

    Returns:
        bool: True when every edge (u, v) also appears reversed as (v, u),
        i.e. the edge set is symmetric.
    """
    row, col = edge_index
    # Compare the edge set with its reversal; equality means every edge is
    # mirrored. (An unused edge-count local was removed here.)
    edges = set(zip(row.tolist(), col.tolist()))
    reverse_edges = set(zip(col.tolist(), row.tolist()))
    return edges == reverse_edges
def plot_degree_distribution(data, output_path: Optional[str] = None):
    """Plot the degree distribution of the graph.

    Draws a linear-scale histogram and a log-log scatter (useful for spotting
    power-law-like degree distributions) side by side.

    Args:
        data: PyTorch Geometric Data object with `edge_index` and `num_nodes`.
        output_path: If given, save the figure there; otherwise show it.
    """
    # Local import so the module can be used without torch_geometric's utils.
    from torch_geometric.utils import degree
    # Degree computed over the second edge_index row (targets under PyG's
    # row=source / col=target convention), i.e. in-degree per node.
    row, col = data.edge_index
    deg = degree(col, data.num_nodes).cpu().numpy()
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
    # Histogram
    ax1.hist(deg, bins=50, edgecolor='black', alpha=0.7)
    ax1.set_xlabel('Degree', fontsize=12)
    ax1.set_ylabel('Frequency', fontsize=12)
    ax1.set_title('Degree Distribution', fontsize=14, fontweight='bold')
    ax1.grid(alpha=0.3)
    # Log-log plot
    unique_degrees, counts = np.unique(deg, return_counts=True)
    ax2.loglog(unique_degrees, counts, 'o-', alpha=0.7)
    ax2.set_xlabel('Degree (log scale)', fontsize=12)
    ax2.set_ylabel('Frequency (log scale)', fontsize=12)
    ax2.set_title('Degree Distribution (Log-Log)', fontsize=14, fontweight='bold')
    ax2.grid(alpha=0.3)
    plt.tight_layout()
    if output_path:
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"Degree distribution saved to {output_path}")
    else:
        plt.show()
    plt.close()
def plot_graph_statistics(data, output_path: Optional[str] = None):
    """Plot various graph statistics.

    Renders node/edge counts, degree summary, self-loop and undirectedness
    flags (plus feature/class counts when present) as a text figure, and
    also prints the same summary to stdout.

    Args:
        data: PyTorch Geometric Data object.
        output_path: If given, save the figure there; otherwise show it.
    """
    # Local import; `is_undirected` is aliased to avoid shadowing this
    # module's own helper of the same name.
    from torch_geometric.utils import degree, contains_self_loops, is_undirected as check_undirected
    # Compute statistics (degree over the target row = in-degree per node).
    row, col = data.edge_index
    deg = degree(col, data.num_nodes).cpu().numpy()
    stats = {
        'Nodes': data.num_nodes,
        'Edges': data.num_edges,
        'Avg Degree': deg.mean(),
        'Max Degree': deg.max(),
        'Self-loops': contains_self_loops(data.edge_index),
        'Undirected': check_undirected(data.edge_index),
    }
    # Optional attributes: only reported when the Data object carries them.
    if hasattr(data, 'num_node_features'):
        stats['Node Features'] = data.num_node_features
    if hasattr(data, 'num_edge_features') and data.edge_attr is not None:
        stats['Edge Features'] = data.num_edge_features
    if hasattr(data, 'y'):
        if data.y.dim() == 1:
            # Class count inferred from the maximum label value.
            stats['Classes'] = int(data.y.max().item()) + 1
    # Create text plot (a figure containing only the formatted summary).
    fig, ax = plt.subplots(figsize=(8, 6))
    ax.axis('off')
    text = "Graph Statistics\n" + "=" * 40 + "\n\n"
    for key, value in stats.items():
        text += f"{key:20s}: {value}\n"
    ax.text(0.1, 0.5, text, fontsize=14, family='monospace',
            verticalalignment='center', transform=ax.transAxes)
    plt.tight_layout()
    if output_path:
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"Statistics saved to {output_path}")
    else:
        plt.show()
    plt.close()
    # Print to console as well
    print("\n" + text)
def main():
    """CLI entry point: load a dataset by name and produce requested plots.

    Tries Planetoid (citation networks) first, then TUDataset (graph
    classification benchmarks) for the ``--dataset`` name.
    """
    parser = argparse.ArgumentParser(description="Visualize PyTorch Geometric graphs")
    parser.add_argument('--dataset', type=str, default='Cora',
                        help='Dataset name (e.g., Cora, CiteSeer, ENZYMES)')
    parser.add_argument('--output', type=str, default=None,
                        help='Output file path for visualization')
    parser.add_argument('--node-color', type=str, default='y',
                        help='Attribute to use for node colors')
    parser.add_argument('--layout', type=str, default='spring',
                        choices=['spring', 'circular', 'kamada_kawai', 'spectral'],
                        help='Graph layout algorithm')
    parser.add_argument('--show-labels', action='store_true',
                        help='Show node labels')
    parser.add_argument('--max-nodes', type=int, default=500,
                        help='Maximum nodes to visualize')
    parser.add_argument('--stats', action='store_true',
                        help='Show graph statistics')
    parser.add_argument('--degree', action='store_true',
                        help='Show degree distribution')
    args = parser.parse_args()

    # Load dataset
    print(f"Loading dataset: {args.dataset}")
    try:
        # Try Planetoid datasets
        from torch_geometric.datasets import Planetoid
        dataset = Planetoid(root=f'/tmp/{args.dataset}', name=args.dataset)
        data = dataset[0]
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed so those still propagate.
    except Exception:
        try:
            # Try TUDataset
            from torch_geometric.datasets import TUDataset
            dataset = TUDataset(root=f'/tmp/{args.dataset}', name=args.dataset)
            data = dataset[0]
        except Exception as e:
            print(f"Error loading dataset: {e}")
            print("Supported datasets: Cora, CiteSeer, PubMed, ENZYMES, PROTEINS, etc.")
            return
    print(f"Loaded {args.dataset}: {data.num_nodes} nodes, {data.num_edges} edges")

    # Optional auxiliary plots; their filenames are derived from --output
    # when it was provided (the .replace is a no-op for non-.png outputs).
    if args.stats:
        stats_output = args.output.replace('.png', '_stats.png') if args.output else None
        plot_graph_statistics(data, stats_output)
    if args.degree:
        degree_output = args.output.replace('.png', '_degree.png') if args.output else None
        plot_degree_distribution(data, degree_output)

    # Main visualization
    visualize_data(
        data,
        title=f"{args.dataset} Graph",
        node_color_attr=args.node_color,
        show_labels=args.show_labels,
        layout=args.layout,
        output_path=args.output,
        max_nodes=args.max_nodes
    )
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/torch_geometric/scripts/visualize_graph.py",
"license": "MIT License",
"lines": 255,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/treatment-plans/scripts/check_completeness.py | #!/usr/bin/env python3
"""
Check Treatment Plan Completeness
Validates that all required sections are present in a treatment plan.
"""
import sys
import re
import argparse
from pathlib import Path
from typing import List, Tuple
# Required sections for all treatment plans.
# Each entry is a regex matched case-insensitively against the LaTeX source;
# they target starred section headers like \section*{Treatment Goals}.
# The doubled backslashes are needed because the regex must match a literal
# backslash in the LaTeX command.
REQUIRED_SECTIONS = [
    r'\\section\*\{.*Patient Information',
    r'\\section\*\{.*Diagnosis.*Assessment',
    r'\\section\*\{.*Goals',
    r'\\section\*\{.*Interventions',
    r'\\section\*\{.*Timeline.*Schedule',
    r'\\section\*\{.*Monitoring',
    r'\\section\*\{.*Outcomes',
    r'\\section\*\{.*Follow[- ]?up',
    r'\\section\*\{.*Education',
    r'\\section\*\{.*Risk.*Safety',
]
# Section descriptions for user-friendly output.
# Keys are positional indices into REQUIRED_SECTIONS — keep the two in sync.
SECTION_DESCRIPTIONS = {
    0: 'Patient Information (de-identified)',
    1: 'Diagnosis and Assessment',
    2: 'Treatment Goals (SMART format)',
    3: 'Interventions (pharmacological, non-pharmacological, procedural)',
    4: 'Timeline and Schedule',
    5: 'Monitoring Parameters',
    6: 'Expected Outcomes',
    7: 'Follow-up Plan',
    8: 'Patient Education',
    9: 'Risk Mitigation and Safety'
}
def read_file(filepath: Path) -> str:
    """Return the full UTF-8 text of *filepath*, exiting with status 1 on failure."""
    try:
        with open(filepath, 'r', encoding='utf-8') as handle:
            contents = handle.read()
    except FileNotFoundError:
        print(f"Error: File not found: {filepath}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Error reading file: {e}", file=sys.stderr)
        sys.exit(1)
    return contents
def check_sections(content: str) -> Tuple[List[bool], List[str]]:
    """Match every REQUIRED_SECTIONS regex against *content*.

    Returns (checklist, missing): a per-section presence flag list, plus
    the human-readable descriptions of the sections that were not found.
    """
    checklist: List[bool] = []
    missing: List[str] = []
    for idx, pattern in enumerate(REQUIRED_SECTIONS):
        found = re.search(pattern, content, re.IGNORECASE) is not None
        checklist.append(found)
        if not found:
            missing.append(SECTION_DESCRIPTIONS[idx])
    return checklist, missing
def check_smart_goals(content: str) -> Tuple[bool, List[str]]:
    """Report which SMART criteria keywords appear in *content*.

    Returns (has_smart, missing_criteria); has_smart is True only when
    every criterion keyword is found (case-insensitive, word-bounded).
    """
    smart_criteria = {
        'Specific': r'\bspecific\b',
        'Measurable': r'\bmeasurable\b',
        'Achievable': r'\bachievable\b',
        'Relevant': r'\brelevant\b',
        'Time-bound': r'\btime[- ]?bound\b'
    }
    missing = [name for name, rx in smart_criteria.items()
               if not re.search(rx, content, re.IGNORECASE)]
    return not missing, missing
def check_hipaa_notice(content: str) -> bool:
    """True when the text mentions HIPAA, de-identification, or PHI."""
    notice_rx = r'HIPAA|de-identif|protected health information|PHI'
    return re.search(notice_rx, content, re.IGNORECASE) is not None
def check_provider_signature(content: str) -> bool:
    """True when a signature section or line is present.

    NOTE(review): alternation precedence means the bare 'Signature'
    alternative matches anywhere, making the first two alternatives
    effectively redundant — preserved as-is from the original.
    """
    pattern = r'\\section\*\{.*Signature|Provider Signature|Signature'
    return re.search(pattern, content, re.IGNORECASE) is not None
def check_placeholders_remaining(content: str) -> Tuple[int, List[str]]:
    """Count [bracketed] placeholders that still need customization.

    LaTeX-ish bracket contents (commands, bare numbers, cite/ref text)
    are ignored. Returns (count, up to five example placeholders).
    """
    def _needs_customizing(token: str) -> bool:
        lowered = token.lower()
        return not (token.startswith('\\') or token.isdigit()
                    or 'cite' in lowered or 'ref' in lowered)

    candidates = re.findall(r'\[([^\]]+)\]', content)
    remaining = [tok for tok in candidates if _needs_customizing(tok)]
    return len(remaining), remaining[:5]
def display_results(filepath: Path, checklist: List[bool], missing: List[str],
                    smart_complete: bool, smart_missing: List[str],
                    has_hipaa: bool, has_signature: bool,
                    placeholder_count: int, placeholder_samples: List[str]) -> int:
    """Print the full completeness report to stdout.

    Despite the name, this also computes a weighted overall score and
    returns the process exit code: 0 when >=80% of required sections are
    present, 1 otherwise.

    NOTE(review): the "β" / "β’" / "β₯" glyphs in the output strings look
    like mis-encoded status marks (✓/✗, •, ≥) — confirm the intended
    symbols against the original file encoding.
    """
    total_sections = len(REQUIRED_SECTIONS)
    present_count = sum(checklist)
    completeness_pct = (present_count / total_sections) * 100
    print("\n" + "="*70)
    print("TREATMENT PLAN COMPLETENESS CHECK")
    print("="*70)
    print(f"\nFile: {filepath}")
    print(f"File size: {filepath.stat().st_size:,} bytes")
    # Overall completeness
    print("\n" + "-"*70)
    print("OVERALL COMPLETENESS")
    print("-"*70)
    print(f"Required sections present: {present_count}/{total_sections} ({completeness_pct:.0f}%)")
    if completeness_pct == 100:
        print("β All required sections present")
    else:
        print(f"β {len(missing)} section(s) missing")
    # Section details — relies on SECTION_DESCRIPTIONS iterating in index order.
    print("\n" + "-"*70)
    print("SECTION CHECKLIST")
    print("-"*70)
    for i, (present, desc) in enumerate(zip(checklist, SECTION_DESCRIPTIONS.values())):
        status = "β" if present else "β"
        print(f"{status} {desc}")
    # Missing sections
    if missing:
        print("\n" + "-"*70)
        print("MISSING SECTIONS")
        print("-"*70)
        for section in missing:
            print(f"  β’ {section}")
    # SMART goals
    print("\n" + "-"*70)
    print("SMART GOALS CHECK")
    print("-"*70)
    if smart_complete:
        print("β All SMART criteria mentioned in document")
    else:
        print(f"β {len(smart_missing)} SMART criterion/criteria not found:")
        for criterion in smart_missing:
            print(f"  β’ {criterion}")
        print("\nNote: Goals should be Specific, Measurable, Achievable, Relevant, Time-bound")
    # HIPAA notice
    print("\n" + "-"*70)
    print("PRIVACY AND COMPLIANCE")
    print("-"*70)
    if has_hipaa:
        print("β HIPAA/de-identification notice present")
    else:
        print("β HIPAA de-identification notice not found")
        print("  Recommendation: Include HIPAA Safe Harbor de-identification guidance")
    if has_signature:
        print("β Provider signature section present")
    else:
        print("β Provider signature section not found")
    # Placeholders
    print("\n" + "-"*70)
    print("CUSTOMIZATION STATUS")
    print("-"*70)
    if placeholder_count == 0:
        print("β No uncustomized placeholders detected")
    else:
        print(f"β {placeholder_count} placeholder(s) may need customization")
        print("\nExamples:")
        for sample in placeholder_samples:
            print(f"  β’ [{sample}]")
        print("\nRecommendation: Replace all [bracketed placeholders] with patient-specific information")
    # Summary
    print("\n" + "="*70)
    print("SUMMARY")
    print("="*70)
    # Calculate overall score: an unweighted mean of five components,
    # each normalized to [0, 1].
    score_components = [
        completeness_pct / 100,                   # Section completeness (0-1)
        1.0 if smart_complete else 0.6,           # SMART goals (full or partial credit)
        1.0 if has_hipaa else 0.0,                # HIPAA notice (binary)
        1.0 if has_signature else 0.0,            # Signature (binary)
        1.0 if placeholder_count == 0 else 0.5    # Customization (full or partial)
    ]
    overall_score = (sum(score_components) / len(score_components)) * 100
    print(f"\nOverall completeness score: {overall_score:.0f}%")
    if overall_score >= 90:
        print("Status: β EXCELLENT - Treatment plan is comprehensive")
    elif overall_score >= 75:
        print("Status: β GOOD - Minor improvements needed")
    elif overall_score >= 60:
        print("Status: β FAIR - Several sections need attention")
    else:
        print("Status: β INCOMPLETE - Significant work needed")
    print("\n" + "="*70)
    # Return exit code based on completeness (section presence only,
    # not the weighted overall score).
    return 0 if completeness_pct >= 80 else 1
def main():
    """CLI entry point: run all checks on one plan file and exit accordingly.

    Exit codes: 0 complete (>=80% sections), 1 incomplete, 2 file error.
    """
    parser = argparse.ArgumentParser(
        description='Check treatment plan completeness',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Check a treatment plan file
  python check_completeness.py my_treatment_plan.tex

  # Check and exit with error code if incomplete (for CI/CD)
  python check_completeness.py plan.tex && echo "Complete"

This script checks for:
  - All required sections (10 core sections)
  - SMART goal criteria
  - HIPAA de-identification notice
  - Provider signature section
  - Uncustomized placeholders

Exit codes:
  0 - All required sections present (β₯80% complete)
  1 - Missing required sections (<80% complete)
  2 - File error or invalid arguments
"""
    )
    parser.add_argument(
        'file',
        type=Path,
        help='Treatment plan file to check (.tex format)'
    )
    # NOTE(review): --verbose is accepted but never consulted below.
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Show detailed output'
    )
    args = parser.parse_args()
    # Check file exists and is .tex (non-.tex is a warning, not an error)
    if not args.file.exists():
        print(f"Error: File not found: {args.file}", file=sys.stderr)
        sys.exit(2)
    if args.file.suffix.lower() not in ['.tex', '.txt']:
        print(f"Warning: Expected .tex file, got {args.file.suffix}", file=sys.stderr)
    # Read file (exits with 1 on read failure inside read_file)
    content = read_file(args.file)
    # Perform checks
    checklist, missing = check_sections(content)
    smart_complete, smart_missing = check_smart_goals(content)
    has_hipaa = check_hipaa_notice(content)
    has_signature = check_provider_signature(content)
    placeholder_count, placeholder_samples = check_placeholders_remaining(content)
    # Display results; display_results also computes the exit code
    exit_code = display_results(
        args.file, checklist, missing,
        smart_complete, smart_missing,
        has_hipaa, has_signature,
        placeholder_count, placeholder_samples
    )
    sys.exit(exit_code)
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/treatment-plans/scripts/check_completeness.py",
"license": "MIT License",
"lines": 256,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/treatment-plans/scripts/generate_template.py | #!/usr/bin/env python3
"""
Generate Treatment Plan Template
Interactive script to select and generate treatment plan templates.
"""
import os
import sys
import shutil
import argparse
from pathlib import Path
from datetime import datetime
# Template types and descriptions.
# Keys are the values accepted by --type; 'file' names a .tex template that
# must exist in the sibling assets/ directory (see get_templates_dir()).
TEMPLATES = {
    'general_medical': {
        'name': 'General Medical Treatment Plan',
        'file': 'general_medical_treatment_plan.tex',
        'description': 'For primary care and chronic disease management (diabetes, hypertension, etc.)'
    },
    'rehabilitation': {
        'name': 'Rehabilitation Treatment Plan',
        'file': 'rehabilitation_treatment_plan.tex',
        'description': 'For physical therapy, occupational therapy, and rehabilitation services'
    },
    'mental_health': {
        'name': 'Mental Health Treatment Plan',
        'file': 'mental_health_treatment_plan.tex',
        'description': 'For psychiatric and behavioral health treatment'
    },
    'chronic_disease': {
        'name': 'Chronic Disease Management Plan',
        'file': 'chronic_disease_management_plan.tex',
        'description': 'For complex multimorbidity and long-term care coordination'
    },
    'perioperative': {
        'name': 'Perioperative Care Plan',
        'file': 'perioperative_care_plan.tex',
        'description': 'For surgical and procedural patient management'
    },
    'pain_management': {
        'name': 'Pain Management Plan',
        'file': 'pain_management_plan.tex',
        'description': 'For acute and chronic pain treatment (multimodal approach)'
    }
}
def get_templates_dir():
    """Return the skill's assets/ directory that holds the .tex templates."""
    # The script lives in .../treatment-plans/scripts/; templates are kept
    # in the sibling assets/ directory one level up.
    return Path(__file__).parent.parent / 'assets'
def list_templates():
    """Print a numbered catalog of all available templates to stdout."""
    banner = "=" * 70
    print("\n" + banner)
    print("AVAILABLE TREATMENT PLAN TEMPLATES")
    print(banner)
    for position, (key, info) in enumerate(TEMPLATES.items(), start=1):
        print(f"\n{position}. {info['name']}")
        print(f"   Type: {key}")
        print(f"   File: {info['file']}")
        print(f"   Description: {info['description']}")
    print("\n" + banner)
def interactive_selection():
    """Prompt until the user picks a valid template number; 'q' exits.

    Returns the selected TEMPLATES key.
    """
    list_templates()
    keys = list(TEMPLATES.keys())
    while True:
        raw = input("\nSelect template number (1-6) or 'q' to quit: ").strip().lower()
        if raw == 'q':
            print("Exiting...")
            sys.exit(0)
        try:
            selection = int(raw)
        except ValueError:
            print("Invalid input. Please enter a number or 'q' to quit.")
            continue
        if 1 <= selection <= len(TEMPLATES):
            return keys[selection - 1]
        print(f"Please enter a number between 1 and {len(TEMPLATES)}.")
def get_output_filename(template_key, custom_name=None):
    """Build the output .tex filename.

    A caller-supplied name gets a .tex suffix appended if missing;
    otherwise the name is derived from the template key plus today's
    date as <key>_plan_YYYYMMDD.tex.
    """
    if custom_name:
        return custom_name if custom_name.endswith('.tex') else custom_name + '.tex'
    stamp = datetime.now().strftime('%Y%m%d')
    return f"{template_key}_plan_{stamp}.tex"
def copy_template(template_key, output_path):
    """Copy the selected template to *output_path*, creating parent dirs.

    Raises FileNotFoundError when the packaged template file is missing.
    Returns the destination as a Path.
    """
    source_path = get_templates_dir() / TEMPLATES[template_key]['file']
    if not source_path.exists():
        raise FileNotFoundError(f"Template not found: {source_path}")

    destination = Path(output_path)
    destination.parent.mkdir(parents=True, exist_ok=True)
    # copy2 preserves timestamps/metadata, matching the original behavior.
    shutil.copy2(source_path, destination)
    return destination
def display_success(output_path, template_key):
    """Print a success banner plus next-step instructions.

    Args:
        output_path: Path of the generated file (uses .name and file size).
        template_key: key into TEMPLATES for the generated template.

    NOTE(review): the "β" glyph looks like a mis-encoded ✓ — confirm.
    """
    template_info = TEMPLATES[template_key]
    print("\n" + "="*70)
    print("β TEMPLATE GENERATED SUCCESSFULLY")
    print("="*70)
    print(f"\nTemplate: {template_info['name']}")
    print(f"Output file: {output_path}")
    print(f"File size: {os.path.getsize(output_path):,} bytes")
    print("\n" + "-"*70)
    print("NEXT STEPS:")
    print("-"*70)
    print("\n1. CUSTOMIZE THE TEMPLATE:")
    print("   - Open the .tex file in your LaTeX editor")
    print("   - Replace all [bracketed placeholders] with patient-specific information")
    print("   - Remove or modify sections as appropriate for your patient")
    print("\n2. COMPILE TO PDF:")
    print(f"   $ pdflatex {output_path.name}")
    print("\n3. VALIDATE (optional):")
    print(f"   $ python check_completeness.py {output_path.name}")
    print(f"   $ python validate_treatment_plan.py {output_path.name}")
    print("\n4. DE-IDENTIFY BEFORE SHARING:")
    print("   - Remove all HIPAA identifiers (18 identifiers)")
    print("   - See regulatory_compliance.md reference for details")
    print("\n" + "="*70)
def main():
    """CLI entry point: choose a template (flag or interactive) and copy it."""
    parser = argparse.ArgumentParser(
        description='Generate treatment plan template',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Interactive mode (recommended for first-time users)
  python generate_template.py

  # Direct generation with type specification
  python generate_template.py --type general_medical --output diabetes_plan.tex

  # Generate with default filename
  python generate_template.py --type mental_health

  # List available templates
  python generate_template.py --list

Available template types:
  general_medical, rehabilitation, mental_health, chronic_disease,
  perioperative, pain_management
"""
    )
    parser.add_argument(
        '--type',
        choices=list(TEMPLATES.keys()),
        help='Template type to generate'
    )
    parser.add_argument(
        '--output',
        help='Output filename (default: auto-generated with timestamp)'
    )
    parser.add_argument(
        '--list',
        action='store_true',
        help='List available templates and exit'
    )
    args = parser.parse_args()
    # List templates and exit
    if args.list:
        list_templates()
        return
    # Determine template type: --type wins, otherwise prompt the user
    if args.type:
        template_key = args.type
        print(f"\nGenerating template: {TEMPLATES[template_key]['name']}")
    else:
        # Interactive mode
        template_key = interactive_selection()
    # Determine output filename
    if args.output:
        output_filename = args.output
    else:
        output_filename = get_output_filename(template_key)
    # Default output to current directory
    output_path = Path.cwd() / output_filename
    # Confirm overwrite if file exists
    if output_path.exists():
        response = input(f"\nFile {output_filename} already exists. Overwrite? (y/n): ").strip().lower()
        if response != 'y':
            print("Cancelled.")
            return
    # Copy template; any failure exits with status 1
    try:
        output_path = copy_template(template_key, output_path)
        display_success(output_path, template_key)
    except Exception as e:
        print(f"\nβ ERROR: {e}", file=sys.stderr)
        sys.exit(1)
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/treatment-plans/scripts/generate_template.py",
"license": "MIT License",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/treatment-plans/scripts/timeline_generator.py | #!/usr/bin/env python3
"""
Treatment Timeline Generator
Generates visual treatment timelines from treatment plan files.
"""
import sys
import re
import argparse
from pathlib import Path
from datetime import datetime, timedelta
from typing import List, Dict, Tuple
# Try to import matplotlib, but make it optional.
# HAS_MATPLOTLIB gates the visual (Gantt-style) output path in
# create_visual_timeline(); the text timeline always works without it.
try:
    import matplotlib.pyplot as plt
    import matplotlib.dates as mdates
    from matplotlib.patches import Rectangle
    HAS_MATPLOTLIB = True
except ImportError:
    HAS_MATPLOTLIB = False
def extract_timeline_info(content: str) -> Dict[str, List[Tuple[str, str]]]:
    """Scrape phases, appointments, and milestones out of a plan's text.

    Each value is a list of (timeframe, description) tuples pulled by
    regex; nothing is de-duplicated, so overlapping patterns may yield
    repeated entries.
    """
    # Treatment phases: "Week 1-4: ...", "Months 1-3: ...", "POD 1-3: ..."
    phase_patterns = [
        r'(Week[s]?\s*\d+[-–]\d+|Month[s]?\s*\d+[-–]\d+)[:\s]+([^\n]+)',
        r'(POD\s*\d+[-–]\d+)[:\s]+([^\n]+)',
        r'(\d+[-–]\d+\s*week[s]?)[:\s]+([^\n]+)'
    ]
    # Appointments: "Week 2: Visit ...", "Every 2 weeks: therapy ..."
    apt_patterns = [
        r'(Week\s*\d+|Month\s*\d+|POD\s*\d+)[:\s]+(Visit|Appointment|Follow-up|Check-up|Consultation)([^\n]*)',
        r'(Every\s+\d+\s+\w+)[:\s]+(Visit|Appointment|therapy|session)([^\n]*)'
    ]
    # Milestones/assessments: "Month 3: reassessment ...", "Week 6: labs ..."
    milestone_patterns = [
        r'(Week\s*\d+|Month\s*\d+)[:\s]+(reassess|evaluation|assessment|milestone)([^\n]*)',
        r'(\w+\s*\d+)[:\s]+(HbA1c|labs?|imaging|test)([^\n]*)'
    ]

    phases: List[Tuple[str, str]] = []
    for rx in phase_patterns:
        for timeframe, description in re.findall(rx, content, re.IGNORECASE):
            phases.append((timeframe.strip(), description.strip()))

    appointments: List[Tuple[str, str]] = []
    for rx in apt_patterns:
        for timeframe, kind, details in re.findall(rx, content, re.IGNORECASE):
            appointments.append((timeframe.strip(), f"{kind}{details}".strip()))

    milestones: List[Tuple[str, str]] = []
    for rx in milestone_patterns:
        for timeframe, kind, details in re.findall(rx, content, re.IGNORECASE):
            milestones.append((timeframe.strip(), f"{kind}{details}".strip()))

    return {
        'phases': phases,
        'appointments': appointments,
        'milestones': milestones
    }
def parse_timeframe_to_days(timeframe: str) -> Tuple[int, int]:
    """Convert a human-readable timeframe into (start_day, end_day).

    Weeks count 7 days and months 30; "POD n" means post-operative day n.
    Examples: "Week 1-4" -> (0, 28), "Month 3" -> (60, 90).
    Unrecognized input falls back to the first week, (0, 7).
    """
    lowered = timeframe.lower()
    numbers = [int(n) for n in re.findall(r'\d+', lowered)]

    if 'week' in lowered:
        if len(numbers) == 2:
            return ((numbers[0] - 1) * 7, numbers[1] * 7)
        if len(numbers) == 1:
            return ((numbers[0] - 1) * 7, numbers[0] * 7)

    if 'month' in lowered:
        if len(numbers) == 2:
            return ((numbers[0] - 1) * 30, numbers[1] * 30)
        if len(numbers) == 1:
            return ((numbers[0] - 1) * 30, numbers[0] * 30)

    if 'pod' in lowered:
        if len(numbers) == 2:
            return (numbers[0], numbers[1])
        if len(numbers) == 1:
            return (numbers[0], numbers[0] + 1)

    # Default fallback
    return (0, 7)
def create_text_timeline(timeline_data: Dict, output_file: Path = None):
    """Render the extracted timeline as plain text.

    Writes to *output_file* when given (also printing the saved path),
    otherwise prints the timeline to stdout. Returns the rendered text
    in both cases. Empty sections are omitted entirely.
    """
    rule = "=" * 70
    sep = "-" * 70
    out = [rule, "TREATMENT TIMELINE", rule]

    if timeline_data['phases']:
        out += ["\nTREATMENT PHASES:", sep]
        out += [f"{tf:20s} | {desc}" for tf, desc in timeline_data['phases']]

    if timeline_data['appointments']:
        out += ["\nSCHEDULED APPOINTMENTS:", sep]
        out += [f"{tf:20s} | {details}" for tf, details in timeline_data['appointments']]

    if timeline_data['milestones']:
        out += ["\nMILESTONES & ASSESSMENTS:", sep]
        out += [f"{tf:20s} | {event}" for tf, event in timeline_data['milestones']]

    out.append("\n" + rule)
    output_text = "\n".join(out)

    if output_file:
        with open(output_file, 'w') as f:
            f.write(output_text)
        print(f"\nText timeline saved to: {output_file}")
    else:
        print(output_text)
    return output_text
def create_visual_timeline(timeline_data: Dict, output_file: Path, start_date: str = None):
    """Create a Gantt-chart style timeline figure (requires matplotlib).

    Falls back to a text timeline (written next to *output_file* with a
    .txt suffix) when matplotlib is unavailable. Day offsets from
    parse_timeframe_to_days() are anchored at *start_date*
    (YYYY-MM-DD; defaults to today, also used when parsing fails).
    """
    if not HAS_MATPLOTLIB:
        print("Error: matplotlib not installed. Install with: pip install matplotlib", file=sys.stderr)
        print("Generating text timeline instead...", file=sys.stderr)
        text_output = output_file.with_suffix('.txt')
        create_text_timeline(timeline_data, text_output)
        return
    # Parse start date; invalid input degrades to "today" with a warning.
    if start_date:
        try:
            start = datetime.strptime(start_date, '%Y-%m-%d')
        except ValueError:
            print(f"Invalid date format: {start_date}. Using today.", file=sys.stderr)
            start = datetime.now()
    else:
        start = datetime.now()
    # Phases become horizontal bars spanning their start/end dates.
    phases = []
    for timeframe, description in timeline_data['phases']:
        start_day, end_day = parse_timeframe_to_days(timeframe)
        phases.append({
            'name': f"{timeframe}: {description[:40]}",
            'start': start + timedelta(days=start_day),
            'end': start + timedelta(days=end_day),
            'type': 'phase'
        })
    # Appointments and milestones become point events (markers).
    events = []
    for timeframe, details in timeline_data['appointments']:
        start_day, _ = parse_timeframe_to_days(timeframe)
        events.append({
            'name': f"{timeframe}: {details[:40]}",
            'date': start + timedelta(days=start_day),
            'type': 'appointment'
        })
    for timeframe, event in timeline_data['milestones']:
        start_day, _ = parse_timeframe_to_days(timeframe)
        events.append({
            'name': f"{timeframe}: {event[:40]}",
            'date': start + timedelta(days=start_day),
            'type': 'milestone'
        })
    # Create figure
    fig, ax = plt.subplots(figsize=(12, 8))
    # Plot phases as horizontal bars, stacked from the top of the axis down.
    y_position = len(phases) + len(events)
    for i, phase in enumerate(phases):
        duration = (phase['end'] - phase['start']).days
        ax.barh(y_position - i, duration, left=mdates.date2num(phase['start']),
                height=0.6, color='steelblue', alpha=0.7, edgecolor='black')
        ax.text(mdates.date2num(phase['start']) + duration/2, y_position - i,
                phase['name'], va='center', ha='center', fontsize=9, color='white', weight='bold')
    # Plot events as markers below the phase bars: circles for
    # appointments, squares for milestones.
    event_y = y_position - len(phases) - 1
    for i, event in enumerate(events):
        marker = 'o' if event['type'] == 'appointment' else 's'
        color = 'green' if event['type'] == 'appointment' else 'orange'
        ax.plot(mdates.date2num(event['date']), event_y - i, marker=marker,
                markersize=10, color=color, markeredgecolor='black')
        ax.text(mdates.date2num(event['date']) + 2, event_y - i, event['name'],
                va='center', ha='left', fontsize=8)
    # Format x-axis as month-labelled dates.
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %Y'))
    ax.xaxis.set_major_locator(mdates.MonthLocator())
    plt.xticks(rotation=45, ha='right')
    # Labels and title
    ax.set_xlabel('Date', fontsize=12, weight='bold')
    ax.set_title('Treatment Plan Timeline', fontsize=14, weight='bold', pad=20)
    ax.set_yticks([])
    ax.grid(axis='x', alpha=0.3, linestyle='--')
    # Legend
    from matplotlib.lines import Line2D
    legend_elements = [
        Rectangle((0, 0), 1, 1, fc='steelblue', alpha=0.7, edgecolor='black', label='Treatment Phase'),
        Line2D([0], [0], marker='o', color='w', markerfacecolor='green', markersize=10,
               markeredgecolor='black', label='Appointment'),
        Line2D([0], [0], marker='s', color='w', markerfacecolor='orange', markersize=10,
               markeredgecolor='black', label='Milestone/Assessment')
    ]
    ax.legend(handles=legend_elements, loc='upper right', framealpha=0.9)
    plt.tight_layout()
    # Save (format inferred from the output file extension by matplotlib)
    plt.savefig(output_file, dpi=300, bbox_inches='tight')
    print(f"\nVisual timeline saved to: {output_file}")
    # Close plot to release the figure's memory
    plt.close()
def main():
    """CLI entry point: extract timeline info from a plan and render it.

    Exits 1 when the plan file is missing/unreadable or contains no
    recognizable timeline entries.
    """
    parser = argparse.ArgumentParser(
        description='Generate treatment timeline visualization',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Generate text timeline
  python timeline_generator.py --plan my_plan.tex

  # Generate visual timeline (requires matplotlib)
  python timeline_generator.py --plan my_plan.tex --output timeline.png --visual

  # Specify start date for visual timeline
  python timeline_generator.py --plan my_plan.tex --output timeline.pdf --visual --start 2025-02-01

Output formats:
  Text: .txt
  Visual: .png, .pdf, .svg (requires matplotlib)

Note: Visual timeline generation requires matplotlib.
Install with: pip install matplotlib
"""
    )
    parser.add_argument(
        '--plan',
        type=Path,
        required=True,
        help='Treatment plan file to analyze (.tex format)'
    )
    parser.add_argument(
        '--output',
        type=Path,
        help='Output file (default: timeline.txt or timeline.png if --visual)'
    )
    parser.add_argument(
        '--visual',
        action='store_true',
        help='Generate visual timeline (requires matplotlib)'
    )
    parser.add_argument(
        '--start',
        help='Start date for timeline (YYYY-MM-DD format, default: today)'
    )
    args = parser.parse_args()
    # Check plan file exists
    if not args.plan.exists():
        print(f"Error: File not found: {args.plan}", file=sys.stderr)
        sys.exit(1)
    # Read plan
    try:
        with open(args.plan, 'r', encoding='utf-8') as f:
            content = f.read()
    except Exception as e:
        print(f"Error reading file: {e}", file=sys.stderr)
        sys.exit(1)
    # Extract timeline information
    print("Extracting timeline information from treatment plan...")
    timeline_data = extract_timeline_info(content)
    # Bail out (exit 1) when none of the regexes matched anything.
    total_items = (len(timeline_data['phases']) +
                   len(timeline_data['appointments']) +
                   len(timeline_data['milestones']))
    if total_items == 0:
        print("\nWarning: No timeline information detected in treatment plan.", file=sys.stderr)
        print("The plan may not contain structured timeline/schedule sections.", file=sys.stderr)
        print("\nTip: Include sections with timeframes like:", file=sys.stderr)
        print("  - Week 1-4: Initial phase", file=sys.stderr)
        print("  - Month 3: Follow-up visit", file=sys.stderr)
        sys.exit(1)
    print(f"Found {len(timeline_data['phases'])} phase(s), "
          f"{len(timeline_data['appointments'])} appointment(s), "
          f"{len(timeline_data['milestones'])} milestone(s)")
    # Determine output file, defaulting per output mode.
    if not args.output:
        if args.visual:
            args.output = Path('timeline.png')
        else:
            args.output = Path('timeline.txt')
    # Generate timeline
    if args.visual:
        create_visual_timeline(timeline_data, args.output, args.start)
    else:
        create_text_timeline(timeline_data, args.output)
    print(f"\nTimeline generation complete!")
# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/treatment-plans/scripts/timeline_generator.py",
"license": "MIT License",
"lines": 301,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/treatment-plans/scripts/validate_treatment_plan.py | #!/usr/bin/env python3
"""
Validate Treatment Plan Quality
Comprehensive validation of treatment plan content quality and compliance.
"""
import sys
import re
import argparse
from pathlib import Path
from typing import Dict, List, Tuple
# Validation criteria and patterns.
# Each category maps to (regex, description) checks that validate_content()
# matches case-insensitively against the plan text. Patterns targeting
# literal LaTeX commands (e.g. \cite{) need a doubled backslash; patterns
# targeting digits must use a single \d.
VALIDATION_CHECKS = {
    'smart_goals': {
        'name': 'SMART Goals Criteria',
        'patterns': [
            (r'\bspecific\b', 'Specific criterion'),
            (r'\bmeasurable\b', 'Measurable criterion'),
            (r'\bachievable\b', 'Achievable criterion'),
            (r'\brelevant\b', 'Relevant criterion'),
            (r'\btime[- ]?bound\b', 'Time-bound criterion')
        ]
    },
    'evidence_based': {
        'name': 'Evidence-Based Practice',
        'patterns': [
            (r'guideline|evidence|study|trial|research', 'Evidence/guideline references'),
            (r'\\cite\{|\\bibitem\{|\\bibliography\{', 'Citations present')
        ]
    },
    'patient_centered': {
        'name': 'Patient-Centered Care',
        'patterns': [
            (r'patient.*preference|shared decision|patient.*value|patient.*priority', 'Patient preferences'),
            (r'quality of life|functional.*goal|patient.*goal', 'Functional/QoL goals')
        ]
    },
    'safety': {
        'name': 'Safety and Risk Mitigation',
        'patterns': [
            (r'adverse.*effect|side effect|risk|complication', 'Adverse effects mentioned'),
            (r'monitoring|warning sign|emergency|when to call', 'Safety monitoring plan')
        ]
    },
    'medication': {
        'name': 'Medication Documentation',
        'patterns': [
            # Fixed: these patterns used r'\\d+', which in a raw string is a
            # literal backslash followed by "d" and never matches a digit,
            # so doses like "500 mg" were only detected via 'dose'/'dosage'.
            (r'\d+\s*mg|\d+\s*mcg|dose|dosage', 'Specific doses'),
            (r'daily|BID|TID|QID|once|twice', 'Frequency specified'),
            (r'rationale|indication|because|for', 'Rationale provided')
        ]
    }
}
def read_file(filepath: Path) -> str:
    """Return the file's UTF-8 text, exiting with status 2 on any read error."""
    try:
        with open(filepath, 'r', encoding='utf-8') as handle:
            return handle.read()
    except Exception as e:
        print(f"Error reading file: {e}", file=sys.stderr)
        sys.exit(2)
def validate_content(content: str) -> Dict[str, Tuple[int, int, List[str]]]:
    """Run every VALIDATION_CHECKS category against *content*.

    Returns {category: (passed_count, total_count, missing_descriptions)},
    where missing_descriptions lists the checks whose pattern did not match.
    """
    results: Dict[str, Tuple[int, int, List[str]]] = {}
    for category, checks in VALIDATION_CHECKS.items():
        patterns = checks['patterns']
        missing = [description for pattern, description in patterns
                   if not re.search(pattern, content, re.IGNORECASE)]
        total = len(patterns)
        results[category] = (total - len(missing), total, missing)
    return results
def check_icd10_codes(content: str) -> Tuple[bool, int]:
    """Return (any_found, count) of ICD-10-looking codes in the text."""
    # ICD-10 format: one letter (U excluded), two digits, then an optional
    # dot and further digits/letters.
    matches = re.findall(r'\b[A-TV-Z]\d{2}\.?[\dA-TV-Z]*\b', content)
    return bool(matches), len(matches)
def check_timeframes(content: str) -> Tuple[bool, List[str]]:
    """Look for concrete timeframes (weeks/months/days/deadlines) in goals.

    Returns (found_any, up to five example matches); at most three
    examples are collected per pattern.
    """
    timeframe_patterns = [
        r'\d+\s*week',
        r'\d+\s*month',
        r'\d+\s*day',
        r'within\s+\d+',
        r'by\s+\w+\s+\d+'
    ]
    examples: List[str] = []
    for rx in timeframe_patterns:
        examples.extend(re.findall(rx, content, re.IGNORECASE)[:3])
    return len(examples) > 0, examples[:5]
def check_quantitative_goals(content: str) -> Tuple[bool, List[str]]:
    """Check for quantitative/measurable goals.

    Returns:
        (has_metrics, examples) — examples holds at most five matched
        metric strings (at most two per pattern).
    """
    # Each alternation is wrapped in a non-capturing group so the leading
    # r'\d+\s*' applies to every alternative.  The previous patterns
    # (e.g. r'\d+\s*feet|meters') matched a bare "meters"/"kg"/"hours"
    # with no number attached, falsely reporting a quantitative goal.
    patterns = [
        r'\d+\s*%',                   # Percentages (HbA1c 7%)
        r'\d+/\d+',                   # Ratios (BP 130/80)
        r'\d+\s*mg/dL',               # Lab values
        r'\d+\s*mmHg',                # Blood pressure
        r'\d+\s*(?:feet|meters)',     # Distance
        r'\d+\s*(?:pounds|lbs|kg)',   # Weight
        r'\d+/10',                    # Pain scales
        r'\d+\s*(?:minutes|hours)'    # Time
    ]
    found_metrics = []
    for pattern in patterns:
        matches = re.findall(pattern, content, re.IGNORECASE)
        found_metrics.extend(matches[:2])  # cap per-pattern to avoid flooding
    has_metrics = len(found_metrics) > 0
    return has_metrics, found_metrics[:5]
def assess_readability(content: str) -> str:
    """Very rough readability label based on average sentence length."""
    # Strip LaTeX commands and markup characters before counting words.
    plain = re.sub(r'\\[a-zA-Z]+(\{[^}]*\})?', '', content)
    plain = re.sub(r'[{}%\\]', '', plain)
    word_count = len(plain.split())
    # Sentences approximated as text separated by ./!/? plus whitespace.
    sentence_count = sum(1 for s in re.split(r'[.!?]+\s+', plain) if s.strip())
    if sentence_count == 0:
        return "Unable to assess"
    average = word_count / sentence_count
    if average < 15:
        return "Simple (good for patient materials)"
    if average < 25:
        return "Moderate (appropriate for professional documentation)"
    return "Complex (may be difficult for some readers)"
def display_validation_results(filepath: Path, results: Dict,
                               has_icd10: bool, icd10_count: int,
                               has_timeframes: bool, timeframe_examples: List[str],
                               has_metrics: bool, metric_examples: List[str],
                               readability: str):
    """Display comprehensive validation results.

    Prints a formatted quality report to stdout and returns the process
    exit code: 0 when the overall pass rate is >= 70%, otherwise 1.

    Args:
        filepath: Validated file (its size is read via stat()).
        results: Output of validate_content(): {category: (passed, total, missing)}.
        has_icd10 / icd10_count: Output of check_icd10_codes().
        has_timeframes / timeframe_examples: Output of check_timeframes().
        has_metrics / metric_examples: Output of check_quantitative_goals().
        readability: Label from assess_readability().
    """
    # NOTE(review): the status glyphs below ("β" variants) appear to be
    # mojibake — presumably check/warning/cross marks in the original
    # encoding. Confirm against the upstream file before changing them.
    print("\n" + "="*70)
    print("TREATMENT PLAN QUALITY VALIDATION")
    print("="*70)
    print(f"\nFile: {filepath}")
    print(f"File size: {filepath.stat().st_size:,} bytes")
    # Overall quality score: aggregate pass counts across all categories.
    total_passed = sum(r[0] for r in results.values())
    total_checks = sum(r[1] for r in results.values())
    quality_pct = (total_passed / total_checks) * 100 if total_checks > 0 else 0
    print("\n" + "-"*70)
    print("OVERALL QUALITY SCORE")
    print("-"*70)
    print(f"Validation checks passed: {total_passed}/{total_checks} ({quality_pct:.0f}%)")
    # Detailed category results
    print("\n" + "-"*70)
    print("QUALITY CRITERIA ASSESSMENT")
    print("-"*70)
    for category, (passed, total, missing) in results.items():
        category_name = VALIDATION_CHECKS[category]['name']
        pct = (passed / total) * 100 if total > 0 else 0
        # Full pass, partial pass, or no pass glyph.
        status = "β" if passed == total else "β " if passed > 0 else "β"
        print(f"\n{status} {category_name}: {passed}/{total} ({pct:.0f}%)")
        if missing:
            print(" Missing:")
            for item in missing:
                print(f" β’ {item}")
    # Specific checks
    print("\n" + "-"*70)
    print("SPECIFIC VALIDATION CHECKS")
    print("-"*70)
    # ICD-10 codes
    if has_icd10:
        print(f"β ICD-10 diagnosis codes present ({icd10_count} found)")
    else:
        print("β No ICD-10 diagnosis codes detected")
        print(" Recommendation: Include ICD-10 codes for all diagnoses")
    # Timeframes
    if has_timeframes:
        print(f"β Time-bound goals present")
        if timeframe_examples:
            print(" Examples:", ", ".join(timeframe_examples[:3]))
    else:
        print("β No specific timeframes found in goals")
        print(" Recommendation: Add specific timeframes (e.g., 'within 3 months', '8 weeks')")
    # Measurable metrics
    if has_metrics:
        print(f"β Quantitative/measurable goals present")
        if metric_examples:
            print(" Examples:", ", ".join(metric_examples[:3]))
    else:
        print("β Limited quantitative metrics found")
        print(" Recommendation: Include specific measurable targets (HbA1c <7%, BP <130/80)")
    # Readability
    print(f"\nReadability assessment: {readability}")
    # Summary and recommendations
    print("\n" + "="*70)
    print("SUMMARY AND RECOMMENDATIONS")
    print("="*70)
    if quality_pct >= 90:
        print("\nβ EXCELLENT quality - Treatment plan meets high standards")
    elif quality_pct >= 75:
        print("\nβ GOOD quality - Treatment plan is well-developed with minor areas for improvement")
    elif quality_pct >= 60:
        print("\nβ FAIR quality - Several important elements need strengthening")
    else:
        print("\nβ NEEDS IMPROVEMENT - Significant quality issues to address")
    # Specific recommendations, built from whichever categories fell short.
    print("\nKey Recommendations:")
    recommendations = []
    # SMART goals
    if results['smart_goals'][0] < results['smart_goals'][1]:
        recommendations.append("Ensure all goals meet SMART criteria (Specific, Measurable, Achievable, Relevant, Time-bound)")
    # Evidence-based
    if results['evidence_based'][0] == 0:
        recommendations.append("Add evidence-based rationale and cite clinical practice guidelines")
    # Patient-centered
    if results['patient_centered'][0] < results['patient_centered'][1]:
        recommendations.append("Incorporate patient preferences and functional quality-of-life goals")
    # Safety
    if results['safety'][0] < results['safety'][1]:
        recommendations.append("Include comprehensive safety monitoring and risk mitigation strategies")
    # Medication documentation
    if results['medication'][0] < results['medication'][1]:
        recommendations.append("Document medications with specific doses, frequencies, and rationales")
    if not has_icd10:
        recommendations.append("Add ICD-10 diagnosis codes for billing and documentation support")
    if not has_timeframes:
        recommendations.append("Add specific timeframes to all treatment goals")
    if recommendations:
        for i, rec in enumerate(recommendations, 1):
            print(f"{i}. {rec}")
    else:
        print("None - Treatment plan demonstrates excellent quality across all criteria!")
    print("\n" + "="*70)
    # Return exit code: 70% pass rate is the acceptance threshold.
    return 0 if quality_pct >= 70 else 1
def main():
    """CLI entry point: parse arguments, run all checks, exit with a status code.

    Exit codes: 0 (quality >= 70%), 1 (quality < 70%), 2 (file error).
    """
    parser = argparse.ArgumentParser(
        description='Validate treatment plan quality and compliance',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Validate a treatment plan
  python validate_treatment_plan.py my_plan.tex

  # Use in automated workflows (exits with error if quality <70%)
  python validate_treatment_plan.py plan.tex && echo "Quality check passed"

Validation Categories:
  - SMART goals criteria (Specific, Measurable, Achievable, Relevant, Time-bound)
  - Evidence-based practice (guidelines, citations)
  - Patient-centered care (preferences, functional goals)
  - Safety and risk mitigation (adverse effects, monitoring)
  - Medication documentation (doses, frequencies, rationales)
  - ICD-10 coding, timeframes, measurable metrics

Exit Codes:
  0 - Quality β₯70% (acceptable)
  1 - Quality <70% (needs improvement)
  2 - File error or invalid arguments
        """
    )
    parser.add_argument(
        'file',
        type=Path,
        help='Treatment plan file to validate (.tex format)'
    )
    args = parser.parse_args()
    # Check file exists before attempting to read it.
    if not args.file.exists():
        print(f"Error: File not found: {args.file}", file=sys.stderr)
        sys.exit(2)
    # Read and validate (read_file exits with code 2 on read errors).
    content = read_file(args.file)
    # Run validation checks
    results = validate_content(content)
    has_icd10, icd10_count = check_icd10_codes(content)
    has_timeframes, timeframe_examples = check_timeframes(content)
    has_metrics, metric_examples = check_quantitative_goals(content)
    readability = assess_readability(content)
    # Display results; the report function returns the process exit code.
    exit_code = display_validation_results(
        args.file, results,
        has_icd10, icd10_count,
        has_timeframes, timeframe_examples,
        has_metrics, metric_examples,
        readability
    )
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/treatment-plans/scripts/validate_treatment_plan.py",
"license": "MIT License",
"lines": 293,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/uniprot-database/scripts/uniprot_client.py | #!/usr/bin/env python3
"""
UniProt REST API Client
A Python client for interacting with the UniProt REST API.
Provides helper functions for common operations including search,
retrieval, ID mapping, and streaming.
Usage examples:
# Search for proteins
results = search_proteins("insulin AND organism_name:human", format="json")
# Get a single protein
protein = get_protein("P12345", format="fasta")
# Map IDs
mapped = map_ids(["P12345", "P04637"], from_db="UniProtKB_AC-ID", to_db="PDB")
# Stream large results
for batch in stream_results("taxonomy_id:9606 AND reviewed:true", format="fasta"):
process(batch)
"""
import requests
import sys
import time
import json
from typing import List, Dict, Optional, Generator
from urllib.parse import urlencode
BASE_URL = "https://rest.uniprot.org"
POLLING_INTERVAL = 3 # seconds
def search_proteins(query: str, format: str = "json",
                    fields: Optional[List[str]] = None,
                    size: int = 25) -> Dict:
    """Run a UniProtKB search and return the response body.

    Args:
        query: UniProt query string, e.g. "insulin AND organism_name:human".
        format: Response format (json, tsv, xlsx, xml, fasta, txt, rdf).
        fields: Optional list of result fields to request.
        size: Results per page (default 25, API maximum 500).

    Returns:
        Parsed JSON (dict) when format == "json", otherwise the raw text body.
    """
    params = {"query": query, "format": format, "size": size}
    if fields:
        params["fields"] = ",".join(fields)
    resp = requests.get(f"{BASE_URL}/uniprotkb/search", params=params)
    resp.raise_for_status()
    return resp.json() if format == "json" else resp.text
def get_protein(accession: str, format: str = "json") -> str:
    """Fetch a single UniProtKB entry by accession number.

    Args:
        accession: UniProt accession number (e.g. "P12345").
        format: Response format (json, txt, xml, fasta, gff, rdf).

    Returns:
        Parsed JSON (dict) when format == "json", otherwise the raw text body.
    """
    resp = requests.get(f"{BASE_URL}/uniprotkb/{accession}.{format}")
    resp.raise_for_status()
    return resp.json() if format == "json" else resp.text
def batch_retrieve(accessions: List[str], format: str = "json",
                   fields: Optional[List[str]] = None) -> str:
    """Retrieve several entries in one request by OR-ing accession filters.

    Args:
        accessions: UniProt accession numbers.
        format: Response format.
        fields: Optional list of result fields to request.

    Returns:
        Combined results in the requested format (one page sized to the
        number of accessions).
    """
    combined = " OR ".join(f"accession:{acc}" for acc in accessions)
    return search_proteins(combined, format=format, fields=fields,
                           size=len(accessions))
def stream_results(query: str, format: str = "fasta",
                   fields: Optional[List[str]] = None,
                   chunk_size: int = 8192) -> Generator[str, None, None]:
    """Stream an arbitrarily large result set without pagination.

    Uses the /uniprotkb/stream endpoint with a streamed HTTP response so
    results never need to fit in memory at once.

    Args:
        query: Search query.
        format: Response format.
        fields: Optional list of result fields to request.
        chunk_size: Size of the chunks yielded to the caller.

    Yields:
        Decoded text chunks of the response body (empty chunks skipped).
    """
    params = {"query": query, "format": format}
    if fields:
        params["fields"] = ",".join(fields)
    resp = requests.get(f"{BASE_URL}/uniprotkb/stream", params=params, stream=True)
    resp.raise_for_status()
    for piece in resp.iter_content(chunk_size=chunk_size, decode_unicode=True):
        if piece:
            yield piece
def map_ids(ids: List[str], from_db: str, to_db: str,
            format: str = "json", max_wait: Optional[float] = None) -> Dict:
    """
    Map protein identifiers between different database systems.

    Args:
        ids: List of identifiers to map (max 100,000)
        from_db: Source database (e.g., "UniProtKB_AC-ID", "Gene_Name")
        to_db: Target database (e.g., "PDB", "Ensembl", "RefSeq_Protein")
        format: Response format
        max_wait: Maximum seconds to wait for the mapping job to finish.
            None (the default) preserves the original behavior of polling
            indefinitely.

    Returns:
        Mapping results

    Raises:
        ValueError: If more than 100,000 IDs are submitted.
        TimeoutError: If max_wait is set and the job does not finish in time.

    Note:
        - Maximum 100,000 IDs per job
        - Results stored for 7 days
        - See id_mapping_databases.md for all supported databases
    """
    if len(ids) > 100000:
        raise ValueError("Maximum 100,000 IDs allowed per mapping job")
    # Step 1: Submit job
    submit_endpoint = f"{BASE_URL}/idmapping/run"
    data = {
        "from": from_db,
        "to": to_db,
        "ids": ",".join(ids)
    }
    response = requests.post(submit_endpoint, data=data)
    response.raise_for_status()
    job_id = response.json()["jobId"]
    # Step 2: Poll for completion. The previous implementation looped
    # forever; a stuck job would hang the caller, so an optional deadline
    # is enforced here.
    status_endpoint = f"{BASE_URL}/idmapping/status/{job_id}"
    deadline = time.monotonic() + max_wait if max_wait is not None else None
    while True:
        response = requests.get(status_endpoint)
        response.raise_for_status()
        status = response.json()
        # The job is done once either mapped results or failed IDs appear.
        if "results" in status or "failedIds" in status:
            break
        if deadline is not None and time.monotonic() >= deadline:
            raise TimeoutError(
                f"ID mapping job {job_id} did not finish within {max_wait} seconds")
        time.sleep(POLLING_INTERVAL)
    # Step 3: Retrieve results
    results_endpoint = f"{BASE_URL}/idmapping/results/{job_id}"
    params = {"format": format}
    response = requests.get(results_endpoint, params=params)
    response.raise_for_status()
    if format == "json":
        return response.json()
    else:
        return response.text
def get_available_fields() -> List[Dict]:
    """Return the API's catalogue of UniProtKB result fields.

    Returns:
        List of field definitions with names and descriptions.
    """
    resp = requests.get(f"{BASE_URL}/configure/uniprotkb/result-fields")
    resp.raise_for_status()
    return resp.json()
def get_id_mapping_databases() -> Dict:
    """Return the database groups supported by the ID-mapping service.

    Returns:
        Dictionary of database groups and their supported databases.
    """
    resp = requests.get(f"{BASE_URL}/configure/idmapping/fields")
    resp.raise_for_status()
    return resp.json()
def main():
    """Command-line interface for UniProt database queries.

    Dispatches to exactly one of the module's API helpers based on a
    mutually-exclusive CLI flag, printing results to stdout. Exits 1 with
    the error message on stderr for any failure.
    """
    # Imported lazily so library users of this module don't pay for CLI setup.
    import argparse
    parser = argparse.ArgumentParser(
        description='Query UniProt database using REST API',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Search for proteins
  %(prog)s --search "insulin AND organism_name:human" --format json

  # Get a specific protein
  %(prog)s --get P01308 --format fasta

  # Map IDs from UniProt to PDB
  %(prog)s --map P01308,P04637 --from UniProtKB_AC-ID --to PDB

  # Stream large results
  %(prog)s --stream "taxonomy_id:9606 AND reviewed:true" --format fasta

  # List available fields
  %(prog)s --list-fields

  # List mapping databases
  %(prog)s --list-databases
        """
    )
    # Main operation arguments (mutually exclusive)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--search', '-s', help='Search query string')
    group.add_argument('--get', '-g', help='Get protein by accession number')
    group.add_argument('--map', '-m', help='Map IDs (comma-separated)')
    group.add_argument('--stream', help='Stream large result sets')
    group.add_argument('--list-fields', action='store_true',
                       help='List all available query fields')
    group.add_argument('--list-databases', action='store_true',
                       help='List all ID mapping databases')
    # Format options
    parser.add_argument('--format', '-f', default='json',
                        help='Output format (json, tsv, xlsx, xml, fasta, txt, rdf)')
    # Search-specific options
    parser.add_argument('--fields', help='Comma-separated list of fields to return')
    parser.add_argument('--size', type=int, default=25,
                        help='Number of results (default: 25, max: 500)')
    # Mapping-specific options ('from' is a Python keyword, hence dest=)
    parser.add_argument('--from', dest='from_db',
                        help='Source database for ID mapping')
    parser.add_argument('--to', dest='to_db',
                        help='Target database for ID mapping')
    args = parser.parse_args()
    try:
        if args.list_fields:
            fields = get_available_fields()
            print(json.dumps(fields, indent=2))
        elif args.list_databases:
            databases = get_id_mapping_databases()
            print(json.dumps(databases, indent=2))
        elif args.search:
            fields_list = args.fields.split(',') if args.fields else None
            results = search_proteins(
                args.search,
                format=args.format,
                fields=fields_list,
                size=args.size
            )
            # JSON is pretty-printed; every other format is raw text.
            if args.format == 'json':
                print(json.dumps(results, indent=2))
            else:
                print(results)
        elif args.get:
            protein = get_protein(args.get, format=args.format)
            if args.format == 'json':
                print(json.dumps(protein, indent=2))
            else:
                print(protein)
        elif args.map:
            if not args.from_db or not args.to_db:
                parser.error("--map requires --from and --to arguments")
            ids = [id.strip() for id in args.map.split(',')]
            mapping = map_ids(ids, args.from_db, args.to_db, format=args.format)
            if args.format == 'json':
                print(json.dumps(mapping, indent=2))
            else:
                print(mapping)
        elif args.stream:
            fields_list = args.fields.split(',') if args.fields else None
            # Chunks already contain their own separators; suppress newlines.
            for chunk in stream_results(args.stream, format=args.format, fields=fields_list):
                print(chunk, end='')
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/uniprot-database/scripts/uniprot_client.py",
"license": "MIT License",
"lines": 260,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/uspto-database/scripts/patent_search.py | #!/usr/bin/env python3
"""
USPTO PatentSearch API Helper
Provides functions for searching and retrieving patent data using the USPTO
PatentSearch API (ElasticSearch-based system, replaced legacy PatentsView in May 2025).
Requires:
- requests library: pip install requests
- USPTO API key from https://account.uspto.gov/api-manager/
Environment variables:
USPTO_API_KEY - Your USPTO API key
"""
import os
import sys
import json
import requests
from typing import Dict, List, Optional, Any
from datetime import datetime
class PatentSearchClient:
    """Client for USPTO PatentSearch API.

    Thin wrapper over the POST endpoints at BASE_URL: every public search
    method builds a query dict and delegates to _request(), which attaches
    the API-key header and decodes the JSON response.
    """

    # Root of the ElasticSearch-based PatentSearch API (v1).
    BASE_URL = "https://search.patentsview.org/api/v1"

    def __init__(self, api_key: Optional[str] = None):
        """
        Initialize client with API key.

        Args:
            api_key: USPTO API key (if not provided, uses USPTO_API_KEY env var)

        Raises:
            ValueError: If no key is supplied and USPTO_API_KEY is unset.
        """
        self.api_key = api_key or os.getenv("USPTO_API_KEY")
        if not self.api_key:
            raise ValueError("API key required. Set USPTO_API_KEY environment variable or pass to constructor.")
        self.headers = {
            "X-Api-Key": self.api_key,
            "Content-Type": "application/json"
        }

    def _request(self, endpoint: str, query: Dict, fields: Optional[List[str]] = None,
                 sort: Optional[List[Dict]] = None, options: Optional[Dict] = None) -> Dict:
        """
        Make a request to the PatentSearch API.

        Args:
            endpoint: API endpoint (e.g., "patent", "inventor")
            query: Query dictionary
            fields: List of fields to return
            sort: Sort specification
            options: Pagination and other options

        Returns:
            API response as dictionary

        Raises:
            requests.HTTPError: On any non-2xx API response.
        """
        url = f"{self.BASE_URL}/{endpoint}"
        # The API uses short keys in the POST body: q=query, f=fields,
        # s=sort, o=options.
        data = {"q": query}
        if fields:
            data["f"] = fields
        if sort:
            data["s"] = sort
        if options:
            data["o"] = options
        response = requests.post(url, headers=self.headers, json=data)
        response.raise_for_status()
        return response.json()

    def search_patents(self, query: Dict, fields: Optional[List[str]] = None,
                       sort: Optional[List[Dict]] = None, page: int = 1,
                       per_page: int = 100) -> Dict:
        """
        Search for patents.

        Args:
            query: Query dictionary (see PatentSearch API docs for syntax)
            fields: Fields to return (defaults to essential fields)
            sort: Sort specification
            page: Page number
            per_page: Results per page (max 1000)

        Returns:
            Search results with patents array

        Example:
            # Search by keyword
            results = client.search_patents({
                "patent_abstract": {"_text_all": ["machine", "learning"]}
            })

            # Search by date range
            results = client.search_patents({
                "patent_date": {"_gte": "2024-01-01", "_lte": "2024-12-31"}
            })
        """
        if fields is None:
            fields = [
                "patent_number", "patent_title", "patent_date",
                "patent_abstract", "assignee_organization",
                "inventor_name", "cpc_subclass_id"
            ]
        if sort is None:
            # Newest patents first by default.
            sort = [{"patent_date": "desc"}]
        # Clamp to the API's hard per-page ceiling of 1000.
        options = {"page": page, "per_page": min(per_page, 1000)}
        return self._request("patent", query, fields, sort, options)

    def get_patent(self, patent_number: str) -> Optional[Dict]:
        """
        Get details for a specific patent by number.

        Args:
            patent_number: Patent number (with or without commas)

        Returns:
            Patent data dictionary or None if not found
        """
        # Remove commas from patent number (e.g. "11,234,567" -> "11234567").
        patent_number = patent_number.replace(",", "")
        query = {"patent_number": patent_number}
        fields = [
            "patent_number", "patent_title", "patent_date", "patent_abstract",
            "patent_type", "inventor_name", "assignee_organization",
            "cpc_subclass_id", "cited_patent_number", "citedby_patent_number"
        ]
        result = self._request("patent", query, fields)
        if result.get("patents"):
            return result["patents"][0]
        return None

    def search_by_inventor(self, inventor_name: str, **kwargs) -> Dict:
        """
        Search patents by inventor name.

        Args:
            inventor_name: Inventor name (use _text_phrase for exact match)
            **kwargs: Additional search parameters

        Returns:
            Search results
        """
        # _text_phrase matches the full name as a phrase, not word-by-word.
        query = {"inventor_name": {"_text_phrase": inventor_name}}
        return self.search_patents(query, **kwargs)

    def search_by_assignee(self, assignee_name: str, **kwargs) -> Dict:
        """
        Search patents by assignee/company name.

        Args:
            assignee_name: Assignee/company name
            **kwargs: Additional search parameters

        Returns:
            Search results
        """
        # _text_any: match any word of the company name (broad recall).
        query = {"assignee_organization": {"_text_any": assignee_name.split()}}
        return self.search_patents(query, **kwargs)

    def search_by_classification(self, cpc_code: str, **kwargs) -> Dict:
        """
        Search patents by CPC classification code.

        Args:
            cpc_code: CPC subclass code (e.g., "H04N", "G06F")
            **kwargs: Additional search parameters

        Returns:
            Search results
        """
        query = {"cpc_subclass_id": cpc_code}
        return self.search_patents(query, **kwargs)

    def search_by_date_range(self, start_date: str, end_date: str, **kwargs) -> Dict:
        """
        Search patents by date range.

        Args:
            start_date: Start date (YYYY-MM-DD)
            end_date: End date (YYYY-MM-DD)
            **kwargs: Additional search parameters

        Returns:
            Search results
        """
        query = {
            "patent_date": {
                "_gte": start_date,
                "_lte": end_date
            }
        }
        return self.search_patents(query, **kwargs)

    def advanced_search(self, keywords: List[str], assignee: Optional[str] = None,
                        start_date: Optional[str] = None, end_date: Optional[str] = None,
                        cpc_codes: Optional[List[str]] = None, **kwargs) -> Dict:
        """
        Perform advanced search with multiple criteria.

        Args:
            keywords: List of keywords to search in abstract/title
            assignee: Assignee/company name
            start_date: Start date (YYYY-MM-DD)
            end_date: End date (YYYY-MM-DD)
            cpc_codes: List of CPC classification codes
            **kwargs: Additional search parameters

        Returns:
            Search results
        """
        conditions = []
        # Keyword search in abstract
        if keywords:
            conditions.append({
                "patent_abstract": {"_text_all": keywords}
            })
        # Assignee filter
        if assignee:
            conditions.append({
                "assignee_organization": {"_text_any": assignee.split()}
            })
        # Date range (both endpoints must be provided to apply the filter)
        if start_date and end_date:
            conditions.append({
                "patent_date": {"_gte": start_date, "_lte": end_date}
            })
        # CPC classification
        if cpc_codes:
            conditions.append({
                "cpc_subclass_id": cpc_codes
            })
        # NOTE(review): if called with no criteria at all, conditions is
        # empty and conditions[0] raises IndexError — confirm whether that
        # is intended or should raise ValueError explicitly.
        query = {"_and": conditions} if len(conditions) > 1 else conditions[0]
        return self.search_patents(query, **kwargs)
def main():
    """Command-line interface for patent search.

    Positional-style CLI: a flag (--inventor/--assignee/--keywords) or a
    bare patent number. Prints JSON results to stdout; exits 1 on error
    or when a requested patent is not found.
    """
    if len(sys.argv) < 2:
        print("Usage:")
        print(" python patent_search.py <patent_number>")
        print(" python patent_search.py --inventor <name>")
        print(" python patent_search.py --assignee <company>")
        print(" python patent_search.py --keywords <word1> <word2> ...")
        sys.exit(1)
    # Raises ValueError here if no API key is configured.
    client = PatentSearchClient()
    try:
        if sys.argv[1] == "--inventor":
            results = client.search_by_inventor(" ".join(sys.argv[2:]))
        elif sys.argv[1] == "--assignee":
            results = client.search_by_assignee(" ".join(sys.argv[2:]))
        elif sys.argv[1] == "--keywords":
            query = {"patent_abstract": {"_text_all": sys.argv[2:]}}
            results = client.search_patents(query)
        else:
            # Assume patent number
            patent = client.get_patent(sys.argv[1])
            if patent:
                # Wrap the single patent in the same shape a search returns.
                results = {"patents": [patent], "count": 1, "total_hits": 1}
            else:
                print(f"Patent {sys.argv[1]} not found")
                sys.exit(1)
        # Print results
        print(json.dumps(results, indent=2))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/uspto-database/scripts/patent_search.py",
"license": "MIT License",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/scientific/uspto-database/scripts/peds_client.py | #!/usr/bin/env python3
"""
USPTO Patent Examination Data System (PEDS) Helper
Provides functions for retrieving patent examination data using the
uspto-opendata-python library.
Requires:
- uspto-opendata-python: pip install uspto-opendata-python
Note: This script provides a simplified interface to PEDS data.
For full functionality, use the uspto-opendata-python library directly.
"""
import sys
import json
from typing import Dict, List, Optional, Any
from datetime import datetime
try:
from uspto.peds import PEDSClient as OriginalPEDSClient
HAS_USPTO_LIB = True
except ImportError:
HAS_USPTO_LIB = False
print("Warning: uspto-opendata-python not installed.", file=sys.stderr)
print("Install with: pip install uspto-opendata-python", file=sys.stderr)
class PEDSHelper:
    """Helper class for accessing PEDS data.

    Wraps uspto-opendata-python's PEDS client with convenience methods
    for status summaries, transaction history, and prosecution analysis.
    Retrieval methods report errors on stderr and return None/[] rather
    than raising.
    """

    def __init__(self):
        """Initialize PEDS client.

        Raises:
            ImportError: When uspto-opendata-python is not installed.
        """
        if not HAS_USPTO_LIB:
            raise ImportError("uspto-opendata-python library required")
        self.client = OriginalPEDSClient()

    def get_application(self, application_number: str) -> Optional[Dict]:
        """
        Get patent application data by application number.

        Args:
            application_number: Application number (e.g., "16123456")

        Returns:
            Application data dictionary, or None on retrieval failure (the
            error is printed to stderr rather than raised).
            NOTE: the exact keys depend on what uspto-opendata-python
            returns — _format_application_data is currently a passthrough.
        """
        try:
            result = self.client.get_application(application_number)
            return self._format_application_data(result)
        except Exception as e:
            print(f"Error retrieving application {application_number}: {e}", file=sys.stderr)
            return None

    def get_patent(self, patent_number: str) -> Optional[Dict]:
        """
        Get patent data by patent number.

        Args:
            patent_number: Patent number (e.g., "11234567")

        Returns:
            Patent data dictionary, or None on retrieval failure.
        """
        try:
            result = self.client.get_patent(patent_number)
            return self._format_application_data(result)
        except Exception as e:
            print(f"Error retrieving patent {patent_number}: {e}", file=sys.stderr)
            return None

    def get_transaction_history(self, application_number: str) -> List[Dict]:
        """
        Get transaction history for an application.

        Args:
            application_number: Application number

        Returns:
            List of transactions with date, code, and description;
            empty list when the application cannot be retrieved or has
            no 'transactions' key.
        """
        app_data = self.get_application(application_number)
        if app_data and 'transactions' in app_data:
            return app_data['transactions']
        return []

    def get_office_actions(self, application_number: str) -> List[Dict]:
        """
        Get office actions for an application.

        Args:
            application_number: Application number

        Returns:
            List of office actions with dates and types
        """
        transactions = self.get_transaction_history(application_number)
        # Filter for office action transaction codes:
        # CTNF=non-final rejection, CTFR=final rejection,
        # AOPF=office action, NOA=notice of allowance.
        oa_codes = ['CTNF', 'CTFR', 'AOPF', 'NOA']
        office_actions = [
            trans for trans in transactions
            if trans.get('code') in oa_codes
        ]
        return office_actions

    def get_status_summary(self, application_number: str) -> Dict[str, Any]:
        """
        Get a summary of application status.

        Args:
            application_number: Application number

        Returns:
            Empty dict when retrieval fails; otherwise a dictionary with:
            - current_status: Current application status
            - filing_date: Filing date
            - status_date: Status date
            - is_patented: Boolean indicating if patented
            - patent_number: Patent number if granted
            - issue_date: Patent issue date if granted
            - pendency_days: Days since filing (None if no filing date)
            - title, inventors, assignees: Bibliographic details
        """
        app_data = self.get_application(application_number)
        if not app_data:
            return {}
        filing_date = app_data.get('filing_date')
        if filing_date:
            # Assumes an ISO 'YYYY-MM-DD' date string — TODO confirm the
            # format actually emitted by the PEDS library.
            filing_dt = datetime.strptime(filing_date, '%Y-%m-%d')
            pendency_days = (datetime.now() - filing_dt).days
        else:
            pendency_days = None
        return {
            'current_status': app_data.get('app_status'),
            'filing_date': filing_date,
            'status_date': app_data.get('app_status_date'),
            'is_patented': app_data.get('patent_number') is not None,
            'patent_number': app_data.get('patent_number'),
            'issue_date': app_data.get('patent_issue_date'),
            'pendency_days': pendency_days,
            'title': app_data.get('title'),
            'inventors': app_data.get('inventors', []),
            'assignees': app_data.get('assignees', [])
        }

    def analyze_prosecution(self, application_number: str) -> Dict[str, Any]:
        """
        Analyze prosecution history.

        Args:
            application_number: Application number

        Returns:
            Empty dict when no transactions exist; otherwise a dictionary
            with prosecution analysis:
            - total_office_actions: Count of office actions
            - non_final_rejections / final_rejections: Rejection counts
            - allowance: True if a notice of allowance was issued
            - responses: Count of applicant responses
            - abandonment: True if the application was abandoned
            - status: Current application status
            - pendency_days: Days since filing
        """
        transactions = self.get_transaction_history(application_number)
        app_summary = self.get_status_summary(application_number)
        if not transactions:
            return {}
        analysis = {
            'total_office_actions': 0,
            'non_final_rejections': 0,
            'final_rejections': 0,
            'allowance': False,
            'responses': 0,
            'abandonment': False
        }
        # Tally events by USPTO transaction code.
        for trans in transactions:
            code = trans.get('code', '')
            if code == 'CTNF':
                analysis['non_final_rejections'] += 1
                analysis['total_office_actions'] += 1
            elif code == 'CTFR':
                analysis['final_rejections'] += 1
                analysis['total_office_actions'] += 1
            elif code in ['AOPF', 'OA']:
                analysis['total_office_actions'] += 1
            elif code == 'NOA':
                analysis['allowance'] = True
            elif code == 'WRIT':
                analysis['responses'] += 1
            elif code == 'ABND':
                analysis['abandonment'] = True
        analysis['status'] = app_summary.get('current_status')
        analysis['pendency_days'] = app_summary.get('pendency_days')
        return analysis

    def _format_application_data(self, raw_data: Dict) -> Dict:
        """Format raw PEDS data into cleaner structure.

        Currently an identity passthrough — the real structure depends on
        uspto-opendata-python's response shape.
        """
        # This is a placeholder - actual implementation depends on
        # the structure returned by uspto-opendata-python
        return raw_data
def main():
    """Command-line interface for PEDS data.

    Exactly one operation flag is required; the chosen helper's result is
    pretty-printed as JSON. Exits 1 on error or when no data is found.
    """
    # Imported lazily so library users of this module don't pay for CLI setup.
    import argparse
    parser = argparse.ArgumentParser(
        description='Query USPTO Patent Examination Data System (PEDS)',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Get application data by application number
  %(prog)s --application 16123456

  # Get patent data by patent number
  %(prog)s --patent 11234567

  # Get status summary
  %(prog)s --status 16123456

  # Analyze prosecution history
  %(prog)s --analyze 16123456

  # Get transaction history
  %(prog)s --transactions 16123456

  # Get office actions
  %(prog)s --office-actions 16123456
        """
    )
    # Bail out early (usage + exit 2) when the required library is missing.
    if not HAS_USPTO_LIB:
        parser.error("uspto-opendata-python library not installed. Install with: pip install uspto-opendata-python")
    # Main operation arguments (mutually exclusive)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--application', '-a', help='Get application by application number')
    group.add_argument('--patent', '-p', help='Get patent by patent number')
    group.add_argument('--status', '-s', help='Get status summary for application')
    group.add_argument('--analyze', help='Analyze prosecution history for application')
    group.add_argument('--transactions', '-t', help='Get transaction history for application')
    group.add_argument('--office-actions', '-o', help='Get office actions for application')
    args = parser.parse_args()
    try:
        helper = PEDSHelper()
        if args.application:
            result = helper.get_application(args.application)
        elif args.patent:
            result = helper.get_patent(args.patent)
        elif args.status:
            result = helper.get_status_summary(args.status)
        elif args.analyze:
            result = helper.analyze_prosecution(args.analyze)
        elif args.transactions:
            result = helper.get_transaction_history(args.transactions)
        elif args.office_actions:
            result = helper.get_office_actions(args.office_actions)
        if result:
            print(json.dumps(result, indent=2))
        else:
            # Helpers return None/[]/{} on failure rather than raising.
            print("No data found", file=sys.stderr)
            sys.exit(1)
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/uspto-database/scripts/peds_client.py",
"license": "MIT License",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/uspto-database/scripts/trademark_client.py | #!/usr/bin/env python3
"""
USPTO Trademark API Helper
Provides functions for searching and retrieving trademark data using USPTO
Trademark Status & Document Retrieval (TSDR) API.
Requires:
- requests library: pip install requests
- USPTO API key from https://account.uspto.gov/api-manager/
Environment variables:
USPTO_API_KEY - Your USPTO API key
"""
import os
import sys
import json
import requests
from typing import Dict, List, Optional, Any
class TrademarkClient:
    """Client for USPTO Trademark Status & Document Retrieval (TSDR) APIs.

    All lookups require a USPTO API key (https://account.uspto.gov/api-manager/),
    supplied either to the constructor or via the USPTO_API_KEY env var.
    """

    TSDR_BASE_URL = "https://tsdrapi.uspto.gov/ts/cd"
    ASSIGNMENT_BASE_URL = "https://assignment-api.uspto.gov/trademark"

    def __init__(self, api_key: Optional[str] = None):
        """
        Initialize client with API key.

        Args:
            api_key: USPTO API key (if not provided, uses USPTO_API_KEY env var)

        Raises:
            ValueError: If no API key is given and USPTO_API_KEY is unset.
        """
        self.api_key = api_key or os.getenv("USPTO_API_KEY")
        if not self.api_key:
            raise ValueError("API key required. Set USPTO_API_KEY environment variable or pass to constructor.")
        self.headers = {"X-Api-Key": self.api_key}

    def _fetch_json(self, url: str) -> Optional[Dict]:
        """GET *url* with the API-key header; return parsed JSON, or None on HTTP 404.

        Non-404 HTTP errors are re-raised so callers see real failures.
        """
        try:
            response = requests.get(url, headers=self.headers)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                return None
            raise

    def get_trademark_by_serial(self, serial_number: str) -> Optional[Dict]:
        """
        Get trademark information by serial number.

        Args:
            serial_number: Trademark serial number (e.g., "87654321")

        Returns:
            Trademark data dictionary or None if not found
        """
        return self._fetch_json(f"{self.TSDR_BASE_URL}/casedocs/sn{serial_number}/info.json")

    def get_trademark_by_registration(self, registration_number: str) -> Optional[Dict]:
        """
        Get trademark information by registration number.

        Args:
            registration_number: Trademark registration number (e.g., "5678901")

        Returns:
            Trademark data dictionary or None if not found
        """
        return self._fetch_json(f"{self.TSDR_BASE_URL}/casedocs/rn{registration_number}/info.json")

    def _lookup(self, serial_or_registration: str) -> Optional[Dict]:
        """Resolve a number as a serial first, then fall back to registration."""
        data = self.get_trademark_by_serial(serial_or_registration)
        if not data:
            data = self.get_trademark_by_registration(serial_or_registration)
        return data

    def _case_section(self, serial_or_registration: str) -> Dict:
        """Return the 'TradeMarkAppln' payload for a case, or {} when absent."""
        data = self._lookup(serial_or_registration)
        if not data:
            return {}
        return data.get('TradeMarkAppln', {})

    def get_trademark_status(self, serial_or_registration: str) -> Dict[str, Any]:
        """
        Get current status summary for a trademark.

        Args:
            serial_or_registration: Serial or registration number

        Returns:
            Status summary dictionary (empty when the case is not found) with:
            - mark_text: Text of the mark
            - status: Current status
            - filing_date: Application filing date
            - registration_number: Registration number if registered
            - registration_date: Registration date if registered
        """
        data = self._lookup(serial_or_registration)
        if not data:
            return {}
        tm = data.get('TradeMarkAppln', {})
        return {
            'mark_text': tm.get('MarkVerbalElementText'),
            'status': tm.get('MarkCurrentStatusExternalDescriptionText'),
            'status_date': tm.get('MarkCurrentStatusDate'),
            'filing_date': tm.get('ApplicationDate'),
            'application_number': tm.get('ApplicationNumber'),
            'registration_number': tm.get('RegistrationNumber'),
            'registration_date': tm.get('RegistrationDate'),
            'mark_drawing_code': tm.get('MarkDrawingCode'),
            'is_registered': tm.get('RegistrationNumber') is not None
        }

    def get_goods_and_services(self, serial_or_registration: str) -> List[Dict]:
        """
        Get goods and services classification for a trademark.

        Args:
            serial_or_registration: Serial or registration number

        Returns:
            List of goods/services entries with classes ([] when not found)
        """
        return self._case_section(serial_or_registration).get('GoodsAndServices', [])

    def get_owner_info(self, serial_or_registration: str) -> List[Dict]:
        """
        Get owner/applicant information for a trademark.

        Args:
            serial_or_registration: Serial or registration number

        Returns:
            List of owner entries ([] when not found)
        """
        return self._case_section(serial_or_registration).get('Owners', [])

    def get_prosecution_history(self, serial_or_registration: str) -> List[Dict]:
        """
        Get prosecution history for a trademark.

        Args:
            serial_or_registration: Serial or registration number

        Returns:
            List of prosecution events ([] when not found)
        """
        return self._case_section(serial_or_registration).get('ProsecutionHistoryEntry', [])

    def check_trademark_health(self, serial_or_registration: str) -> Dict[str, Any]:
        """
        Check trademark health and identify issues.

        Args:
            serial_or_registration: Serial or registration number

        Returns:
            Health check dictionary with alerts and status
        """
        status = self.get_trademark_status(serial_or_registration)
        if not status:
            return {'error': 'Trademark not found'}
        # Fix: the TSDR payload may carry status=None (key present, value None),
        # which made `.get('status', '').upper()` raise AttributeError.
        current_status = (status.get('status') or '').upper()
        alerts = []
        # Map problematic/normal statuses to a single human-readable alert.
        if 'ABANDON' in current_status:
            alerts.append('β οΈ ABANDONED - Mark is no longer active')
        elif 'CANCELLED' in current_status:
            alerts.append('β οΈ CANCELLED - Registration cancelled')
        elif 'EXPIRED' in current_status:
            alerts.append('β οΈ EXPIRED - Registration has expired')
        elif 'SUSPENDED' in current_status:
            alerts.append('βΈοΈ SUSPENDED - Examination suspended')
        elif 'PUBLISHED' in current_status:
            alerts.append('π’ PUBLISHED - In opposition period')
        elif 'REGISTERED' in current_status:
            # Fix: this literal was split across two physical lines inside
            # single quotes (a SyntaxError); rejoined onto one line.
            alerts.append('β ACTIVE - Mark is registered and active')
        elif 'PENDING' in current_status:
            alerts.append('β³ PENDING - Application under examination')
        return {
            'mark': status.get('mark_text'),
            'status': current_status,
            'status_date': status.get('status_date'),
            'alerts': alerts,
            # 'β οΈ' marks the alerts that require action (abandoned/cancelled/expired)
            'needs_attention': any('β οΈ' in a for a in alerts)
        }
def main() -> None:
    """Command-line interface for trademark search.

    Parses exactly one (mutually exclusive, required) lookup option, runs the
    matching TrademarkClient call, and prints the result as JSON on stdout.
    Exits with status 1 when nothing is found or an unexpected error occurs.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='Query USPTO Trademark Status & Document Retrieval (TSDR) API',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
# Get trademark by serial number
%(prog)s --serial 87654321
# Get trademark by registration number
%(prog)s --registration 5678901
# Get status summary
%(prog)s --status 87654321
# Check trademark health
%(prog)s --health 87654321
# Get goods and services
%(prog)s --goods 87654321
# Get owner information
%(prog)s --owner 87654321
# Get prosecution history
%(prog)s --prosecution 87654321
Environment:
Set USPTO_API_KEY environment variable with your API key from:
https://account.uspto.gov/api-manager/
"""
    )
    # Main operation arguments (mutually exclusive, exactly one required)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--serial', '-s', help='Get trademark by serial number')
    group.add_argument('--registration', '-r', help='Get trademark by registration number')
    group.add_argument('--status', help='Get status summary (serial or registration number)')
    group.add_argument('--health', help='Check trademark health (serial or registration number)')
    group.add_argument('--goods', '-g', help='Get goods and services (serial or registration number)')
    group.add_argument('--owner', '-o', help='Get owner information (serial or registration number)')
    group.add_argument('--prosecution', '-p', help='Get prosecution history (serial or registration number)')
    # API key option
    parser.add_argument('--api-key', '-k', help='USPTO API key (overrides USPTO_API_KEY env var)')
    args = parser.parse_args()
    try:
        client = TrademarkClient(api_key=args.api_key)
        # The required mutually-exclusive group guarantees exactly one branch
        # runs, so `result` is always bound after this chain.
        if args.serial:
            result = client.get_trademark_by_serial(args.serial)
        elif args.registration:
            result = client.get_trademark_by_registration(args.registration)
        elif args.status:
            result = client.get_trademark_status(args.status)
        elif args.health:
            result = client.check_trademark_health(args.health)
        elif args.goods:
            result = client.get_goods_and_services(args.goods)
        elif args.owner:
            result = client.get_owner_info(args.owner)
        elif args.prosecution:
            result = client.get_prosecution_history(args.prosecution)
        if result:
            print(json.dumps(result, indent=2))
        else:
            # Recover whichever identifier the user supplied for the message.
            number = (args.serial or args.registration or args.status or
                      args.health or args.goods or args.owner or args.prosecution)
            print(f"Trademark {number} not found", file=sys.stderr)
            sys.exit(1)
    except ValueError as e:
        # Missing/invalid API key -> report as an argparse usage error (exit 2)
        parser.error(str(e))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/uspto-database/scripts/trademark_client.py",
"license": "MIT License",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/venue-templates/scripts/customize_template.py | #!/usr/bin/env python3
"""
Customize Template Script
Customize LaTeX templates with author information and project details.
Usage:
python customize_template.py --template nature_article.tex --output my_paper.tex
python customize_template.py --template nature_article.tex --title "My Research" --output my_paper.tex
python customize_template.py --interactive
"""
import argparse
import re
from pathlib import Path
def get_skill_path():
    """Return the venue-templates skill root (the parent of this scripts/ dir)."""
    return Path(__file__).parent.parent
def find_template(template_name):
    """Locate *template_name* under the skill's assets/ subdirectories.

    Searches journals/, posters/ and grants/ in that order; returns the first
    matching Path, or None when the template does not exist.
    """
    assets_root = get_skill_path() / "assets"
    for category in ("journals", "posters", "grants"):
        candidate = assets_root / category / template_name
        if candidate.exists():
            return candidate
    return None
def customize_template(template_path, output_path, **kwargs):
    """Fill placeholder text in a LaTeX template with user-supplied values.

    Reads *template_path*, substitutes the first occurrence of each known
    placeholder pattern (title, authors, affiliations, email) with the
    matching keyword argument, and writes the result to *output_path*.
    Empty/missing values leave the corresponding placeholder untouched.
    """
    with open(template_path, 'r') as src:
        text = src.read()

    # (field, candidate regex patterns, replacement) in the order the
    # placeholders appear in the bundled templates.
    substitutions = [
        ('title',
         [r'Insert Your Title Here[^}]*', r'Your [^}]*Title[^}]*Here[^}]*'],
         kwargs.get('title', '')),
        ('authors',
         [r'First Author\\textsuperscript\{1\}, Second Author[^}]*',
          r'First Author.*Second Author.*Third Author'],
         kwargs.get('authors', '')),
        ('affiliations',
         [r'Department Name, Institution Name, City, State[^\\]*',
          r'Department of [^,]*, University Name[^\\]*'],
         kwargs.get('affiliations', '')),
        ('email',
         [r'first\.author@university\.edu',
          r'\[email protected\]'],
         kwargs.get('email', '')),
    ]

    did_replace = False
    for field, patterns, value in substitutions:
        if not value:
            continue
        for pattern in patterns:
            if re.search(pattern, text):
                # Only the first match of each pattern is replaced.
                text = re.sub(pattern, value, text, count=1)
                did_replace = True
                print(f"β Replaced {field}")

    with open(output_path, 'w') as dst:
        dst.write(text)

    if did_replace:
        print(f"\nβ Customized template saved to: {output_path}")
    else:
        print(f"\nβ οΈ Template copied to: {output_path}")
        print(" No customizations applied (no matching placeholders found or no values provided)")

    print(f"\nNext steps:")
    print(f"1. Open {output_path} in your LaTeX editor")
    print(f"2. Replace remaining placeholders")
    print(f"3. Add your content")
    print(f"4. Compile with pdflatex or your preferred LaTeX compiler")
def interactive_mode():
    """Prompt the user to pick a template and supply customization values."""
    print("\n=== Template Customization (Interactive Mode) ===\n")

    assets_root = get_skill_path() / "assets"

    # Show every bundled .tex template, numbered across all categories.
    print("Available templates:\n")
    choices = []
    for category in ["journals", "posters", "grants"]:
        category_dir = assets_root / category
        if category_dir.exists():
            print(f"{category.upper()}:")
            for tex_file in sorted(category_dir.glob("*.tex")):
                choices.append(tex_file)
                print(f" {len(choices)}. {tex_file.name}")
            print()

    # Keep asking until a valid 1-based index is entered.
    while True:
        try:
            selection = int(input(f"Select template (1-{len(choices)}): "))
        except ValueError:
            print("Please enter a valid number")
            continue
        if 1 <= selection <= len(choices):
            template_path = choices[selection - 1]
            break
        print(f"Please enter a number between 1 and {len(choices)}")

    print(f"\nSelected: {template_path.name}\n")

    # Gather optional customization values (blank = skip).
    title = input("Paper title (press Enter to skip): ").strip()
    authors = input("Authors (e.g., 'John Doe, Jane Smith') (press Enter to skip): ").strip()
    affiliations = input("Affiliations (press Enter to skip): ").strip()
    email = input("Corresponding email (press Enter to skip): ").strip()

    # Default the output name to my_<template-stem>.tex.
    default_output = f"my_{template_path.stem}.tex"
    output = input(f"Output filename [{default_output}]: ").strip() or default_output
    output_path = Path(output)

    print()
    customize_template(
        template_path,
        output_path,
        title=title,
        authors=authors,
        affiliations=affiliations,
        email=email
    )
def main() -> None:
    """CLI entry point: customize a template interactively or from arguments."""
    parser = argparse.ArgumentParser(
        description="Customize LaTeX templates with author and project information",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
%(prog)s --interactive
%(prog)s --template nature_article.tex --output my_paper.tex
%(prog)s --template neurips_article.tex --title "My ML Research" --output my_neurips.tex
"""
    )
    parser.add_argument('--template', type=str, help='Template filename')
    parser.add_argument('--output', type=str, help='Output filename')
    parser.add_argument('--title', type=str, help='Paper title')
    parser.add_argument('--authors', type=str, help='Author names')
    parser.add_argument('--affiliations', type=str, help='Institutions/affiliations')
    parser.add_argument('--email', type=str, help='Corresponding author email')
    parser.add_argument('--interactive', action='store_true', help='Run in interactive mode')
    args = parser.parse_args()
    # Interactive mode takes precedence over all other arguments
    if args.interactive:
        interactive_mode()
        return
    # Command-line mode requires both a template and an output path
    if not args.template or not args.output:
        print("Error: --template and --output are required (or use --interactive)")
        parser.print_help()
        return
    # Resolve the template name against the skill's assets directories
    template_path = find_template(args.template)
    if not template_path:
        print(f"Error: Template '{args.template}' not found")
        print("\nSearched in:")
        skill_path = get_skill_path()
        for subdir in ["journals", "posters", "grants"]:
            print(f" - {skill_path}/assets/{subdir}/")
        return
    # Customize and write the output file
    output_path = Path(args.output)
    customize_template(
        template_path,
        output_path,
        title=args.title,
        authors=args.authors,
        affiliations=args.affiliations,
        email=args.email
    )


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/venue-templates/scripts/customize_template.py",
"license": "MIT License",
"lines": 174,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/venue-templates/scripts/query_template.py | #!/usr/bin/env python3
"""
Query Template Script
Search and retrieve venue-specific templates by name, type, or keywords.
Usage:
python query_template.py --venue "Nature" --type "article"
python query_template.py --keyword "machine learning"
python query_template.py --list-all
python query_template.py --venue "NeurIPS" --requirements
"""
import argparse
import os
import json
from pathlib import Path
# Template database
# Static catalog of the LaTeX templates bundled with this skill, keyed by
# category ("journals", "posters", "grants") and then by template id. Each
# entry records the asset filename plus human-readable metadata that the
# search/printing helpers below surface to the user.
TEMPLATES = {
    "journals": {
        "nature": {
            "file": "nature_article.tex",
            "full_name": "Nature",
            "description": "Top-tier multidisciplinary science journal",
            "page_limit": "~3000 words",
            "citation_style": "Superscript numbered",
            "format": "Single column"
        },
        "neurips": {
            "file": "neurips_article.tex",
            "full_name": "NeurIPS (Neural Information Processing Systems)",
            "description": "Top-tier machine learning conference",
            "page_limit": "8 pages + unlimited refs",
            "citation_style": "Numbered [1]",
            "format": "Two column",
            # Only entries with this key trigger the anonymization warning.
            "anonymization": "Required (double-blind)"
        },
        "plos_one": {
            "file": "plos_one.tex",
            "full_name": "PLOS ONE",
            "description": "Open-access multidisciplinary journal",
            "page_limit": "No limit",
            "citation_style": "Vancouver [1]",
            "format": "Single column"
        }
    },
    "posters": {
        "beamerposter": {
            "file": "beamerposter_academic.tex",
            "full_name": "Beamerposter Academic",
            "description": "Classic academic conference poster using beamerposter",
            "size": "A0, customizable",
            "package": "beamerposter"
        }
    },
    "grants": {
        "nsf": {
            "file": "nsf_proposal_template.tex",
            "full_name": "NSF Standard Grant",
            "description": "National Science Foundation research proposal",
            "page_limit": "15 pages (project description)",
            "key_sections": "Project Summary, Project Description, Broader Impacts"
        },
        "nih_specific_aims": {
            "file": "nih_specific_aims.tex",
            "full_name": "NIH Specific Aims Page",
            "description": "Most critical page of NIH proposals",
            "page_limit": "1 page (strictly enforced)",
            "key_sections": "Hook, Hypothesis, 3 Aims, Payoff"
        }
    }
}
def get_skill_path():
    """Path of the venue-templates skill directory (one level above scripts/)."""
    scripts_dir = Path(__file__).parent
    return scripts_dir.parent
def search_templates(venue=None, template_type=None, keyword=None):
    """Return template records from TEMPLATES matching the given filters.

    Args:
        venue: Case-insensitive substring matched against the template id or
            its full name.
        template_type: Category name ('journals', 'posters', 'grants'); None
            or 'all' matches every category.
        keyword: Case-insensitive substring searched across the serialized
            template record.

    Returns:
        List of dicts with id/category/file/full_name/description/details.
    """
    matches = []
    for category_name, entries in TEMPLATES.items():
        # Honor the category filter ('all'/None = no filtering).
        if template_type and template_type != "all" and category_name != template_type:
            continue
        for template_id, entry in entries.items():
            if venue:
                needle = venue.lower()
                if needle not in template_id and needle not in entry.get("full_name", "").lower():
                    continue
            # Keyword search covers every field via the JSON serialization.
            if keyword and keyword.lower() not in json.dumps(entry).lower():
                continue
            matches.append({
                "id": template_id,
                "category": category_name,
                "file": entry["file"],
                "full_name": entry.get("full_name", template_id),
                "description": entry.get("description", ""),
                "details": entry
            })
    return matches
def list_all_templates():
    """Print every template in TEMPLATES, grouped by category."""
    print("\n=== AVAILABLE TEMPLATES ===\n")
    for category_name, entries in TEMPLATES.items():
        print(f"\n{category_name.upper()}:")
        for template_id, entry in entries.items():
            print(f" β’ {entry.get('full_name', template_id)}")
            print(f" File: {entry['file']}")
            if "description" in entry:
                print(f" Description: {entry['description']}")
            print()
def print_template_info(template):
    """Pretty-print one search_templates() record, including its on-disk path."""
    print(f"\n{'='*60}")
    print(f"Template: {template['full_name']}")
    print(f"{'='*60}")
    print(f"Category: {template['category']}")
    print(f"File: {template['file']}")

    details = template['details']
    print(f"\nDescription: {details.get('description', 'N/A')}")

    # Optional metadata rows, printed in the catalog's canonical order.
    for key, label in [
        ('page_limit', 'Page Limit: {}'),
        ('citation_style', 'Citation Style: {}'),
        ('format', 'Format: {}'),
        ('anonymization', 'β οΈ Anonymization: {}'),
        ('size', 'Poster Size: {}'),
        ('package', 'LaTeX Package: {}'),
        ('key_sections', 'Key Sections: {}'),
    ]:
        if key in details:
            print(label.format(details[key]))

    # Resolve the template file relative to the skill's assets directory.
    template_file = get_skill_path() / "assets" / template['category'] / template['file']
    print(f"\nFull Path: {template_file}")
    if template_file.exists():
        print("β Template file exists")
    else:
        print("β Template file not found")
    print()
def print_requirements(venue):
    """Print a condensed formatting-requirements summary for *venue*."""
    hits = search_templates(venue=venue)
    if not hits:
        print(f"No templates found for venue: {venue}")
        return

    template = hits[0]  # Take first match
    details = template['details']

    print(f"\n{'='*60}")
    print(f"FORMATTING REQUIREMENTS: {template['full_name']}")
    print(f"{'='*60}\n")

    # Requirement rows, in the original display order.
    for key, label in [
        ('page_limit', 'π Page Limit: {}'),
        ('format', 'π Format: {}'),
        ('citation_style', 'π Citation Style: {}'),
        ('anonymization', 'π Anonymization: {}'),
        ('size', 'π Size: {}'),
    ]:
        if key in details:
            print(label.format(details[key]))

    print(f"\nπ‘ For detailed requirements, see:")
    # Each category has its own reference document.
    reference_by_category = {
        "journals": "journals_formatting.md",
        "posters": "posters_guidelines.md",
        "grants": "grants_requirements.md",
    }
    doc_name = reference_by_category.get(template['category'])
    if doc_name:
        print(f" {get_skill_path()}/references/{doc_name}")
    print()
def main() -> None:
    """CLI entry point: list templates, show venue requirements, or search."""
    parser = argparse.ArgumentParser(
        description="Query venue-specific LaTeX templates",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
%(prog)s --list-all
%(prog)s --venue "Nature" --type journals
%(prog)s --keyword "machine learning"
%(prog)s --venue "NeurIPS" --requirements
"""
    )
    parser.add_argument('--venue', type=str, help='Venue name (e.g., "Nature", "NeurIPS")')
    parser.add_argument('--type', type=str, choices=['journals', 'posters', 'grants', 'all'],
                        help='Template type')
    parser.add_argument('--keyword', type=str, help='Search keyword')
    parser.add_argument('--list-all', action='store_true', help='List all available templates')
    parser.add_argument('--requirements', action='store_true',
                        help='Show formatting requirements for venue')
    args = parser.parse_args()
    # List all templates and exit
    if args.list_all:
        list_all_templates()
        return
    # Show requirements (needs a venue to look up)
    if args.requirements:
        if not args.venue:
            print("Error: --requirements requires --venue")
            parser.print_help()
            return
        print_requirements(args.venue)
        return
    # Search for templates; with no filters at all, just show usage
    if not any([args.venue, args.type, args.keyword]):
        parser.print_help()
        return
    results = search_templates(venue=args.venue, template_type=args.type, keyword=args.keyword)
    if not results:
        print("No templates found matching your criteria.")
        return
    print(f"\nFound {len(results)} template(s):\n")
    for result in results:
        print_template_info(result)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/venue-templates/scripts/query_template.py",
"license": "MIT License",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/scientific/venue-templates/scripts/validate_format.py | #!/usr/bin/env python3
"""
Validate Format Script
Check if document meets venue-specific formatting requirements.
Usage:
python validate_format.py --file my_paper.pdf --venue "Nature" --check-all
python validate_format.py --file my_paper.pdf --venue "NeurIPS" --check page-count,margins
python validate_format.py --file my_paper.pdf --venue "PLOS ONE" --report validation_report.txt
"""
import argparse
import subprocess
from pathlib import Path
import re
# Venue requirements database
# Per-venue formatting rules used by the check_* functions below. Keys are
# normalized venue names (lowercased, spaces -> underscores); margins are in
# centimeters and font sizes in points.
VENUE_REQUIREMENTS = {
    "nature": {
        "page_limit": 5,  # Approximate for ~3000 words
        "margins": {"top": 2.5, "bottom": 2.5, "left": 2.5, "right": 2.5},  # cm
        "font_size": 12,  # pt
        "font_family": "Times",
        "line_spacing": "double"
    },
    "neurips": {
        "page_limit": 8,  # Excluding refs
        "margins": {"top": 2.54, "bottom": 2.54, "left": 2.54, "right": 2.54},  # cm (1 inch)
        "font_size": 10,
        "font_family": "Times",
        "format": "two-column"
    },
    "plos_one": {
        "page_limit": None,  # No limit
        "margins": {"top": 2.54, "bottom": 2.54, "left": 2.54, "right": 2.54},
        "font_size": 10,
        "font_family": "Arial",
        "line_spacing": "double"
    },
    "nsf": {
        "page_limit": 15,  # Project description
        "margins": {"top": 2.54, "bottom": 2.54, "left": 2.54, "right": 2.54},  # 1 inch required
        "font_size": 11,  # Minimum
        "font_family": "Times Roman",
        "line_spacing": "single or double"
    },
    "nih": {
        "page_limit": 12,  # Research strategy
        "margins": {"top": 1.27, "bottom": 1.27, "left": 1.27, "right": 1.27},  # 0.5 inch minimum
        "font_size": 11,  # Arial 11pt minimum
        "font_family": "Arial",
        "line_spacing": "any"
    }
}
def get_pdf_info(pdf_path):
    """Run `pdfinfo` on *pdf_path* and return its key/value output as a dict.

    Returns None when pdfinfo is not installed or exits with an error.
    """
    try:
        proc = subprocess.run(
            ['pdfinfo', str(pdf_path)],
            capture_output=True,
            text=True,
            check=True
        )
    except FileNotFoundError:
        # poppler-utils not installed; degrade gracefully.
        print("β οΈ pdfinfo not found. Install poppler-utils for full PDF analysis.")
        print(" macOS: brew install poppler")
        print(" Linux: sudo apt-get install poppler-utils")
        return None
    except subprocess.CalledProcessError as e:
        print(f"Error running pdfinfo: {e}")
        return None

    info = {}
    for raw_line in proc.stdout.split('\n'):
        key, sep, value = raw_line.partition(':')
        if sep:  # only lines of the form "Key: value"
            info[key.strip()] = value.strip()
    return info
def check_page_count(pdf_path, venue_reqs):
    """Compare the PDF's page count against the venue's page limit.

    Returns a result dict with 'status' (pass/fail/skip) and 'message'.
    """
    pdf_info = get_pdf_info(pdf_path)
    if not pdf_info:
        return {"status": "skip", "message": "Could not determine page count"}

    page_total = int(pdf_info.get('Pages', 0))
    limit = venue_reqs.get('page_limit')

    # A venue with no limit always passes.
    if limit is None:
        return {"status": "pass", "message": f"No page limit. Document has {page_total} pages."}
    if page_total <= limit:
        return {"status": "pass", "message": f"β Page count OK: {page_total}/{limit} pages"}
    return {"status": "fail", "message": f"β Page count exceeded: {page_total}/{limit} pages"}
def check_margins(pdf_path, venue_reqs):
    """Report the venue's margin requirements for manual verification.

    Accurate margin measurement would require parsing the PDF content, so
    this only surfaces the requirement (status 'info'), or 'skip' when the
    venue defines none.
    """
    required = venue_reqs.get('margins', {})
    if not required:
        return {"status": "skip", "message": "No margin requirements specified"}
    return {
        "status": "info",
        "message": f"βΉοΈ Required margins: {required} cm (manual verification recommended)"
    }
def check_fonts(pdf_path, venue_reqs):
    """List fonts embedded in the PDF (via `pdffonts`) next to the requirement.

    Returns status 'info' with the font listing, or 'skip' when pdffonts is
    unavailable or fails.
    """
    try:
        proc = subprocess.run(
            ['pdffonts', str(pdf_path)],
            capture_output=True,
            text=True,
            check=True
        )
    except FileNotFoundError:
        return {"status": "skip", "message": "pdffonts not available"}
    except subprocess.CalledProcessError:
        return {"status": "skip", "message": "Could not extract font information"}

    # First column of each data row is the font name (first two lines are the header).
    fonts_found = [line.split()[0] for line in proc.stdout.split('\n')[2:] if line.strip()]

    req_font = venue_reqs.get('font_family', '')
    req_size = venue_reqs.get('font_size')

    message = f"βΉοΈ Fonts found: {', '.join(set(fonts_found))}\n"
    message += f" Required: {req_font}"
    if req_size:
        message += f" {req_size}pt minimum"
    return {"status": "info", "message": message}
def validate_document(pdf_path, venue, checks):
    """Run the requested formatting checks against *pdf_path* for *venue*.

    Args:
        pdf_path: Path of the PDF to validate.
        venue: Venue name (matched case-insensitively against VENUE_REQUIREMENTS).
        checks: List of check names; 'all' enables every check.

    Returns:
        Dict of check name -> result dict, or None for an unknown venue.
    """
    venue_key = venue.lower().replace(" ", "_")
    if venue_key not in VENUE_REQUIREMENTS:
        print(f"β Unknown venue: {venue}")
        print(f"Available venues: {', '.join(VENUE_REQUIREMENTS.keys())}")
        return

    requirements = VENUE_REQUIREMENTS[venue_key]

    print(f"\n{'='*60}")
    print(f"VALIDATING: {pdf_path.name}")
    print(f"VENUE: {venue}")
    print(f"{'='*60}\n")

    run_everything = 'all' in checks
    results = {}
    if run_everything or 'page-count' in checks:
        results['page-count'] = check_page_count(pdf_path, requirements)
    if run_everything or 'margins' in checks:
        results['margins'] = check_margins(pdf_path, requirements)
    if run_everything or 'fonts' in checks:
        results['fonts'] = check_fonts(pdf_path, requirements)

    for check_name, outcome in results.items():
        print(f"{check_name.upper()}:")
        print(f" {outcome['message']}\n")

    # Summary: only 'fail' statuses block validation ('skip'/'info' do not).
    statuses = [outcome['status'] for outcome in results.values()]
    failures = statuses.count('fail')
    passes = statuses.count('pass')

    print(f"{'='*60}")
    if failures == 0:
        print(f"β VALIDATION PASSED ({passes} checks)")
    else:
        print(f"β VALIDATION FAILED ({failures} issues)")
    print(f"{'='*60}\n")

    return results
def generate_report(pdf_path, venue, results, report_path):
    """Write a plain-text validation report to *report_path*.

    Args:
        pdf_path: Path of the validated PDF (must exist; its ctime is reported).
        venue: Venue name the document was validated against.
        results: Mapping of check name -> {'status': ..., 'message': ...}.
        report_path: Destination path for the report file.
    """
    import time  # local import: only needed for formatting the timestamp

    with open(report_path, 'w') as f:
        f.write("Validation Report\n")
        f.write(f"{'='*60}\n\n")
        f.write(f"File: {pdf_path}\n")
        f.write(f"Venue: {venue}\n")
        # Bug fix: pathlib.Path has no `ctime` attribute, so the original
        # `Path.ctime(pdf_path)` raised AttributeError. Report the file's
        # change time via stat() instead.
        f.write(f"Date: {time.ctime(pdf_path.stat().st_ctime)}\n\n")
        for check_name, result in results.items():
            f.write(f"{check_name.upper()}:\n")
            f.write(f" Status: {result['status']}\n")
            f.write(f" {result['message']}\n\n")
        # Only 'fail' statuses make the overall summary FAILED.
        failures = sum(1 for r in results.values() if r['status'] == 'fail')
        f.write(f"\nSummary: {'PASSED' if failures == 0 else 'FAILED'}\n")
    print(f"Report saved to: {report_path}")
def main() -> None:
    """CLI entry point: validate a PDF against a venue's formatting rules."""
    parser = argparse.ArgumentParser(
        description="Validate document formatting for venue requirements",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
%(prog)s --file my_paper.pdf --venue "Nature" --check-all
%(prog)s --file my_paper.pdf --venue "NeurIPS" --check page-count,fonts
%(prog)s --file proposal.pdf --venue "NSF" --report validation.txt
"""
    )
    parser.add_argument('--file', type=str, required=True, help='PDF file to validate')
    parser.add_argument('--venue', type=str, required=True, help='Target venue')
    parser.add_argument('--check', type=str, default='all',
                        help='Checks to perform: page-count, margins, fonts, all (comma-separated)')
    parser.add_argument('--check-all', action='store_true', help='Perform all checks')
    parser.add_argument('--report', type=str, help='Save report to file')
    args = parser.parse_args()
    # Check file exists before running any external tools
    pdf_path = Path(args.file)
    if not pdf_path.exists():
        print(f"Error: File not found: {pdf_path}")
        return
    # Parse checks: --check-all overrides the --check list
    if args.check_all:
        checks = ['all']
    else:
        checks = [c.strip() for c in args.check.split(',')]
    # Validate (returns None when the venue is unknown)
    results = validate_document(pdf_path, args.venue, checks)
    # Generate report if requested (skipped when validation produced nothing)
    if args.report and results:
        generate_report(pdf_path, args.venue, results, Path(args.report))


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/scientific/venue-templates/scripts/validate_format.py",
"license": "MIT License",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/creative-design/executing-marketing-campaigns/scripts/marketing_utils.py | #!/usr/bin/env python3
"""
Marketing Campaign Utility Script
Helps with:
- Generating UTM parameters for campaign links
- Validating UTM parameter formats
- Creating campaign tracking URLs
- Exporting campaign tracking configuration
Usage:
python marketing_utils.py --action generate_utm --campaign "Q3_Product_Launch" --channel "email"
python marketing_utils.py --action validate_url --url "https://example.com?utm_source=email&utm_campaign=Q3"
python marketing_utils.py --action batch_generate --file campaigns.csv
"""
import argparse
import csv
import json
import sys
from datetime import datetime
from typing import Dict, List, Optional
from urllib.parse import urlencode, urlparse, parse_qs
def generate_utm_parameters(
source: str,
medium: str,
campaign: str,
content: Optional[str] = None,
term: Optional[str] = None
) -> Dict[str, str]:
"""
Generate UTM parameters following best practices.
Args:
source: utm_source (e.g., "email", "google", "facebook") - REQUIRED
medium: utm_medium (e.g., "newsletter", "cpc", "organic") - REQUIRED
campaign: utm_campaign (e.g., "Q3_product_launch") - REQUIRED
content: utm_content (e.g., "hero_image", "button_v1") - OPTIONAL
term: utm_term (for paid search keywords) - OPTIONAL
Returns:
Dictionary of UTM parameters
"""
# Validate required parameters
if not all([source, medium, campaign]):
raise ValueError("source, medium, and campaign are required")
# Validate format (lowercase, underscores, no spaces)
for param_name, param_value in [("source", source), ("medium", medium), ("campaign", campaign)]:
if not param_value.replace("_", "").replace("-", "").isalnum():
raise ValueError(f"{param_name} contains invalid characters. Use only alphanumeric, hyphens, and underscores.")
if " " in param_value:
raise ValueError(f"{param_name} cannot contain spaces. Use underscores instead.")
utm_params = {
"utm_source": source.lower(),
"utm_medium": medium.lower(),
"utm_campaign": campaign.lower()
}
if content:
utm_params["utm_content"] = content.lower()
if term:
utm_params["utm_term"] = term.lower()
return utm_params
def build_tracking_url(base_url: str, utm_params: Dict[str, str]) -> str:
    """
    Build a complete tracking URL with UTM parameters.

    Args:
        base_url: The destination URL (e.g., "https://example.com/pricing")
        utm_params: Dictionary of UTM parameters

    Returns:
        Complete URL with UTM parameters merged into the query string.

    Raises:
        ValueError: If base_url does not start with http:// or https://.
    """
    if not base_url.startswith(("http://", "https://")):
        raise ValueError("URL must start with http:// or https://")

    # Parse existing URL parameters (parse_qs returns lists; keep first value)
    parsed = urlparse(base_url)
    existing_params_flat = {k: v[0] for k, v in parse_qs(parsed.query).items()}

    # Merge with UTM parameters (UTM takes precedence on key collisions)
    all_params = {**existing_params_flat, **utm_params}

    # Bug fix: rebuild via the parsed result instead of string concatenation,
    # so a "#fragment" on the base URL is preserved and a trailing "?" on
    # the input can no longer produce a malformed "??" URL.
    return parsed._replace(query=urlencode(all_params)).geturl()
def validate_tracking_url(url: str) -> Dict:
    """
    Validate that a tracking URL has proper UTM parameters.

    Args:
        url: URL to validate

    Returns:
        Dictionary with validation results: overall validity, which of
        the three required utm_* keys are missing, and the parsed
        utm/non-utm query parameters (first value of each).
    """
    query_params = parse_qs(urlparse(url).query)

    # The three parameters every tracking URL must carry
    required = ["utm_source", "utm_medium", "utm_campaign"]
    missing = [name for name in required if name not in query_params]

    utm_found: Dict = {}
    other_found: Dict = {}
    for key, values in query_params.items():
        first_value = values[0] if values else None
        if key.startswith("utm_"):
            utm_found[key] = first_value
        else:
            other_found[key] = first_value

    result = {
        "url": url,
        "valid": not missing,
        "missing_parameters": missing,
        "utm_parameters": utm_found,
        "other_parameters": other_found,
    }

    if result["valid"]:
        result["status"] = "β Valid tracking URL"
    else:
        result["status"] = f"β Missing parameters: {', '.join(missing)}"

    return result
def generate_campaign_tracking_sheet(
    campaigns: List[Dict[str, str]],
    base_url: str,
    output_file: Optional[str] = None
) -> List[Dict]:
    """
    Generate tracking URLs for multiple campaigns.

    Args:
        campaigns: List of campaign dicts with keys: name, channel, content_type
        base_url: Base destination URL
        output_file: Optional CSV file to save results

    Returns:
        List of campaign tracking configurations. Campaigns that fail
        validation are reported on stderr and skipped.
    """
    rows: List[Dict] = []

    for entry in campaigns:
        try:
            params = generate_utm_parameters(
                source=entry.get("channel", "unknown"),
                medium=entry.get("content_type", "content"),
                campaign=entry.get("name", "campaign"),
                content=entry.get("variant", None)
            )
            rows.append({
                "campaign_name": entry.get("name"),
                "channel": entry.get("channel"),
                "content_type": entry.get("content_type"),
                "variant": entry.get("variant", ""),
                "utm_source": params["utm_source"],
                "utm_medium": params["utm_medium"],
                "utm_campaign": params["utm_campaign"],
                "tracking_url": build_tracking_url(base_url, params),
                "generated_date": datetime.now().isoformat(),
            })
        except Exception as exc:
            print(f"Error processing campaign {entry.get('name')}: {str(exc)}", file=sys.stderr)

    # Persist to CSV only when a destination was given and we produced rows
    if output_file and rows:
        try:
            with open(output_file, 'w', newline='') as handle:
                writer = csv.DictWriter(handle, fieldnames=rows[0].keys())
                writer.writeheader()
                writer.writerows(rows)
            print(f"β Campaign tracking sheet saved to {output_file}")
        except Exception as exc:
            print(f"Error saving to file: {str(exc)}", file=sys.stderr)

    return rows
def campaign_summary(tracking_config: List[Dict]) -> None:
"""Print a summary of generated campaigns."""
if not tracking_config:
print("No campaigns to summarize")
return
print("\n" + "="*80)
print("CAMPAIGN TRACKING SUMMARY")
print("="*80 + "\n")
# Group by channel
by_channel = {}
for config in tracking_config:
channel = config["channel"]
if channel not in by_channel:
by_channel[channel] = []
by_channel[channel].append(config)
for channel, configs in sorted(by_channel.items()):
print(f"\nπ {channel.upper()} ({len(configs)} campaigns)")
print("-" * 80)
for config in configs:
print(f" Campaign: {config['campaign_name']}")
print(f" URL: {config['tracking_url']}")
if config.get('variant'):
print(f" Variant: {config['variant']}")
print()
def main():
    """Command-line entry point for the marketing utility.

    Defines four subcommands (generate_utm, build_url, validate, batch)
    and dispatches to the corresponding helper. Errors in the helpers are
    reported on stderr and terminate the process with exit code 1.

    NOTE(review): the module docstring advertises an "--action" flag and
    subcommand names "validate_url"/"batch_generate", but the parser below
    actually uses positional subcommands named "validate"/"batch" — the
    docstring appears out of date; confirm against intended CLI.
    """
    parser = argparse.ArgumentParser(
        description="Marketing Campaign Utility - Generate and validate tracking URLs"
    )
    subparsers = parser.add_subparsers(dest="action", help="Action to perform")

    # Subcommand: generate UTM parameters only (no URL)
    utm_parser = subparsers.add_parser("generate_utm", help="Generate UTM parameters")
    utm_parser.add_argument("--source", required=True, help="utm_source (e.g., email, google, facebook)")
    utm_parser.add_argument("--medium", required=True, help="utm_medium (e.g., newsletter, cpc, organic)")
    utm_parser.add_argument("--campaign", required=True, help="utm_campaign (e.g., Q3_launch)")
    utm_parser.add_argument("--content", help="utm_content (optional, e.g., hero_image)")
    utm_parser.add_argument("--term", help="utm_term (optional, for paid search)")

    # Subcommand: build a full tracking URL (no --term here, unlike generate_utm)
    url_parser = subparsers.add_parser("build_url", help="Build complete tracking URL")
    url_parser.add_argument("--url", required=True, help="Base URL destination")
    url_parser.add_argument("--source", required=True, help="utm_source")
    url_parser.add_argument("--medium", required=True, help="utm_medium")
    url_parser.add_argument("--campaign", required=True, help="utm_campaign")
    url_parser.add_argument("--content", help="utm_content (optional)")

    # Subcommand: validate an existing tracking URL
    validate_parser = subparsers.add_parser("validate", help="Validate tracking URL")
    validate_parser.add_argument("--url", required=True, help="URL to validate")

    # Subcommand: batch-process campaigns from a CSV file
    batch_parser = subparsers.add_parser("batch", help="Process batch of campaigns from CSV")
    batch_parser.add_argument("--file", required=True, help="CSV file with campaigns")
    batch_parser.add_argument("--url", required=True, help="Base URL for all campaigns")
    batch_parser.add_argument("--output", help="Output CSV file")

    args = parser.parse_args()

    if args.action == "generate_utm":
        try:
            params = generate_utm_parameters(
                source=args.source,
                medium=args.medium,
                campaign=args.campaign,
                content=args.content,
                term=args.term
            )
            print("\nβ UTM Parameters Generated:")
            print(json.dumps(params, indent=2))
            print("\nQuery string:")
            print(urlencode(params))
        except Exception as e:
            print(f"β Error: {str(e)}", file=sys.stderr)
            sys.exit(1)
    elif args.action == "build_url":
        try:
            params = generate_utm_parameters(
                source=args.source,
                medium=args.medium,
                campaign=args.campaign,
                content=args.content
            )
            tracking_url = build_tracking_url(args.url, params)
            print("\nβ Tracking URL Generated:")
            print(tracking_url)
        except Exception as e:
            print(f"β Error: {str(e)}", file=sys.stderr)
            sys.exit(1)
    elif args.action == "validate":
        # Print the human-readable status line first, then the raw details
        result = validate_tracking_url(args.url)
        print("\n" + result["status"])
        print(json.dumps({k: v for k, v in result.items() if k != "status"}, indent=2))
    elif args.action == "batch":
        try:
            campaigns = []
            with open(args.file, 'r') as f:
                reader = csv.DictReader(f)
                campaigns = list(reader)
            if not campaigns:
                print(f"β No campaigns found in {args.file}", file=sys.stderr)
                sys.exit(1)
            tracking_config = generate_campaign_tracking_sheet(
                campaigns,
                args.url,
                args.output
            )
            campaign_summary(tracking_config)
        except FileNotFoundError:
            print(f"β File not found: {args.file}", file=sys.stderr)
            sys.exit(1)
        except Exception as e:
            print(f"β Error: {str(e)}", file=sys.stderr)
            sys.exit(1)
    else:
        # No subcommand given: show usage instead of failing silently
        parser.print_help()


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/creative-design/executing-marketing-campaigns/scripts/marketing_utils.py",
"license": "MIT License",
"lines": 267,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/devops-iac-engineer/scripts/devops_utils.py | #!/usr/bin/env python3
"""
DevOps Utility Scripts
Provides helper functions for common DevOps tasks
"""
import argparse
import sys
import os
import json
import yaml
import subprocess
from pathlib import Path
from typing import Dict, List, Optional
class TerraformHelper:
    """Helper functions for Terraform operations"""

    @staticmethod
    def init_project(name: str, cloud: str, region: str) -> None:
        """Initialize a new Terraform project structure.

        Creates an environments/ tree (dev, staging, prod) plus empty
        module directories, writes a main.tf per environment pointing at
        an S3 remote-state backend, and drops a README at the project root.

        Args:
            name: Project name; used as the root directory and embedded
                in the state-bucket name.
            cloud: Provider short name as used in the HashiCorp registry
                (e.g. "aws", "azure", "gcp").
            region: Default region written into the backend and provider
                blocks.

        NOTE(review): the S3 backend is emitted even for azure/gcp —
        confirm whether provider-specific backends were intended.
        """
        base_path = Path(name)

        # Create directory structure
        directories = [
            base_path / "environments" / "dev",
            base_path / "environments" / "staging",
            base_path / "environments" / "prod",
            base_path / "modules" / "vpc",
            base_path / "modules" / "compute",
            base_path / "modules" / "database",
        ]
        for directory in directories:
            directory.mkdir(parents=True, exist_ok=True)
            print(f"Created: {directory}")

        # Create main.tf template. Doubled braces escape literal { } in the
        # f-string; "{{{{env}}}}" renders as the literal "{{env}}" placeholder
        # which is substituted per-environment via str.replace below.
        main_tf_content = f"""terraform {{
  required_version = ">= 1.6.0"
  required_providers {{
    {cloud} = {{
      source = "hashicorp/{cloud}"
      version = "~> 5.0"
    }}
  }}
  backend "s3" {{
    bucket = "terraform-state-{name}"
    key = "{{{{env}}}}/terraform.tfstate"
    region = "{region}"
    encrypt = true
    dynamodb_table = "terraform-state-lock"
  }}
}}
provider "{cloud}" {{
  region = "{region}"
}}
"""
        for env in ["dev", "staging", "prod"]:
            env_path = base_path / "environments" / env
            # Substitute the per-environment state key into the template
            (env_path / "main.tf").write_text(main_tf_content.replace("{{env}}", env))
            (env_path / "variables.tf").write_text(
                'variable "environment" {\n description = "Environment name"\n type = string\n default = "'
                + env
                + '"\n}\n'
            )
            (env_path / "outputs.tf").write_text("# Define outputs here\n")
            print(f"Created Terraform files in: {env_path}")

        # Create README
        readme_content = f"""# {name} Infrastructure
## Structure
- `environments/`: Environment-specific configurations
- `modules/`: Reusable Terraform modules
## Usage
### Initialize
```bash
cd environments/dev
terraform init
```
### Plan
```bash
terraform plan -out=tfplan
```
### Apply
```bash
terraform apply tfplan
```
"""
        (base_path / "README.md").write_text(readme_content)
        print(f"\nProject '{name}' initialized successfully!")

    @staticmethod
    def validate_hcl(file_path: str) -> bool:
        """Check Terraform file formatting via `terraform fmt -check`.

        Args:
            file_path: Path to the .tf file to check.

        Returns:
            True when the file is already canonically formatted; False
            when it needs formatting or the terraform binary is missing.
        """
        try:
            result = subprocess.run(
                ["terraform", "fmt", "-check", file_path],
                capture_output=True,
                text=True,
            )
            # terraform fmt -check exits 0 only when no changes are needed
            if result.returncode == 0:
                print(f"β {file_path} is properly formatted")
                return True
            else:
                print(f"β {file_path} needs formatting")
                print(result.stdout)
                return False
        except FileNotFoundError:
            # The terraform CLI itself is not installed / not on PATH
            print("Error: terraform command not found")
            return False
class KubernetesHelper:
    """Helper functions for Kubernetes operations"""

    @staticmethod
    def validate_manifest(file_path: str, schema_version: str = "1.28") -> bool:
        """Validate Kubernetes manifest syntax.

        Checks every YAML document in the file for the required
        apiVersion / kind / metadata top-level fields. `schema_version`
        is accepted for interface compatibility; the body performs no
        version-specific schema validation.
        """
        try:
            with open(file_path, "r") as handle:
                documents = list(yaml.safe_load_all(handle))

            print(f"Validating {len(documents)} document(s) in {file_path}")

            for position, document in enumerate(documents, start=1):
                # Skip empty documents (e.g. trailing "---")
                if not document:
                    continue
                # Basic structural validation: bail on the first missing field
                if "apiVersion" not in document:
                    print(f"β Document {position}: Missing apiVersion")
                    return False
                if "kind" not in document:
                    print(f"β Document {position}: Missing kind")
                    return False
                if "metadata" not in document:
                    print(f"β Document {position}: Missing metadata")
                    return False
                print(
                    f"β Document {position}: {document['kind']} '{document['metadata'].get('name', 'unnamed')}'"
                )

            print(f"β All documents in {file_path} are valid")
            return True
        except yaml.YAMLError as err:
            print(f"β YAML syntax error: {err}")
            return False
        except FileNotFoundError:
            print(f"β File not found: {file_path}")
            return False

    @staticmethod
    def generate_deployment(
        name: str, image: str, namespace: str = "default", replicas: int = 3
    ) -> str:
        """Generate a Kubernetes Deployment manifest as a YAML string."""
        # Container spec with resource limits and health probes
        container = {
            "name": name,
            "image": image,
            "ports": [{"containerPort": 8080}],
            "resources": {
                "requests": {"memory": "256Mi", "cpu": "250m"},
                "limits": {"memory": "512Mi", "cpu": "500m"},
            },
            "livenessProbe": {
                "httpGet": {"path": "/healthz", "port": 8080},
                "initialDelaySeconds": 30,
                "periodSeconds": 10,
            },
            "readinessProbe": {
                "httpGet": {"path": "/ready", "port": 8080},
                "initialDelaySeconds": 10,
                "periodSeconds": 5,
            },
        }

        # The app label dicts are built independently at each site so that
        # yaml.dump does not collapse them into anchors/aliases.
        manifest = {
            "apiVersion": "apps/v1",
            "kind": "Deployment",
            "metadata": {"name": name, "namespace": namespace, "labels": {"app": name}},
            "spec": {
                "replicas": replicas,
                "selector": {"matchLabels": {"app": name}},
                "template": {
                    "metadata": {"labels": {"app": name}},
                    "spec": {"containers": [container]},
                },
            },
        }
        return yaml.dump(manifest, default_flow_style=False, sort_keys=False)
class GitOpsHelper:
    """Helper functions for GitOps workflows"""

    @staticmethod
    def init_gitops(tool: str, environments: List[str]) -> None:
        """Initialize GitOps directory structure for ArgoCD or Flux.

        Creates one manifest per environment under ./gitops. Unknown
        tool names create nothing; the final status line prints either way.
        """
        root = Path("gitops")
        tool_key = tool.lower()

        if tool_key == "argocd":
            # ArgoCD: one Application manifest per environment
            for env_name in environments:
                target_dir = root / "applications" / env_name
                target_dir.mkdir(parents=True, exist_ok=True)

                application = {
                    "apiVersion": "argoproj.io/v1alpha1",
                    "kind": "Application",
                    "metadata": {
                        "name": f"myapp-{env_name}",
                        "namespace": "argocd",
                    },
                    "spec": {
                        "project": "default",
                        "source": {
                            "repoURL": "https://github.com/myorg/myapp.git",
                            "targetRevision": "HEAD",
                            "path": f"kubernetes/overlays/{env_name}",
                        },
                        "destination": {
                            "server": "https://kubernetes.default.svc",
                            "namespace": env_name,
                        },
                        "syncPolicy": {
                            "automated": {
                                "prune": True,
                                "selfHeal": True,
                            }
                        },
                    },
                }
                with open(target_dir / "application.yaml", "w") as handle:
                    yaml.dump(application, handle, default_flow_style=False)
                print(f"Created ArgoCD application for: {env_name}")

        elif tool_key == "flux":
            # Flux: one Kustomization per environment cluster directory
            for env_name in environments:
                target_dir = root / "clusters" / env_name
                target_dir.mkdir(parents=True, exist_ok=True)

                kustomization = {
                    "apiVersion": "kustomize.toolkit.fluxcd.io/v1",
                    "kind": "Kustomization",
                    "metadata": {"name": f"myapp-{env_name}", "namespace": "flux-system"},
                    "spec": {
                        "interval": "5m",
                        "path": f"./kubernetes/overlays/{env_name}",
                        "prune": True,
                        "sourceRef": {"kind": "GitRepository", "name": "myapp"},
                    },
                }
                with open(target_dir / "kustomization.yaml", "w") as handle:
                    yaml.dump(kustomization, handle, default_flow_style=False)
                print(f"Created Flux kustomization for: {env_name}")

        print(f"\nGitOps structure initialized for {tool}")
class SecurityHelper:
    """Helper functions for security operations"""

    @staticmethod
    def scan_secrets(directory: str) -> List[str]:
        """Scan a directory tree for potential hardcoded secrets.

        Walks `directory` recursively (skipping VCS/dependency dirs) and
        matches file contents against regex patterns for common secret
        shapes. Matching is heuristic; expect false positives (e.g. the
        broad password/token patterns).

        Args:
            directory: Root directory to scan.

        Returns:
            List of "path:type - excerpt..." strings, one entry per match,
            with the matched text truncated to 20 characters.
        """
        import re

        secret_patterns = {
            "AWS Access Key": r"AKIA[0-9A-Z]{16}",
            "Private Key": r"-----BEGIN (RSA |EC |OPENSSH )?PRIVATE KEY-----",
            "API Key": r"api[_-]?key['\"]?\s*[:=]\s*['\"]?[a-zA-Z0-9]{32,}",
            "Password": r"password['\"]?\s*[:=]\s*['\"]?[^'\"\s]{8,}",
            "Token": r"token['\"]?\s*[:=]\s*['\"]?[a-zA-Z0-9._\-]{20,}",
        }
        # Compile once up front instead of re-compiling for every file
        compiled = {name: re.compile(pattern) for name, pattern in secret_patterns.items()}

        skip_dirs = {".git", "node_modules", "venv", ".terraform"}
        findings = []
        for root, dirs, files in os.walk(directory):
            # Prune in place so os.walk does not descend into skipped dirs
            dirs[:] = [d for d in dirs if d not in skip_dirs]

            for file in files:
                file_path = os.path.join(root, file)
                try:
                    with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
                        content = f.read()
                except OSError:
                    # Unreadable entry (permissions, special file): skip it
                    continue
                for secret_type, pattern in compiled.items():
                    for match in pattern.finditer(content):
                        findings.append(
                            f"{file_path}:{secret_type} - {match.group()[:20]}..."
                        )
        return findings
def main():
    """CLI entry point: nested subcommands for terraform/k8s/gitops/security.

    Each top-level command has its own subparser; a recognized command with
    no (or an unknown) subcommand is a silent no-op — only a completely
    missing command prints help.
    """
    parser = argparse.ArgumentParser(description="DevOps Utility Scripts")
    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    # Terraform commands
    tf_parser = subparsers.add_parser("terraform", help="Terraform utilities")
    tf_subparsers = tf_parser.add_subparsers(dest="subcommand")
    tf_init = tf_subparsers.add_parser("init-project", help="Initialize Terraform project")
    tf_init.add_argument("--name", required=True, help="Project name")
    tf_init.add_argument(
        "--cloud", required=True, choices=["aws", "azure", "gcp"], help="Cloud provider"
    )
    tf_init.add_argument("--region", required=True, help="Default region")
    tf_validate = tf_subparsers.add_parser("validate", help="Validate Terraform files")
    tf_validate.add_argument("--file", required=True, help="File to validate")

    # Kubernetes commands
    k8s_parser = subparsers.add_parser("k8s", help="Kubernetes utilities")
    k8s_subparsers = k8s_parser.add_subparsers(dest="subcommand")
    k8s_validate = k8s_subparsers.add_parser("validate", help="Validate K8s manifest")
    k8s_validate.add_argument("--file", required=True, help="Manifest file")
    k8s_validate.add_argument(
        "--schema-version", default="1.28", help="Kubernetes version"
    )
    k8s_generate = k8s_subparsers.add_parser("generate", help="Generate deployment")
    k8s_generate.add_argument("--name", required=True, help="Deployment name")
    k8s_generate.add_argument("--image", required=True, help="Container image")
    k8s_generate.add_argument("--namespace", default="default", help="Namespace")
    k8s_generate.add_argument("--replicas", type=int, default=3, help="Replica count")

    # GitOps commands
    gitops_parser = subparsers.add_parser("gitops", help="GitOps utilities")
    gitops_subparsers = gitops_parser.add_subparsers(dest="subcommand")
    gitops_init = gitops_subparsers.add_parser("init", help="Initialize GitOps structure")
    gitops_init.add_argument(
        "--tool", required=True, choices=["argocd", "flux"], help="GitOps tool"
    )
    gitops_init.add_argument(
        "--environments", required=True, help="Comma-separated environments"
    )

    # Security commands
    security_parser = subparsers.add_parser("security", help="Security utilities")
    security_subparsers = security_parser.add_subparsers(dest="subcommand")
    security_scan = security_subparsers.add_parser("scan-secrets", help="Scan for secrets")
    security_scan.add_argument("--directory", default=".", help="Directory to scan")

    args = parser.parse_args()

    # Execute commands
    if args.command == "terraform":
        if args.subcommand == "init-project":
            TerraformHelper.init_project(args.name, args.cloud, args.region)
        elif args.subcommand == "validate":
            TerraformHelper.validate_hcl(args.file)
    elif args.command == "k8s":
        if args.subcommand == "validate":
            KubernetesHelper.validate_manifest(args.file, args.schema_version)
        elif args.subcommand == "generate":
            # Manifest is printed to stdout; redirect to a file to save it
            manifest = KubernetesHelper.generate_deployment(
                args.name, args.image, args.namespace, args.replicas
            )
            print(manifest)
    elif args.command == "gitops":
        if args.subcommand == "init":
            environments = [env.strip() for env in args.environments.split(",")]
            GitOpsHelper.init_gitops(args.tool, environments)
    elif args.command == "security":
        if args.subcommand == "scan-secrets":
            findings = SecurityHelper.scan_secrets(args.directory)
            if findings:
                # Non-zero exit so CI pipelines fail when secrets are found
                print(f"β οΈ Found {len(findings)} potential secrets:")
                for finding in findings:
                    print(f"  - {finding}")
                sys.exit(1)
            else:
                print("β No secrets found")
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/devops-iac-engineer/scripts/devops_utils.py",
"license": "MIT License",
"lines": 349,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/security-compliance/scripts/risk_calculator.py | #!/usr/bin/env python3
"""
Risk Assessment Calculator
Calculates risk scores using both qualitative and quantitative methodologies.
Supports risk matrix, ALE calculations, and cost-benefit analysis for controls.
Usage:
python risk_calculator.py --interactive
python risk_calculator.py risks.csv
python risk_calculator.py risks.csv --output risk_report.csv
"""
import argparse
import csv
import json
from dataclasses import dataclass
from typing import List, Dict, Optional
from datetime import datetime
@dataclass
class Risk:
    """Risk assessment data class.

    Holds both the quantitative inputs (asset value, exposure factor,
    ARO) and the qualitative 1-5 ratings used by RiskCalculator.
    """
    id: str  # Unique risk identifier
    name: str  # Human-readable risk name
    asset_value: float  # Asset value in dollars
    exposure_factor: float  # Fraction of asset value lost per occurrence (0-1)
    aro: float  # Annualized Rate of Occurrence
    likelihood_qualitative: int  # 1-5 scale
    impact_qualitative: int  # 1-5 scale
    category: str  # Risk category (free-form label)
    owner: str  # Accountable risk owner
class RiskCalculator:
    """Risk assessment calculator with multiple methodologies"""

    # Risk matrix: (likelihood, impact) -> risk_level
    RISK_MATRIX = {
        (1, 1): "Low", (1, 2): "Low", (1, 3): "Low", (1, 4): "Medium", (1, 5): "Medium",
        (2, 1): "Low", (2, 2): "Low", (2, 3): "Medium", (2, 4): "High", (2, 5): "High",
        (3, 1): "Low", (3, 2): "Medium", (3, 3): "Medium", (3, 4): "High", (3, 5): "Critical",
        (4, 1): "Medium", (4, 2): "High", (4, 3): "High", (4, 4): "Critical", (4, 5): "Critical",
        (5, 1): "Medium", (5, 2): "High", (5, 3): "Critical", (5, 4): "Critical", (5, 5): "Critical"
    }

    # Remediation SLA (days) per qualitative risk level
    SLA_DAYS = {
        "Critical": 1,
        "High": 7,
        "Medium": 30,
        "Low": 90
    }

    def __init__(self):
        # Registered risks, in insertion order
        self.risks: List["Risk"] = []

    def calculate_quantitative(self, risk: "Risk") -> Dict:
        """Calculate quantitative risk metrics (SLE, ALE)."""
        single_loss = risk.asset_value * risk.exposure_factor
        annual_loss = single_loss * risk.aro
        return {
            "sle": round(single_loss, 2),
            "ale": round(annual_loss, 2),
        }

    def calculate_qualitative(self, risk: "Risk") -> Dict:
        """Calculate qualitative risk metrics (score, level, SLA)."""
        likelihood = risk.likelihood_qualitative
        impact = risk.impact_qualitative
        level = self.RISK_MATRIX.get((likelihood, impact), "Unknown")
        return {
            "risk_score": likelihood * impact,
            "risk_level": level,
            "sla_days": self.SLA_DAYS.get(level, 90),
        }

    def cost_benefit_analysis(self, risk: "Risk", control_cost: float, new_aro: float) -> Dict:
        """Perform cost-benefit analysis for a security control."""
        ale_before = self.calculate_quantitative(risk)["ale"]
        # ALE after the control takes effect, at the reduced occurrence rate
        ale_after = (risk.asset_value * risk.exposure_factor) * new_aro

        annual_savings = ale_before - ale_after
        net_benefit = annual_savings - control_cost
        roi = (net_benefit / control_cost * 100) if control_cost > 0 else 0
        payback = (
            round(control_cost / annual_savings, 2)
            if annual_savings > 0
            else float('inf')
        )
        return {
            "ale_before": round(ale_before, 2),
            "ale_after": round(ale_after, 2),
            "annual_savings": round(annual_savings, 2),
            "control_cost": control_cost,
            "net_benefit": round(net_benefit, 2),
            "roi_percent": round(roi, 2),
            "recommendation": "Implement" if net_benefit > 0 else "Do not implement",
            "payback_period_years": payback,
        }

    def add_risk(self, risk: "Risk"):
        """Add risk to assessment"""
        self.risks.append(risk)

    def generate_report(self) -> List[Dict]:
        """Generate comprehensive risk report, sorted by ALE descending."""
        rows = []
        for risk in self.risks:
            quant = self.calculate_quantitative(risk)
            qual = self.calculate_qualitative(risk)
            rows.append({
                "Risk ID": risk.id,
                "Risk Name": risk.name,
                "Category": risk.category,
                "Owner": risk.owner,
                "Asset Value": f"${risk.asset_value:,.0f}",
                "Exposure Factor": f"{risk.exposure_factor:.0%}",
                "ARO": f"{risk.aro:.2f}",
                "SLE": f"${quant['sle']:,.0f}",
                "ALE": f"${quant['ale']:,.0f}",
                "Likelihood": risk.likelihood_qualitative,
                "Impact": risk.impact_qualitative,
                "Risk Score": qual["risk_score"],
                "Risk Level": qual["risk_level"],
                "Remediation SLA": f"{qual['sla_days']} days",
            })

        def ale_dollars(row: Dict) -> float:
            # Parse the formatted "$12,345" string back into a float for sorting
            return float(row["ALE"].replace("$", "").replace(",", ""))

        rows.sort(key=ale_dollars, reverse=True)
        return rows

    def generate_summary(self) -> Dict:
        """Generate summary statistics across all registered risks."""
        if not self.risks:
            return {}

        risk_ales = [(r, self.calculate_quantitative(r)["ale"]) for r in self.risks]
        total_ale = sum(ale for _, ale in risk_ales)

        level_counts = {"Critical": 0, "High": 0, "Medium": 0, "Low": 0}
        for risk in self.risks:
            level = self.calculate_qualitative(risk)["risk_level"]
            level_counts[level] = level_counts.get(level, 0) + 1

        worst_first = sorted(risk_ales, key=lambda pair: pair[1], reverse=True)

        return {
            "total_risks": len(self.risks),
            "total_ale": round(total_ale, 2),
            "risk_levels": level_counts,
            "top_5_risks": [(r.name, round(ale, 2)) for r, ale in worst_first[:5]],
        }
def load_risks_from_csv(filename: str) -> List["Risk"]:
    """Load risks from CSV file.

    Expects columns: id, name, asset_value, exposure_factor, aro,
    likelihood, impact, category, owner. Numeric columns are coerced.
    """
    with open(filename, 'r') as handle:
        return [
            Risk(
                id=record['id'],
                name=record['name'],
                asset_value=float(record['asset_value']),
                exposure_factor=float(record['exposure_factor']),
                aro=float(record['aro']),
                likelihood_qualitative=int(record['likelihood']),
                impact_qualitative=int(record['impact']),
                category=record['category'],
                owner=record['owner'],
            )
            for record in csv.DictReader(handle)
        ]
def save_report_to_csv(report: List[Dict], filename: str):
    """Save risk report rows to a CSV file.

    Args:
        report: List of row dicts; the first row's keys become the header.
            An empty list prints a notice and writes nothing.
        filename: Destination CSV path.
    """
    if not report:
        print("No data to save")
        return
    with open(filename, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=report[0].keys())
        writer.writeheader()
        writer.writerows(report)
    # Bug fix: the confirmation previously printed a literal "(unknown)"
    # placeholder instead of the actual destination path.
    print(f"Report saved to {filename}")
def interactive_mode():
    """Interactive risk assessment mode.

    Menu-driven loop on stdin/stdout: add risks, run cost-benefit
    analysis, print the report table, or view summary statistics.
    Runs until the user chooses option 5.

    NOTE(review): numeric input() conversions are unguarded — a
    non-numeric entry raises ValueError and exits the loop; confirm
    whether that is acceptable for this tool.
    """
    calculator = RiskCalculator()

    print("=" * 60)
    print("Risk Assessment Calculator - Interactive Mode")
    print("=" * 60)

    while True:
        print("\nOptions:")
        print("1. Add new risk")
        print("2. Calculate cost-benefit for control")
        print("3. Generate risk report")
        print("4. View summary")
        print("5. Exit")
        choice = input("\nEnter choice (1-5): ").strip()

        if choice == "1":
            # Collect all Risk fields from the user, then echo the
            # derived quantitative and qualitative metrics.
            print("\n--- Add New Risk ---")
            risk_id = input("Risk ID: ").strip()
            name = input("Risk Name: ").strip()
            asset_value = float(input("Asset Value ($): "))
            exposure_factor = float(input("Exposure Factor (0-1): "))
            aro = float(input("Annual Rate of Occurrence (0-1): "))
            likelihood = int(input("Likelihood (1-5): "))
            impact = int(input("Impact (1-5): "))
            category = input("Category: ").strip()
            owner = input("Owner: ").strip()
            risk = Risk(risk_id, name, asset_value, exposure_factor, aro,
                        likelihood, impact, category, owner)
            calculator.add_risk(risk)
            quant = calculator.calculate_quantitative(risk)
            qual = calculator.calculate_qualitative(risk)
            print(f"\nβ Risk added successfully!")
            print(f"  SLE: ${quant['sle']:,.0f}")
            print(f"  ALE: ${quant['ale']:,.0f}")
            print(f"  Risk Level: {qual['risk_level']}")
            print(f"  Remediation SLA: {qual['sla_days']} days")
        elif choice == "2":
            if not calculator.risks:
                print("No risks added yet. Please add a risk first.")
                continue
            print("\n--- Cost-Benefit Analysis ---")
            print("Available risks:")
            # 1-based listing to match the user's selection input below
            for i, risk in enumerate(calculator.risks, 1):
                print(f"{i}. {risk.name} (ID: {risk.id})")
            risk_idx = int(input("Select risk number: ")) - 1
            if risk_idx < 0 or risk_idx >= len(calculator.risks):
                print("Invalid selection")
                continue
            risk = calculator.risks[risk_idx]
            control_cost = float(input("Annual cost of control ($): "))
            new_aro = float(input("New ARO after control (0-1): "))
            cba = calculator.cost_benefit_analysis(risk, control_cost, new_aro)
            print(f"\n--- Cost-Benefit Analysis Results ---")
            print(f"ALE Before Control: ${cba['ale_before']:,.0f}")
            print(f"ALE After Control: ${cba['ale_after']:,.0f}")
            print(f"Annual Savings: ${cba['annual_savings']:,.0f}")
            print(f"Control Cost: ${cba['control_cost']:,.0f}")
            print(f"Net Benefit: ${cba['net_benefit']:,.0f}")
            print(f"ROI: {cba['roi_percent']:.1f}%")
            print(f"Payback Period: {cba['payback_period_years']:.2f} years")
            print(f"Recommendation: {cba['recommendation']}")
        elif choice == "3":
            if not calculator.risks:
                print("No risks added yet. Please add a risk first.")
                continue
            report = calculator.generate_report()
            print("\n" + "=" * 120)
            print("Risk Assessment Report")
            print("=" * 120)
            # Print header (columns centered in 15-char fields)
            headers = list(report[0].keys())
            print("|".join(f"{h:^15}" for h in headers))
            print("-" * 120)
            # Print rows
            for row in report:
                print("|".join(f"{str(v):^15}" for v in row.values()))
        elif choice == "4":
            summary = calculator.generate_summary()
            if not summary:
                print("No risks added yet. Please add a risk first.")
                continue
            print("\n" + "=" * 60)
            print("Risk Assessment Summary")
            print("=" * 60)
            print(f"Total Risks: {summary['total_risks']}")
            print(f"Total ALE: ${summary['total_ale']:,.0f}")
            print(f"\nRisk Level Distribution:")
            for level, count in summary['risk_levels'].items():
                print(f"  {level}: {count}")
            print(f"\nTop 5 Risks by ALE:")
            for name, ale in summary['top_5_risks']:
                print(f"  {name}: ${ale:,.0f}")
        elif choice == "5":
            print("Exiting...")
            break
        else:
            print("Invalid choice. Please enter 1-5.")
def main():
    """CLI entry point: batch CSV mode, or --interactive menu mode.

    NOTE(review): --control-cost and --new-aro are parsed but never read
    anywhere in this function; cost-benefit analysis is only reachable
    through interactive mode. Confirm whether a batch CBA path was
    intended.
    """
    parser = argparse.ArgumentParser(description="Risk Assessment Calculator")
    parser.add_argument('input_file', nargs='?', help='CSV file containing risk data')
    parser.add_argument('--output', '-o', help='Output CSV file for risk report')
    parser.add_argument('--interactive', '-i', action='store_true',
                        help='Run in interactive mode')
    parser.add_argument('--control-cost', type=float,
                        help='Cost of control for cost-benefit analysis')
    parser.add_argument('--new-aro', type=float,
                        help='New ARO after control implementation')

    args = parser.parse_args()

    if args.interactive:
        interactive_mode()
        return

    if not args.input_file:
        print("Error: Please provide an input file or use --interactive mode")
        parser.print_help()
        return

    # Load risks from CSV
    try:
        risks = load_risks_from_csv(args.input_file)
        calculator = RiskCalculator()
        for risk in risks:
            calculator.add_risk(risk)
        print(f"Loaded {len(risks)} risks from {args.input_file}")

        # Generate report
        report = calculator.generate_report()

        # Display summary
        summary = calculator.generate_summary()
        print("\n" + "=" * 60)
        print("Risk Assessment Summary")
        print("=" * 60)
        print(f"Total Risks: {summary['total_risks']}")
        print(f"Total ALE: ${summary['total_ale']:,.0f}")
        print(f"\nRisk Level Distribution:")
        # Unlike interactive mode, batch mode hides zero-count levels
        for level, count in summary['risk_levels'].items():
            if count > 0:
                print(f"  {level}: {count}")
        print(f"\nTop 5 Risks by ALE:")
        for name, ale in summary['top_5_risks']:
            print(f"  {name}: ${ale:,.0f}")

        # Save report if output file specified
        if args.output:
            save_report_to_csv(report, args.output)

        print("\nRisk Report:")
        print("-" * 120)
        for risk_data in report[:10]:  # Show top 10
            print(f"{risk_data['Risk ID']}: {risk_data['Risk Name']}")
            print(f"  ALE: {risk_data['ALE']} | Risk Level: {risk_data['Risk Level']} | "
                  f"SLA: {risk_data['Remediation SLA']}")
    except FileNotFoundError:
        print(f"Error: File '{args.input_file}' not found")
    except Exception as e:
        # Broad catch keeps the CLI from dumping a traceback on bad input
        print(f"Error: {e}")


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/security-compliance/scripts/risk_calculator.py",
"license": "MIT License",
"lines": 315,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/security-compliance/scripts/vuln_prioritizer.py | #!/usr/bin/env python3
"""
Vulnerability Prioritization Tool
Prioritizes vulnerabilities based on CVSS score combined with business context
factors such as asset criticality, exposure, exploit availability, and compensating controls.
Usage:
python vuln_prioritizer.py vulnerabilities.csv
python vuln_prioritizer.py vulnerabilities.csv --output prioritized.csv
python vuln_prioritizer.py --interactive
"""
import argparse
import csv
from dataclasses import dataclass
from typing import List, Dict
from datetime import datetime, timedelta
@dataclass
class Vulnerability:
    """Vulnerability data class.

    One scanner finding enriched with the business context
    (criticality, exposure, data sensitivity, exploit status) used by
    the prioritization engine.
    """
    cve_id: str  # CVE identifier (e.g. "CVE-2024-1234")
    title: str  # Short human-readable description
    cvss_score: float  # Base CVSS score, 0-10
    affected_system: str  # System/host where the vulnerability was found
    asset_criticality: int  # 1-5 scale
    exposure: str  # internet_facing, internal, isolated
    data_sensitivity: str  # highly_confidential, confidential, public
    exploit_available: bool  # Public exploit code exists
    exploit_in_wild: bool  # Exploitation observed in the wild
    compensating_controls: bool  # Mitigating controls already in place
    discovered_date: str  # Discovery date, "YYYY-MM-DD"
class VulnerabilityPrioritizer:
    """Vulnerability prioritization engine.

    Combines a vulnerability's CVSS base score with business context
    (exploit status, asset criticality, exposure, data sensitivity,
    compensating controls) into a single priority score, maps that score
    to a P0-P3 level with a patch SLA, and produces sorted reports and
    summary statistics over all registered vulnerabilities.
    """

    # Network-exposure weight; normalized by 2.0 in scoring, so
    # "internal" is the neutral 1.0x baseline.
    EXPOSURE_WEIGHT = {
        "internet_facing": 3,
        "internal": 2,
        "isolated": 1
    }

    # Weight of the data classification on the affected system;
    # also normalized by 2.0 in scoring ("confidential" is neutral).
    DATA_SENSITIVITY_WEIGHT = {
        "highly_confidential": 3,  # PII, PHI, financial
        "confidential": 2,
        "public": 1
    }

    # Patch SLA in days for each priority level.
    SLA_MAPPING = {
        "P0": 1,   # Critical - patch within 24-48 hours
        "P1": 7,   # High - patch within 7 days
        "P2": 30,  # Medium - patch within 30 days
        "P3": 90   # Low - patch within 90 days
    }

    def __init__(self):
        # Vulnerabilities registered via add_vulnerability().
        self.vulnerabilities: List[Vulnerability] = []

    def calculate_priority_score(self, vuln: Vulnerability) -> float:
        """Calculate priority score based on CVSS + business context.

        Formula:
            priority = CVSS x exploit_available_mult x exploit_in_wild_mult
                       x asset_mult x exposure_mult x data_sensitivity_mult
                       x controls_mult
        """
        # Base CVSS score (0-10)
        cvss_score = vuln.cvss_score
        # Exploit multipliers
        exploit_available_mult = 1.5 if vuln.exploit_available else 1.0
        exploit_in_wild_mult = 2.0 if vuln.exploit_in_wild else 1.0
        # Asset criticality multiplier (1-5 scale normalized; 3 is neutral)
        asset_mult = vuln.asset_criticality / 3.0
        # Exposure multiplier (unknown exposure falls back to neutral weight 2)
        exposure_mult = self.EXPOSURE_WEIGHT.get(vuln.exposure, 2) / 2.0
        # Data sensitivity multiplier (unknown classification falls back to neutral)
        data_sens_mult = self.DATA_SENSITIVITY_WEIGHT.get(vuln.data_sensitivity, 2) / 2.0
        # Compensating controls halve the score
        controls_mult = 0.5 if vuln.compensating_controls else 1.0
        # Calculate final priority score
        priority_score = (
            cvss_score *
            exploit_available_mult *
            exploit_in_wild_mult *
            asset_mult *
            exposure_mult *
            data_sens_mult *
            controls_mult
        )
        return priority_score

    def determine_priority_level(self, priority_score: float) -> str:
        """Determine priority level (P0-P3) based on score"""
        if priority_score >= 14:
            return "P0"  # Critical
        elif priority_score >= 10:
            return "P1"  # High
        elif priority_score >= 6:
            return "P2"  # Medium
        else:
            return "P3"  # Low

    def calculate_due_date(self, vuln: Vulnerability, priority_level: str) -> str:
        """Return the patch due date ("YYYY-MM-DD"): discovery date plus
        the SLA for *priority_level*.

        Raises ValueError if vuln.discovered_date is not "YYYY-MM-DD".
        """
        sla_days = self.SLA_MAPPING.get(priority_level, 90)  # unknown level -> P3 SLA
        discovered = datetime.strptime(vuln.discovered_date, "%Y-%m-%d")
        due_date = discovered + timedelta(days=sla_days)
        return due_date.strftime("%Y-%m-%d")

    def generate_rationale(self, vuln: Vulnerability, priority_score: float) -> str:
        """Generate a human-readable, semicolon-joined rationale.

        NOTE(review): priority_score is accepted but unused here — kept
        for signature stability with callers.
        """
        factors = []
        if vuln.cvss_score >= 9.0:
            factors.append("Critical CVSS score")
        elif vuln.cvss_score >= 7.0:
            factors.append("High CVSS score")
        if vuln.exploit_in_wild:
            factors.append("Active exploitation in wild")
        if vuln.exploit_available:
            factors.append("Public exploit available")
        if vuln.exposure == "internet_facing":
            factors.append("Internet-facing system")
        if vuln.asset_criticality >= 4:
            factors.append("Critical business system")
        if vuln.data_sensitivity == "highly_confidential":
            factors.append("Contains sensitive data (PII/PHI)")
        if vuln.compensating_controls:
            factors.append("Compensating controls in place (WAF/IPS)")
        return "; ".join(factors) if factors else "Standard risk assessment"

    def add_vulnerability(self, vuln: Vulnerability):
        """Add vulnerability to assessment"""
        self.vulnerabilities.append(vuln)

    def generate_report(self) -> List[Dict]:
        """Build one report row per vulnerability, sorted by priority
        score (highest first).

        Numeric fields are pre-formatted strings; the sort re-parses
        "Priority Score" as float.
        """
        report = []
        for vuln in self.vulnerabilities:
            priority_score = self.calculate_priority_score(vuln)
            priority_level = self.determine_priority_level(priority_score)
            due_date = self.calculate_due_date(vuln, priority_level)
            rationale = self.generate_rationale(vuln, priority_score)
            report.append({
                "CVE ID": vuln.cve_id,
                "Title": vuln.title,
                "Affected System": vuln.affected_system,
                "CVSS Score": f"{vuln.cvss_score:.1f}",
                "Priority Score": f"{priority_score:.2f}",
                "Priority Level": priority_level,
                "SLA Days": self.SLA_MAPPING[priority_level],
                "Discovered": vuln.discovered_date,
                "Due Date": due_date,
                "Exploit Available": "Yes" if vuln.exploit_available else "No",
                "Active Exploitation": "Yes" if vuln.exploit_in_wild else "No",
                "Asset Criticality": vuln.asset_criticality,
                "Exposure": vuln.exposure,
                "Data Sensitivity": vuln.data_sensitivity,
                "Compensating Controls": "Yes" if vuln.compensating_controls else "No",
                "Rationale": rationale
            })
        # Sort by priority score (descending)
        report.sort(key=lambda x: float(x["Priority Score"]), reverse=True)
        return report

    def generate_summary(self) -> Dict:
        """Generate summary statistics; empty dict when nothing is loaded."""
        if not self.vulnerabilities:
            return {}
        priority_counts = {"P0": 0, "P1": 0, "P2": 0, "P3": 0}
        for vuln in self.vulnerabilities:
            priority_score = self.calculate_priority_score(vuln)
            priority_level = self.determine_priority_level(priority_score)
            priority_counts[priority_level] += 1
        # Count exploitable vulnerabilities
        exploitable = sum(1 for v in self.vulnerabilities if v.exploit_available)
        actively_exploited = sum(1 for v in self.vulnerabilities if v.exploit_in_wild)
        # Count by exposure
        internet_facing = sum(1 for v in self.vulnerabilities if v.exposure == "internet_facing")
        return {
            "total_vulnerabilities": len(self.vulnerabilities),
            "priority_distribution": priority_counts,
            "exploitable_count": exploitable,
            "actively_exploited_count": actively_exploited,
            "internet_facing_count": internet_facing
        }
def load_vulnerabilities_from_csv(filename: str) -> List[Vulnerability]:
    """Read vulnerability records from a CSV file.

    The file must have a header row matching the Vulnerability field
    names; boolean columns hold the text "true"/"false"
    (case-insensitive) and discovered_date is "YYYY-MM-DD".
    """
    def _as_bool(text: str) -> bool:
        # Booleans arrive as text; only the literal "true" counts.
        return text.lower() == 'true'

    loaded: List[Vulnerability] = []
    with open(filename, 'r') as handle:
        for record in csv.DictReader(handle):
            loaded.append(Vulnerability(
                cve_id=record['cve_id'],
                title=record['title'],
                cvss_score=float(record['cvss_score']),
                affected_system=record['affected_system'],
                asset_criticality=int(record['asset_criticality']),
                exposure=record['exposure'],
                data_sensitivity=record['data_sensitivity'],
                exploit_available=_as_bool(record['exploit_available']),
                exploit_in_wild=_as_bool(record['exploit_in_wild']),
                compensating_controls=_as_bool(record['compensating_controls']),
                discovered_date=record['discovered_date'],
            ))
    return loaded
def save_report_to_csv(report: List[Dict], filename: str):
    """Save vulnerability report rows to a CSV file.

    Args:
        report: List of report-row dicts (as produced by
            VulnerabilityPrioritizer.generate_report); the first row's
            keys define the CSV header.
        filename: Destination path; overwritten if it exists.
    """
    if not report:
        print("No data to save")
        return
    with open(filename, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=report[0].keys())
        writer.writeheader()
        writer.writerows(report)
    # Bug fix: the confirmation message previously printed the literal
    # text "(unknown)" instead of the destination filename.
    print(f"β Report saved to {filename}")
def interactive_mode():
    """Run the menu-driven interactive prioritization loop.

    Repeatedly prompts on stdin: add vulnerabilities, print the
    prioritized report or summary, or exit. Numeric prompts are not
    validated here, so malformed input raises ValueError to the caller.
    """
    prioritizer = VulnerabilityPrioritizer()
    print("=" * 60)
    print("Vulnerability Prioritization Tool - Interactive Mode")
    print("=" * 60)
    while True:
        print("\nOptions:")
        print("1. Add new vulnerability")
        print("2. Generate prioritization report")
        print("3. View summary statistics")
        print("4. Exit")
        choice = input("\nEnter choice (1-4): ").strip()
        if choice == "1":
            # Collect one vulnerability record field by field.
            print("\n--- Add New Vulnerability ---")
            cve_id = input("CVE ID (e.g., CVE-2021-44228): ").strip()
            title = input("Title: ").strip()
            cvss_score = float(input("CVSS Score (0-10): "))
            affected_system = input("Affected System: ").strip()
            asset_criticality = int(input("Asset Criticality (1-5, 5=most critical): "))
            print("\nExposure:")
            print(" 1. internet_facing")
            print(" 2. internal")
            print(" 3. isolated")
            exposure_choice = input("Select (1-3): ")
            exposure_map = {"1": "internet_facing", "2": "internal", "3": "isolated"}
            # Unknown selections default to "internal".
            exposure = exposure_map.get(exposure_choice, "internal")
            print("\nData Sensitivity:")
            print(" 1. highly_confidential (PII/PHI/Financial)")
            print(" 2. confidential")
            print(" 3. public")
            sens_choice = input("Select (1-3): ")
            sens_map = {"1": "highly_confidential", "2": "confidential", "3": "public"}
            # Unknown selections default to "confidential".
            data_sensitivity = sens_map.get(sens_choice, "confidential")
            # Anything other than "yes" (case-insensitive) is treated as no.
            exploit_available = input("Public exploit available? (yes/no): ").lower() == "yes"
            exploit_in_wild = input("Active exploitation in wild? (yes/no): ").lower() == "yes"
            compensating_controls = input("Compensating controls in place? (yes/no): ").lower() == "yes"
            discovered_date = input("Discovered date (YYYY-MM-DD): ").strip()
            vuln = Vulnerability(
                cve_id, title, cvss_score, affected_system, asset_criticality,
                exposure, data_sensitivity, exploit_available, exploit_in_wild,
                compensating_controls, discovered_date
            )
            prioritizer.add_vulnerability(vuln)
            # Calculate and display priority
            priority_score = prioritizer.calculate_priority_score(vuln)
            priority_level = prioritizer.determine_priority_level(priority_score)
            due_date = prioritizer.calculate_due_date(vuln, priority_level)
            rationale = prioritizer.generate_rationale(vuln, priority_score)
            print(f"\nβ Vulnerability added successfully!")
            print(f" Priority Score: {priority_score:.2f}")
            print(f" Priority Level: {priority_level}")
            print(f" SLA: Patch within {prioritizer.SLA_MAPPING[priority_level]} days")
            print(f" Due Date: {due_date}")
            print(f" Rationale: {rationale}")
        elif choice == "2":
            # Print the full report as a fixed-width table.
            if not prioritizer.vulnerabilities:
                print("No vulnerabilities added yet. Please add a vulnerability first.")
                continue
            report = prioritizer.generate_report()
            print("\n" + "=" * 150)
            print("Vulnerability Prioritization Report")
            print("=" * 150)
            print(f"{'CVE ID':<20} {'System':<25} {'CVSS':<6} {'Priority':<10} {'Level':<7} {'Due Date':<12} {'Rationale':<50}")
            print("-" * 150)
            for row in report:
                print(f"{row['CVE ID']:<20} "
                      f"{row['Affected System']:<25} "
                      f"{row['CVSS Score']:<6} "
                      f"{row['Priority Score']:<10} "
                      f"{row['Priority Level']:<7} "
                      f"{row['Due Date']:<12} "
                      f"{row['Rationale']:<50}")
        elif choice == "3":
            # Print aggregate counts across everything added so far.
            summary = prioritizer.generate_summary()
            if not summary:
                print("No vulnerabilities added yet. Please add a vulnerability first.")
                continue
            print("\n" + "=" * 60)
            print("Vulnerability Summary")
            print("=" * 60)
            print(f"Total Vulnerabilities: {summary['total_vulnerabilities']}")
            print(f"\nPriority Distribution:")
            for level, count in summary['priority_distribution'].items():
                print(f" {level}: {count}")
            print(f"\nExploitability:")
            print(f" Public exploits available: {summary['exploitable_count']}")
            print(f" Active exploitation: {summary['actively_exploited_count']}")
            print(f"\nExposure:")
            print(f" Internet-facing systems: {summary['internet_facing_count']}")
        elif choice == "4":
            print("Exiting...")
            break
        else:
            print("Invalid choice. Please enter 1-4.")
def main():
    """CLI entry point: load a CSV, print summary and top-15 table, and
    optionally save the prioritized report.

    With --interactive, hands off to interactive_mode() instead.
    """
    parser = argparse.ArgumentParser(description="Vulnerability Prioritization Tool")
    parser.add_argument('input_file', nargs='?', help='CSV file containing vulnerability data')
    parser.add_argument('--output', '-o', help='Output CSV file for prioritized report')
    parser.add_argument('--interactive', '-i', action='store_true',
                        help='Run in interactive mode')
    parser.add_argument('--filter-level', choices=['P0', 'P1', 'P2', 'P3'],
                        help='Filter to show only specified priority level')
    args = parser.parse_args()
    if args.interactive:
        interactive_mode()
        return
    # Batch mode requires an input file.
    if not args.input_file:
        print("Error: Please provide an input file or use --interactive mode")
        parser.print_help()
        return
    try:
        vulnerabilities = load_vulnerabilities_from_csv(args.input_file)
        prioritizer = VulnerabilityPrioritizer()
        for vuln in vulnerabilities:
            prioritizer.add_vulnerability(vuln)
        print(f"β Loaded {len(vulnerabilities)} vulnerabilities from {args.input_file}")
        # Generate report
        report = prioritizer.generate_report()
        # Filter if requested
        if args.filter_level:
            report = [r for r in report if r['Priority Level'] == args.filter_level]
        # Display summary (always computed over the unfiltered set)
        summary = prioritizer.generate_summary()
        print("\n" + "=" * 60)
        print("Vulnerability Summary")
        print("=" * 60)
        print(f"Total Vulnerabilities: {summary['total_vulnerabilities']}")
        print(f"\nPriority Distribution:")
        for level, count in summary['priority_distribution'].items():
            print(f" {level}: {count}")
        print(f"\nExploitability:")
        print(f" Public exploits available: {summary['exploitable_count']}")
        print(f" Active exploitation: {summary['actively_exploited_count']}")
        # Display top prioritized vulnerabilities
        print("\n" + "=" * 150)
        print("Top Prioritized Vulnerabilities")
        print("=" * 150)
        print(f"{'CVE ID':<20} {'System':<30} {'CVSS':<6} {'Priority':<10} {'Level':<7} {'Due Date':<12}")
        print("-" * 150)
        for row in report[:15]:  # Show top 15
            print(f"{row['CVE ID']:<20} "
                  f"{row['Affected System']:<30} "
                  f"{row['CVSS Score']:<6} "
                  f"{row['Priority Score']:<10} "
                  f"{row['Priority Level']:<7} "
                  f"{row['Due Date']:<12}")
        # Save report if output file specified
        if args.output:
            save_report_to_csv(report, args.output)
    except FileNotFoundError:
        print(f"Error: File '{args.input_file}' not found")
    except Exception as e:
        # Broad catch keeps the CLI from dumping a traceback on bad data.
        print(f"Error: {e}")


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/security-compliance/scripts/vuln_prioritizer.py",
"license": "MIT License",
"lines": 355,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/business-marketing/agile-product-owner/scripts/user_story_generator.py | #!/usr/bin/env python3
"""
User Story Generator with INVEST Criteria
Creates well-formed user stories with acceptance criteria
"""
import json
from typing import Dict, List, Tuple
class UserStoryGenerator:
    """Generate INVEST-compliant user stories.

    Breaks epics into persona-scoped stories plus technical enabler
    stories, estimates points, assigns priorities, checks INVEST
    criteria, plans sprints by capacity, and formats stories for display.
    """

    def __init__(self):
        # Built-in personas; each has a display name, ranked needs
        # (needs[0] is used as the fallback benefit), and a context blurb.
        self.personas = {
            'end_user': {
                'name': 'End User',
                'needs': ['efficiency', 'simplicity', 'reliability', 'speed'],
                'context': 'daily usage of core features'
            },
            'admin': {
                'name': 'Administrator',
                'needs': ['control', 'visibility', 'security', 'configuration'],
                'context': 'system management and oversight'
            },
            'power_user': {
                'name': 'Power User',
                'needs': ['advanced features', 'automation', 'customization', 'shortcuts'],
                'context': 'expert usage and workflow optimization'
            },
            'new_user': {
                'name': 'New User',
                'needs': ['guidance', 'learning', 'safety', 'clarity'],
                'context': 'first-time experience and onboarding'
            }
        }
        # Narrative templates; only 'feature' is used by _generate_narrative.
        self.story_templates = {
            'feature': "As a {persona}, I want to {action} so that {benefit}",
            'improvement': "As a {persona}, I need {capability} to {achieve_goal}",
            'fix': "As a {persona}, I expect {behavior} when {condition}",
            'integration': "As a {persona}, I want to {integrate} so that {workflow}"
        }
        # NOTE(review): not referenced by the visible methods — acceptance
        # criteria are built ad hoc in _generate_acceptance_criteria.
        self.acceptance_criteria_patterns = [
            "Given {precondition}, When {action}, Then {outcome}",
            "Should {behavior} when {condition}",
            "Must {requirement} to {achieve}",
            "Can {capability} without {negative_outcome}"
        ]

    def generate_epic_stories(self, epic: Dict) -> List[Dict]:
        """Break down epic into user stories.

        Emits one story per (persona, scope item) pair, then one enabler
        story per technical requirement.
        """
        stories = []
        # Analyze epic for key components
        epic_name = epic.get('name', 'Feature')
        epic_description = epic.get('description', '')
        personas = epic.get('personas', ['end_user'])
        scope = epic.get('scope', [])
        # Generate stories for each persona and scope item
        for persona in personas:
            for i, scope_item in enumerate(scope):
                story = self.generate_story(
                    persona=persona,
                    feature=scope_item,
                    epic=epic_name,
                    index=i+1
                )
                stories.append(story)
        # Add enabler stories (technical, infrastructure)
        if epic.get('technical_requirements'):
            for req in epic['technical_requirements']:
                enabler = self.generate_enabler_story(req, epic_name)
                stories.append(enabler)
        return stories

    def generate_story(self, persona: str, feature: str, epic: str, index: int) -> Dict:
        """Generate a single user story.

        The id is the first three letters of the epic name (upper-cased)
        plus a zero-padded index; unknown personas fall back to end_user.
        """
        persona_data = self.personas.get(persona, self.personas['end_user'])
        # Create story
        story = {
            'id': f"{epic[:3].upper()}-{index:03d}",
            'type': 'story',
            'title': self._generate_title(feature),
            'narrative': self._generate_narrative(persona_data, feature),
            'acceptance_criteria': self._generate_acceptance_criteria(feature),
            'estimation': self._estimate_complexity(feature),
            'priority': self._determine_priority(persona, feature),
            'dependencies': [],
            'invest_check': self._check_invest_criteria(feature)
        }
        return story

    def generate_enabler_story(self, requirement: str, epic: str) -> Dict:
        """Generate technical enabler story.

        NOTE(review): the id embeds len(requirement), so two requirements
        of equal length collide — confirm whether ids must be unique.
        """
        return {
            'id': f"{epic[:3].upper()}-E{len(requirement):02d}",
            'type': 'enabler',
            'title': f"Technical: {requirement}",
            'narrative': f"As a developer, I need to {requirement} to enable user features",
            'acceptance_criteria': [
                f"Technical requirement {requirement} is implemented",
                "All tests pass",
                "Documentation is updated",
                "No regression in existing functionality"
            ],
            'estimation': 5,  # Default medium complexity
            'priority': 'high',
            'dependencies': [],
            'invest_check': {
                'independent': True,
                'negotiable': False,  # Technical requirements often non-negotiable
                'valuable': True,
                'estimable': True,
                'small': True,
                'testable': True
            }
        }

    def _generate_title(self, feature: str) -> str:
        """Generate concise story title (first five words, Title Case)."""
        # Simplify feature description to title
        words = feature.split()[:5]
        return ' '.join(words).title()

    def _generate_narrative(self, persona: Dict, feature: str) -> str:
        """Generate story narrative in the standard 'As a / I want / so that' format."""
        template = self.story_templates['feature']
        action = self._extract_action(feature)
        benefit = self._extract_benefit(feature, persona['needs'])
        return template.format(
            persona=persona['name'],
            action=action,
            benefit=benefit
        )

    def _generate_acceptance_criteria(self, feature: str) -> List[str]:
        """Generate a fixed five-item acceptance-criteria list:
        happy path, validation, error handling, performance, accessibility.
        """
        criteria = []
        # Happy path
        criteria.append(f"Given user has access, When they {self._extract_action(feature)}, Then {self._extract_outcome(feature)}")
        # Validation
        criteria.append(f"Should validate input before processing")
        # Error handling
        criteria.append(f"Must show clear error message when action fails")
        # Performance
        criteria.append(f"Should complete within 2 seconds")
        # Accessibility
        criteria.append(f"Must be accessible via keyboard navigation")
        return criteria

    def _extract_action(self, feature: str) -> str:
        """Extract action from feature description.

        If the text contains a known action verb, the whole lower-cased
        feature is the action; otherwise prefix it with "use".
        """
        action_verbs = ['create', 'view', 'edit', 'delete', 'share', 'export', 'import', 'configure', 'search', 'filter']
        feature_lower = feature.lower()
        for verb in action_verbs:
            if verb in feature_lower:
                return feature_lower
        return f"use {feature.lower()}"

    def _extract_benefit(self, feature: str, needs: List[str]) -> str:
        """Extract benefit based on keyword matches; falls back to the
        persona's top-ranked need."""
        feature_lower = feature.lower()
        if 'save' in feature_lower or 'quick' in feature_lower:
            return "I can save time and work more efficiently"
        elif 'share' in feature_lower or 'collab' in feature_lower:
            return "I can collaborate with my team effectively"
        elif 'report' in feature_lower or 'analyt' in feature_lower:
            return "I can make data-driven decisions"
        elif 'automat' in feature_lower:
            return "I can reduce manual work and errors"
        else:
            return f"I can achieve my goals related to {needs[0]}"

    def _extract_outcome(self, feature: str) -> str:
        """Extract expected outcome"""
        return f"the {feature.lower()} is successfully completed"

    def _estimate_complexity(self, feature: str) -> int:
        """Estimate story points (1/3/8/13) from keyword complexity
        indicators; defaults to 3."""
        feature_lower = feature.lower()
        # Complexity indicators
        complexity = 3  # Base complexity
        if any(word in feature_lower for word in ['simple', 'basic', 'view', 'display']):
            complexity = 1
        elif any(word in feature_lower for word in ['create', 'edit', 'update']):
            complexity = 3
        elif any(word in feature_lower for word in ['complex', 'advanced', 'integrate', 'migrate']):
            complexity = 8
        elif any(word in feature_lower for word in ['redesign', 'refactor', 'architect']):
            complexity = 13
        return complexity

    def _determine_priority(self, persona: str, feature: str) -> str:
        """Determine story priority: critical/high/medium/low by keyword,
        with 'high' reserved for core features of primary personas."""
        feature_lower = feature.lower()
        # Critical features
        if any(word in feature_lower for word in ['security', 'fix', 'critical', 'broken']):
            return 'critical'
        # High priority for primary personas
        if persona in ['end_user', 'admin']:
            if any(word in feature_lower for word in ['core', 'essential', 'primary']):
                return 'high'
        # Medium for improvements
        if any(word in feature_lower for word in ['improve', 'enhance', 'optimize']):
            return 'medium'
        # Low for nice-to-haves
        return 'low'

    def _check_invest_criteria(self, feature: str) -> Dict[str, bool]:
        """Check INVEST criteria compliance via keyword/size heuristics."""
        return {
            'independent': not any(word in feature.lower() for word in ['after', 'depends', 'requires']),
            'negotiable': True,  # Most features can be negotiated
            'valuable': True,  # Assume value if it made it to backlog
            'estimable': len(feature.split()) < 20,  # Can estimate if not too vague
            'small': self._estimate_complexity(feature) <= 8,  # 8 points or less
            'testable': not any(word in feature.lower() for word in ['maybe', 'possibly', 'somehow'])
        }

    def generate_sprint_stories(self, capacity: int, backlog: List[Dict]) -> Dict:
        """Generate stories for a sprint based on capacity.

        Commits stories (priority first, then smallest) until capacity is
        reached; stories that would fit within 120% of capacity become
        stretch goals. Raises KeyError if a story has an unknown priority.
        """
        sprint = {
            'capacity': capacity,
            'committed': [],
            'stretch': [],
            'total_points': 0,
            'utilization': 0
        }
        # Sort backlog by priority and size
        sorted_backlog = sorted(
            backlog,
            key=lambda x: (
                {'critical': 0, 'high': 1, 'medium': 2, 'low': 3}[x['priority']],
                x['estimation']
            )
        )
        # Fill sprint
        for story in sorted_backlog:
            if sprint['total_points'] + story['estimation'] <= capacity:
                sprint['committed'].append(story)
                sprint['total_points'] += story['estimation']
            elif sprint['total_points'] + story['estimation'] <= capacity * 1.2:
                # Stretch stories do not consume committed points.
                sprint['stretch'].append(story)
        sprint['utilization'] = round((sprint['total_points'] / capacity) * 100, 1)
        return sprint

    def format_story_output(self, story: Dict) -> str:
        """Format story for display as a multi-line text card."""
        output = []
        output.append(f"USER STORY: {story['id']}")
        output.append("=" * 40)
        output.append(f"Title: {story['title']}")
        output.append(f"Type: {story['type']}")
        output.append(f"Priority: {story['priority'].upper()}")
        output.append(f"Points: {story['estimation']}")
        output.append("")
        output.append("Story:")
        output.append(story['narrative'])
        output.append("")
        output.append("Acceptance Criteria:")
        for i, criterion in enumerate(story['acceptance_criteria'], 1):
            output.append(f" {i}. {criterion}")
        output.append("")
        output.append("INVEST Checklist:")
        for criterion, passed in story['invest_check'].items():
            status = "β" if passed else "β"
            output.append(f" {status} {criterion.capitalize()}")
        return "\n".join(output)
def create_sample_epic():
    """Build the demo 'User Dashboard' epic used for testing/demo runs."""
    scope_items = [
        'View key metrics and KPIs',
        'Customize dashboard layout',
        'Export dashboard data',
        'Share dashboard with team members',
        'Set up automated reports',
    ]
    tech_requirements = [
        'Implement caching for performance',
        'Set up real-time data pipeline',
    ]
    return {
        'name': 'User Dashboard',
        'description': 'Create a comprehensive dashboard for users to view their data',
        'personas': ['end_user', 'power_user'],
        'scope': scope_items,
        'technical_requirements': tech_requirements,
    }
def main():
    """Demo driver: 'sprint [capacity]' argv plans a sprint from the
    sample epic; otherwise prints the generated backlog with a summary."""
    import sys
    generator = UserStoryGenerator()
    if len(sys.argv) > 1 and sys.argv[1] == 'sprint':
        # Generate sprint planning
        capacity = int(sys.argv[2]) if len(sys.argv) > 2 else 30
        # Create sample backlog
        epic = create_sample_epic()
        backlog = generator.generate_epic_stories(epic)
        # Plan sprint
        sprint = generator.generate_sprint_stories(capacity, backlog)
        print("=" * 60)
        print("SPRINT PLANNING")
        print("=" * 60)
        print(f"Sprint Capacity: {sprint['capacity']} points")
        print(f"Committed: {sprint['total_points']} points ({sprint['utilization']}%)")
        print(f"Stories: {len(sprint['committed'])} committed + {len(sprint['stretch'])} stretch")
        print("\nπ COMMITTED STORIES:\n")
        for story in sprint['committed']:
            print(f" [{story['priority'][:1].upper()}] {story['id']}: {story['title']} ({story['estimation']}pts)")
        if sprint['stretch']:
            print("\nπ― STRETCH GOALS:\n")
            for story in sprint['stretch']:
                print(f" [{story['priority'][:1].upper()}] {story['id']}: {story['title']} ({story['estimation']}pts)")
    else:
        # Generate stories for epic
        epic = create_sample_epic()
        stories = generator.generate_epic_stories(epic)
        print(f"Generated {len(stories)} stories from epic: {epic['name']}\n")
        # Display first 3 stories in detail
        for story in stories[:3]:
            print(generator.format_story_output(story))
            print("\n")
        # Summary of all stories
        print("=" * 60)
        print("BACKLOG SUMMARY")
        print("=" * 60)
        total_points = sum(s['estimation'] for s in stories)
        print(f"Total Stories: {len(stories)}")
        print(f"Total Points: {total_points}")
        print(f"Average Size: {total_points/len(stories):.1f} points")
        print("\nPriority Breakdown:")
        for priority in ['critical', 'high', 'medium', 'low']:
            count = len([s for s in stories if s['priority'] == priority])
            if count > 0:
                print(f" {priority.capitalize()}: {count} stories")


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/business-marketing/agile-product-owner/scripts/user_story_generator.py",
"license": "MIT License",
"lines": 315,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/business-marketing/ceo-advisor/scripts/financial_scenario_analyzer.py | #!/usr/bin/env python3
"""
Financial Scenario Analyzer - Model different business scenarios and their financial impact
"""
import json
from typing import Dict, List, Tuple
import math
class FinancialScenarioAnalyzer:
    def __init__(self):
        # NOTE(review): key_metrics is not referenced by the visible
        # methods — possibly vestigial; confirm before removing.
        self.key_metrics = [
            'revenue', 'gross_margin', 'operating_expenses',
            'ebitda', 'cash_flow', 'runway', 'valuation'
        ]
        # Revenue growth curves used by _project_year; each takes
        # (base, rate, period) and returns the projected value.
        self.growth_models = {
            'linear': lambda base, rate, period: base * (1 + rate * period),
            'exponential': lambda base, rate, period: base * math.pow(1 + rate, period),
            'logarithmic': lambda base, rate, period: base * (1 + rate * math.log(period + 1)),
            's_curve': lambda base, rate, period: base * (2 / (1 + math.exp(-rate * period)))
        }
    def analyze_scenarios(self, base_case: Dict, scenarios: List[Dict]) -> Dict:
        """Analyze multiple financial scenarios against a base case.

        Orchestrates the full pipeline: per-scenario projections,
        sensitivity analysis, probability-weighted (risk-adjusted)
        returns, and a final recommendation.
        """
        results = {
            'base_case_summary': self._summarize_financials(base_case),
            'scenario_analysis': [],
            'sensitivity_analysis': {},
            'recommendation': {},
            'risk_adjusted_view': {}
        }
        # Analyze each scenario
        for scenario in scenarios:
            scenario_result = self._analyze_scenario(base_case, scenario)
            results['scenario_analysis'].append(scenario_result)
        # Sensitivity analysis
        results['sensitivity_analysis'] = self._perform_sensitivity_analysis(
            base_case,
            scenarios
        )
        # Risk-adjusted view
        results['risk_adjusted_view'] = self._calculate_risk_adjusted_returns(
            results['scenario_analysis']
        )
        # Generate recommendation
        results['recommendation'] = self._generate_recommendation(
            results['scenario_analysis'],
            results['risk_adjusted_view']
        )
        return results
def _summarize_financials(self, financials: Dict) -> Dict:
"""Summarize key financial metrics"""
revenue = financials.get('revenue', 0)
cogs = financials.get('cogs', 0)
opex = financials.get('operating_expenses', 0)
gross_profit = revenue - cogs
gross_margin = (gross_profit / revenue * 100) if revenue > 0 else 0
ebitda = gross_profit - opex
ebitda_margin = (ebitda / revenue * 100) if revenue > 0 else 0
return {
'revenue': revenue,
'gross_profit': gross_profit,
'gross_margin': gross_margin,
'operating_expenses': opex,
'ebitda': ebitda,
'ebitda_margin': ebitda_margin,
'cash': financials.get('cash', 0),
'burn_rate': financials.get('burn_rate', 0),
'runway_months': self._calculate_runway(
financials.get('cash', 0),
financials.get('burn_rate', 0)
)
}
def _calculate_runway(self, cash: float, burn_rate: float) -> float:
"""Calculate months of runway"""
if burn_rate <= 0:
return float('inf')
return cash / burn_rate
    def _analyze_scenario(self, base_case: Dict, scenario: Dict) -> Dict:
        """Run a full 3-year analysis of a single scenario.

        Applies the scenario's changes to the base case, projects three
        years (each year's projection feeds the next), then derives NPV,
        IRR, break-even and total-return metrics.
        """
        name = scenario.get('name', 'Unnamed Scenario')
        probability = scenario.get('probability', 0.5)
        # Apply scenario changes
        projected_financials = self._apply_scenario_changes(base_case, scenario)
        # Calculate metrics for each year
        projections = []
        current_state = projected_financials.copy()
        for year in range(1, 4):  # 3-year projection
            year_projection = self._project_year(
                current_state,
                scenario,
                year
            )
            projections.append(year_projection)
            # Chain: this year's projection becomes next year's baseline.
            current_state = year_projection
        # Calculate NPV and IRR
        cash_flows = [p['free_cash_flow'] for p in projections]
        npv = self._calculate_npv(cash_flows, scenario.get('discount_rate', 0.1))
        irr = self._calculate_irr(cash_flows, base_case.get('initial_investment', 0))
        return {
            'name': name,
            'probability': probability,
            'projections': projections,
            'npv': npv,
            'irr': irr,
            'break_even_month': self._find_break_even(projections),
            'total_return': self._calculate_total_return(projections, base_case),
            'key_assumptions': scenario.get('assumptions', [])
        }
def _apply_scenario_changes(self, base_case: Dict, scenario: Dict) -> Dict:
"""Apply scenario changes to base case"""
result = base_case.copy()
changes = scenario.get('changes', {})
for key, change in changes.items():
if key in result:
if isinstance(change, dict):
# Relative change
if 'multiply' in change:
result[key] *= change['multiply']
elif 'add' in change:
result[key] += change['add']
else:
# Absolute change
result[key] = change
return result
def _project_year(self, current_state: Dict, scenario: Dict, year: int) -> Dict:
"""Project financials for a specific year"""
growth_model = scenario.get('growth_model', 'exponential')
growth_rate = scenario.get('growth_rate', 0.3)
# Apply growth model
model_func = self.growth_models.get(growth_model, self.growth_models['linear'])
revenue = model_func(
current_state.get('revenue', 0),
growth_rate,
year
)
# Scale other metrics
cogs = revenue * scenario.get('cogs_ratio', 0.3)
opex = current_state.get('operating_expenses', 0) * (1 + scenario.get('opex_growth', 0.15))
gross_profit = revenue - cogs
ebitda = gross_profit - opex
# Calculate free cash flow (simplified)
capex = revenue * scenario.get('capex_ratio', 0.05)
working_capital_change = (revenue - current_state.get('revenue', 0)) * 0.1
free_cash_flow = ebitda - capex - working_capital_change
return {
'year': year,
'revenue': revenue,
'gross_profit': gross_profit,
'gross_margin': (gross_profit / revenue * 100) if revenue > 0 else 0,
'operating_expenses': opex,
'ebitda': ebitda,
'ebitda_margin': (ebitda / revenue * 100) if revenue > 0 else 0,
'free_cash_flow': free_cash_flow,
'cumulative_cash_flow': current_state.get('cumulative_cash_flow', 0) + free_cash_flow
}
def _calculate_npv(self, cash_flows: List[float], discount_rate: float) -> float:
"""Calculate Net Present Value"""
npv = 0
for i, cf in enumerate(cash_flows):
npv += cf / math.pow(1 + discount_rate, i + 1)
return npv
def _calculate_irr(self, cash_flows: List[float], initial_investment: float) -> float:
"""Calculate Internal Rate of Return (simplified)"""
if not cash_flows or initial_investment == 0:
return 0
# Simple IRR approximation
total_return = sum(cash_flows)
years = len(cash_flows)
if initial_investment > 0:
return math.pow(total_return / initial_investment, 1/years) - 1
return 0
    def _find_break_even(self, projections: List[Dict]) -> int:
        """Find the approximate break-even month; -1 if never reached.

        Walks the yearly projections; on the first EBITDA-positive year
        it linearly interpolates within that year using the improvement
        over the previous year's EBITDA.
        """
        months = 0
        for projection in projections:
            months += 12
            if projection.get('ebitda', 0) > 0:
                # Interpolate to find exact month
                if months == 12:
                    # Positive in year one: no prior year to interpolate from.
                    return months
                # NOTE(review): indexes the *list* by year-2, which assumes
                # projections are consecutive years starting at 1 — holds
                # for _analyze_scenario's output; confirm for other callers.
                prev_ebitda = projections[projection['year']-2].get('ebitda', 0) if projection['year'] > 1 else 0
                monthly_improvement = (projection['ebitda'] - prev_ebitda) / 12
                if monthly_improvement > 0:
                    months_to_breakeven = abs(prev_ebitda) / monthly_improvement
                    return int(months - 12 + months_to_breakeven)
        return -1  # Not reached
def _calculate_total_return(self, projections: List[Dict], base_case: Dict) -> float:
"""Calculate total return multiple"""
initial = base_case.get('valuation', 1000000)
# Simple valuation at end (10x revenue multiple for SaaS)
final_revenue = projections[-1]['revenue'] if projections else 0
final_valuation = final_revenue * 10
return (final_valuation / initial) if initial > 0 else 0
def _perform_sensitivity_analysis(self, base_case: Dict, scenarios: List[Dict]) -> Dict:
"""Perform sensitivity analysis on key variables"""
sensitivity = {}
key_variables = ['growth_rate', 'gross_margin', 'customer_acquisition_cost']
for variable in key_variables:
sensitivity[variable] = {
'low': self._calculate_variable_impact(base_case, variable, -0.2),
'base': self._calculate_variable_impact(base_case, variable, 0),
'high': self._calculate_variable_impact(base_case, variable, 0.2)
}
return sensitivity
def _calculate_variable_impact(self, base_case: Dict, variable: str, change: float) -> float:
"""Calculate impact of variable change on valuation"""
# Simplified impact calculation
impacts = {
'growth_rate': 2.5, # 2.5x multiplier on valuation
'gross_margin': 1.8, # 1.8x multiplier
'customer_acquisition_cost': -1.2 # Negative impact
}
base_value = 10000000 # Base valuation
impact_multiplier = impacts.get(variable, 1.0)
return base_value * (1 + change * impact_multiplier)
def _calculate_risk_adjusted_returns(self, scenarios: List[Dict]) -> Dict:
"""Calculate risk-adjusted returns"""
expected_value = 0
best_case = None
worst_case = None
for scenario in scenarios:
probability = scenario['probability']
npv = scenario['npv']
expected_value += probability * npv
if best_case is None or npv > best_case['npv']:
best_case = scenario
if worst_case is None or npv < worst_case['npv']:
worst_case = scenario
# Calculate standard deviation (simplified)
variance = sum([
scenario['probability'] * math.pow(scenario['npv'] - expected_value, 2)
for scenario in scenarios
])
std_dev = math.sqrt(variance)
return {
'expected_value': expected_value,
'best_case': best_case['name'] if best_case else 'None',
'best_case_npv': best_case['npv'] if best_case else 0,
'worst_case': worst_case['name'] if worst_case else 'None',
'worst_case_npv': worst_case['npv'] if worst_case else 0,
'standard_deviation': std_dev,
'sharpe_ratio': (expected_value / std_dev) if std_dev > 0 else 0
}
def _generate_recommendation(self, scenarios: List[Dict], risk_adjusted: Dict) -> Dict:
"""Generate recommendation based on analysis"""
recommendation = {
'recommended_scenario': '',
'rationale': [],
'key_actions': [],
'risk_mitigation': []
}
# Find optimal scenario
best_risk_adjusted = max(scenarios, key=lambda s: s['npv'] * s['probability'])
recommendation['recommended_scenario'] = best_risk_adjusted['name']
# Generate rationale
if best_risk_adjusted['npv'] > 0:
recommendation['rationale'].append(f"Positive NPV of ${best_risk_adjusted['npv']:,.0f}")
if best_risk_adjusted['irr'] > 0.15:
recommendation['rationale'].append(f"Strong IRR of {best_risk_adjusted['irr']:.1%}")
if best_risk_adjusted['break_even_month'] > 0 and best_risk_adjusted['break_even_month'] < 24:
recommendation['rationale'].append(f"Quick path to profitability ({best_risk_adjusted['break_even_month']} months)")
# Key actions
recommendation['key_actions'] = [
'Secure funding for growth initiatives',
'Build scalable operational infrastructure',
'Invest in customer acquisition channels',
'Strengthen unit economics',
'Establish financial controls'
]
# Risk mitigation
if risk_adjusted['standard_deviation'] > risk_adjusted['expected_value'] * 0.5:
recommendation['risk_mitigation'].append('High variability - consider hedging strategies')
recommendation['risk_mitigation'].extend([
'Maintain 12+ months runway',
'Diversify revenue streams',
'Build contingency plans for downside scenarios'
])
return recommendation
def analyze_financial_scenarios(base_case: Dict, scenarios: List[Dict]) -> str:
    """Run the scenario analysis and render it as a plain-text report.

    Args:
        base_case: Current financials (revenue, costs, cash, valuation, ...).
        scenarios: Scenario definitions passed through to the analyzer.

    Returns:
        A multi-line report string covering the base case, each scenario,
        the risk-adjusted view and the final recommendation.
    """
    results = FinancialScenarioAnalyzer().analyze_scenarios(base_case, scenarios)
    base = results['base_case_summary']
    lines = [
        "=== Financial Scenario Analysis ===",
        "",
        "Base Case Summary:",
        f"  Revenue: ${base['revenue']:,.0f}",
        f"  Gross Margin: {base['gross_margin']:.1f}%",
        f"  EBITDA: ${base['ebitda']:,.0f}",
        f"  Runway: {base['runway_months']:.1f} months",
        "",
        "Scenario Analysis:",
    ]
    for sc in results['scenario_analysis']:
        lines += [
            f"\n{sc['name']} (Probability: {sc['probability']:.0%})",
            f"  NPV: ${sc['npv']:,.0f}",
            f"  IRR: {sc['irr']:.1%}",
            f"  Break-even: {sc['break_even_month']} months",
            f"  Return Multiple: {sc['total_return']:.1f}x",
        ]
        # Highlight the final projected year (year 3) for each scenario.
        if sc['projections']:
            final_year = sc['projections'][-1]
            lines.append(f"  Year 3 Revenue: ${final_year['revenue']:,.0f}")
            lines.append(f"  Year 3 EBITDA Margin: {final_year['ebitda_margin']:.1f}%")
    risk = results['risk_adjusted_view']
    lines += [
        "",
        "Risk-Adjusted Analysis:",
        f"  Expected Value: ${risk['expected_value']:,.0f}",
        f"  Best Case: {risk['best_case']} (${risk['best_case_npv']:,.0f})",
        f"  Worst Case: {risk['worst_case']} (${risk['worst_case_npv']:,.0f})",
        f"  Risk (Std Dev): ${risk['standard_deviation']:,.0f}",
        f"  Sharpe Ratio: {risk['sharpe_ratio']:.2f}",
        "",
        f"RECOMMENDATION: {results['recommendation']['recommended_scenario']}",
        "",
        "Rationale:",
    ]
    for reason in results['recommendation']['rationale']:
        lines.append(f"  β’ {reason}")
    lines += ["", "Key Actions:"]
    for action in results['recommendation']['key_actions'][:3]:
        lines.append(f"  β’ {action}")
    return '\n'.join(lines)
if __name__ == "__main__":
    # Example usage
    # Illustrative base case (all monetary figures in dollars).
    example_base_case = {
        'revenue': 5000000,
        'cogs': 1500000,
        'operating_expenses': 3000000,
        'cash': 2000000,
        'burn_rate': 200000,
        'valuation': 20000000,
        'initial_investment': 5000000
    }
    # Three scenarios whose probabilities sum to 1.0; each carries its own
    # growth model/rate, cost assumptions and discount rate.
    example_scenarios = [
        {
            'name': 'Aggressive Growth',
            'probability': 0.3,
            'growth_model': 'exponential',
            'growth_rate': 0.5,
            'changes': {
                'operating_expenses': {'multiply': 1.3}
            },
            'assumptions': ['Market expansion successful', 'Product-market fit achieved'],
            'cogs_ratio': 0.25,
            'opex_growth': 0.3,
            'capex_ratio': 0.08,
            'discount_rate': 0.12
        },
        {
            'name': 'Moderate Growth',
            'probability': 0.5,
            'growth_model': 'exponential',
            'growth_rate': 0.3,
            'changes': {},
            'assumptions': ['Steady market growth', 'Competition remains stable'],
            'cogs_ratio': 0.3,
            'opex_growth': 0.15,
            'capex_ratio': 0.05,
            'discount_rate': 0.10
        },
        {
            'name': 'Conservative',
            'probability': 0.2,
            'growth_model': 'linear',
            'growth_rate': 0.15,
            'changes': {
                'operating_expenses': {'multiply': 0.9}
            },
            'assumptions': ['Market headwinds', 'Focus on profitability'],
            'cogs_ratio': 0.35,
            'opex_growth': 0.05,
            'capex_ratio': 0.03,
            'discount_rate': 0.08
        }
    ]
    print(analyze_financial_scenarios(example_base_case, example_scenarios))
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/business-marketing/ceo-advisor/scripts/financial_scenario_analyzer.py",
"license": "MIT License",
"lines": 379,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/business-marketing/ceo-advisor/scripts/strategy_analyzer.py | #!/usr/bin/env python3
"""
Strategic Planning Analyzer - Comprehensive business strategy assessment tool
"""
import json
from typing import Dict, List, Tuple
from datetime import datetime, timedelta
import math
class StrategyAnalyzer:
    """Comprehensive strategic position analyzer.

    Scores a company across five weighted strategic pillars, applies classic
    frameworks (SWOT, Porter's Five Forces, BCG portfolio), then derives
    strategic options, a risk assessment, an implementation roadmap and
    recommendations. Factor inputs are expected on a 0-100 scale.
    """
    def __init__(self):
        # Weighted pillars (weights sum to 1.0); each pillar is scored by
        # averaging the listed factor sub-scores.
        self.strategic_pillars = {
            'market_position': {
                'weight': 0.25,
                'factors': ['market_share', 'brand_strength', 'competitive_advantage', 'customer_loyalty']
            },
            'financial_health': {
                'weight': 0.25,
                'factors': ['revenue_growth', 'profitability', 'cash_flow', 'unit_economics']
            },
            'operational_excellence': {
                'weight': 0.20,
                'factors': ['efficiency', 'quality', 'scalability', 'innovation']
            },
            'organizational_capability': {
                'weight': 0.20,
                'factors': ['talent', 'culture', 'leadership', 'agility']
            },
            'growth_potential': {
                'weight': 0.10,
                'factors': ['market_size', 'expansion_opportunities', 'product_pipeline', 'partnerships']
            }
        }
        # Reference catalog of framework components (not all are consumed by
        # _apply_frameworks; kept as documentation of the supported models).
        self.strategic_frameworks = {
            'porter_five_forces': [
                'competitive_rivalry',
                'supplier_power',
                'buyer_power',
                'threat_of_substitution',
                'threat_of_new_entry'
            ],
            'swot': ['strengths', 'weaknesses', 'opportunities', 'threats'],
            'bcg_matrix': ['stars', 'cash_cows', 'question_marks', 'dogs'],
            'ansoff_matrix': ['market_penetration', 'market_development', 'product_development', 'diversification']
        }
    def analyze_strategic_position(self, company_data: Dict) -> Dict:
        """Comprehensive strategic analysis.

        Args:
            company_data: Dict keyed by pillar name (each mapping factor name
                to a 0-100 score), plus optional 'name', 'swot',
                'competitive_forces', 'products', 'context' and 'timeline'.

        Returns:
            Dict with the overall strategic health score, per-pillar analysis,
            framework analyses, ranked strategic options, risk assessment,
            roadmap and recommendations.
        """
        results = {
            'timestamp': datetime.now().isoformat(),
            'company': company_data.get('name', 'Company'),
            'strategic_health_score': 0,
            'pillar_analysis': {},
            'framework_analysis': {},
            'strategic_options': [],
            'risk_assessment': {},
            'recommendations': [],
            'roadmap': {}
        }
        # Analyze strategic pillars and accumulate the weighted total.
        total_score = 0
        for pillar, config in self.strategic_pillars.items():
            pillar_score = self._analyze_pillar(
                company_data.get(pillar, {}),
                config['factors']
            )
            weighted_score = pillar_score * config['weight']
            results['pillar_analysis'][pillar] = {
                'score': pillar_score,
                'weighted_score': weighted_score,
                'level': self._get_level(pillar_score),
                'factors': self._get_pillar_details(company_data.get(pillar, {}), config['factors'])
            }
            total_score += weighted_score
        results['strategic_health_score'] = round(total_score, 1)
        # Framework analysis
        results['framework_analysis'] = self._apply_frameworks(company_data)
        # Generate strategic options
        results['strategic_options'] = self._generate_strategic_options(
            results['pillar_analysis'],
            company_data.get('context', {})
        )
        # Risk assessment
        results['risk_assessment'] = self._assess_strategic_risks(
            company_data,
            results['strategic_options']
        )
        # Generate roadmap
        results['roadmap'] = self._create_strategic_roadmap(
            results['strategic_options'],
            company_data.get('timeline', 12)
        )
        # Generate recommendations
        results['recommendations'] = self._generate_recommendations(results)
        return results
    def _analyze_pillar(self, pillar_data: Dict, factors: List) -> float:
        """Average the factor scores present in `pillar_data`.

        Missing pillar data (or no recognized factors) yields a neutral 50.0.
        """
        if not pillar_data:
            return 50.0
        total_score = 0
        count = 0
        for factor in factors:
            if factor in pillar_data:
                score = pillar_data[factor]
                total_score += score
                count += 1
        return (total_score / count) if count > 0 else 50.0
    def _get_pillar_details(self, pillar_data: Dict, factors: List) -> List[Dict]:
        """Get detailed factor analysis: score plus a coarse status label."""
        details = []
        for factor in factors:
            score = pillar_data.get(factor, 50)  # neutral default when missing
            details.append({
                'factor': factor.replace('_', ' ').title(),
                'score': score,
                'status': 'Strong' if score >= 70 else 'Adequate' if score >= 40 else 'Weak'
            })
        return details
    def _get_level(self, score: float) -> str:
        """Convert a 0-100 score to a qualitative level label."""
        if score >= 80:
            return 'Excellent'
        elif score >= 70:
            return 'Strong'
        elif score >= 50:
            return 'Adequate'
        elif score >= 30:
            return 'Weak'
        else:
            return 'Critical'
    def _apply_frameworks(self, company_data: Dict) -> Dict:
        """Apply strategic frameworks (SWOT, Porter's forces, BCG portfolio).

        Missing inputs fall back to illustrative defaults rather than
        failing, so the report is always fully populated.
        """
        frameworks = {}
        # SWOT Analysis
        swot_data = company_data.get('swot', {})
        frameworks['swot'] = {
            'strengths': swot_data.get('strengths', [
                'Strong brand recognition',
                'Experienced leadership team',
                'Robust technology platform'
            ]),
            'weaknesses': swot_data.get('weaknesses', [
                'Limited geographic presence',
                'High customer acquisition cost',
                'Technical debt'
            ]),
            'opportunities': swot_data.get('opportunities', [
                'Growing market demand',
                'M&A opportunities',
                'New product categories'
            ]),
            'threats': swot_data.get('threats', [
                'Increasing competition',
                'Regulatory changes',
                'Economic uncertainty'
            ])
        }
        # Porter's Five Forces
        forces = company_data.get('competitive_forces', {})
        frameworks['porter_analysis'] = {
            'competitive_rivalry': forces.get('rivalry', 70),
            'supplier_power': forces.get('suppliers', 40),
            'buyer_power': forces.get('buyers', 60),
            'threat_of_substitutes': forces.get('substitutes', 50),
            'threat_of_new_entrants': forces.get('new_entrants', 45),
            'overall_attractiveness': self._calculate_industry_attractiveness(forces)
        }
        # BCG Matrix for product portfolio
        products = company_data.get('products', [])
        frameworks['portfolio_analysis'] = self._analyze_portfolio(products)
        return frameworks
    def _calculate_industry_attractiveness(self, forces: Dict) -> float:
        """Calculate industry attractiveness from Porter's forces.

        Each force is inverted (100 - force) and the five are averaged, so a
        higher result means a more attractive industry.
        """
        # Lower forces = more attractive industry
        rivalry = 100 - forces.get('rivalry', 50)
        supplier = 100 - forces.get('suppliers', 50)
        buyer = 100 - forces.get('buyers', 50)
        substitutes = 100 - forces.get('substitutes', 50)
        new_entrants = 100 - forces.get('new_entrants', 50)
        avg = (rivalry + supplier + buyer + substitutes + new_entrants) / 5
        return round(avg, 1)
    def _analyze_portfolio(self, products: List) -> Dict:
        """Analyze product portfolio using BCG matrix.

        Thresholds: market growth > 10 counts as high growth, market share
        > 50 counts as high share.
        """
        portfolio = {
            'stars': [],
            'cash_cows': [],
            'question_marks': [],
            'dogs': []
        }
        for product in products:
            growth = product.get('market_growth', 0)
            share = product.get('market_share', 0)
            if growth > 10 and share > 50:
                portfolio['stars'].append(product.get('name', 'Product'))
            elif growth <= 10 and share > 50:
                portfolio['cash_cows'].append(product.get('name', 'Product'))
            elif growth > 10 and share <= 50:
                portfolio['question_marks'].append(product.get('name', 'Product'))
            else:
                portfolio['dogs'].append(product.get('name', 'Product'))
        return portfolio
    def _generate_strategic_options(self, pillar_analysis: Dict, context: Dict) -> List[Dict]:
        """Generate strategic options based on analysis.

        Options are triggered by weak/strong pillar scores and context flags,
        ranked by priority (highest first); only the top 5 are returned.
        """
        options = []
        # Check market position
        market_score = pillar_analysis['market_position']['score']
        if market_score < 60:
            options.append({
                'name': 'Market Leadership Initiative',
                'type': 'market_penetration',
                'description': 'Aggressive market share capture through competitive pricing and marketing',
                'investment': 'High',
                'timeframe': '12-18 months',
                'expected_impact': 'Increase market share by 10-15%',
                'priority': 9
            })
        # Check financial health
        financial_score = pillar_analysis['financial_health']['score']
        if financial_score < 50:
            options.append({
                'name': 'Profitability Turnaround',
                'type': 'operational_excellence',
                'description': 'Cost reduction and revenue optimization program',
                'investment': 'Medium',
                'timeframe': '6-9 months',
                'expected_impact': 'Improve margins by 5-8%',
                'priority': 10
            })
        # Check growth potential
        growth_score = pillar_analysis['growth_potential']['score']
        if growth_score > 70:
            options.append({
                'name': 'Expansion Strategy',
                'type': 'market_development',
                'description': 'Enter new geographic markets or customer segments',
                'investment': 'High',
                'timeframe': '18-24 months',
                'expected_impact': 'Revenue growth of 30-40%',
                'priority': 8
            })
        # Innovation opportunities
        if context.get('industry_disruption', False):
            options.append({
                'name': 'Digital Transformation',
                'type': 'innovation',
                'description': 'Comprehensive digitalization of business processes and customer experience',
                'investment': 'Very High',
                'timeframe': '24-36 months',
                'expected_impact': 'Future-proof business model',
                'priority': 9
            })
        # M&A opportunities (requires > $100M available cash)
        if context.get('cash_available', 0) > 100000000:
            options.append({
                'name': 'Strategic Acquisition',
                'type': 'acquisition',
                'description': 'Acquire complementary businesses or competitors',
                'investment': 'Very High',
                'timeframe': '6-12 months',
                'expected_impact': 'Instant scale and capability',
                'priority': 7
            })
        # Sort by priority
        options.sort(key=lambda x: x['priority'], reverse=True)
        return options[:5]  # Top 5 strategic options
    def _assess_strategic_risks(self, company_data: Dict, strategic_options: List) -> Dict:
        """Assess strategic risks on 0-100 scales (higher = riskier)."""
        risks = {
            'execution_risk': self._calculate_execution_risk(company_data),
            'market_risk': self._calculate_market_risk(company_data),
            'financial_risk': self._calculate_financial_risk(company_data),
            'competitive_risk': self._calculate_competitive_risk(company_data),
            'regulatory_risk': company_data.get('regulatory_risk', 30),
            'overall_risk': 0,
            'mitigation_strategies': []
        }
        # Calculate overall risk as the plain average of the five categories.
        risk_values = [
            risks['execution_risk'],
            risks['market_risk'],
            risks['financial_risk'],
            risks['competitive_risk'],
            risks['regulatory_risk']
        ]
        risks['overall_risk'] = sum(risk_values) / len(risk_values)
        # Generate mitigation strategies for any category above 60.
        if risks['execution_risk'] > 60:
            risks['mitigation_strategies'].append({
                'risk': 'Execution',
                'strategy': 'Strengthen PMO, hire experienced executives, implement OKRs'
            })
        if risks['market_risk'] > 60:
            risks['mitigation_strategies'].append({
                'risk': 'Market',
                'strategy': 'Diversify revenue streams, build strategic partnerships'
            })
        if risks['financial_risk'] > 60:
            risks['mitigation_strategies'].append({
                'risk': 'Financial',
                'strategy': 'Improve cash management, secure credit facilities, optimize working capital'
            })
        return risks
    def _calculate_execution_risk(self, data: Dict) -> float:
        """Calculate execution risk (weak org capability raises risk)."""
        org_capability = data.get('organizational_capability', {})
        factors = [
            100 - org_capability.get('leadership', 50),
            100 - org_capability.get('talent', 50),
            100 - org_capability.get('agility', 50),
            data.get('complexity_score', 50)
        ]
        return sum(factors) / len(factors)
    def _calculate_market_risk(self, data: Dict) -> float:
        """Calculate market risk (low share, volatility, concentration)."""
        market = data.get('market_position', {})
        factors = [
            100 - market.get('market_share', 50),
            data.get('market_volatility', 50),
            data.get('customer_concentration', 50)
        ]
        return sum(factors) / len(factors)
    def _calculate_financial_risk(self, data: Dict) -> float:
        """Calculate financial risk from cash flow, profitability, debt, burn."""
        financial = data.get('financial_health', {})
        factors = [
            100 - financial.get('cash_flow', 50),
            100 - financial.get('profitability', 50),
            data.get('debt_ratio', 50),
            # Burn rate only contributes when supplied; otherwise a mild 30.
            data.get('burn_rate', 50) if 'burn_rate' in data else 30
        ]
        return sum(factors) / len(factors)
    def _calculate_competitive_risk(self, data: Dict) -> float:
        """Calculate competitive risk from rivalry and new-entrant pressure."""
        forces = data.get('competitive_forces', {})
        return (forces.get('rivalry', 50) + forces.get('new_entrants', 50)) / 2
    def _create_strategic_roadmap(self, options: List, timeline_months: int) -> Dict:
        """Create implementation roadmap.

        NOTE(review): timeline_months is currently unused — the phase windows
        below are fixed; confirm whether they should scale with the timeline.
        """
        roadmap = {
            'phases': [],
            'milestones': [],
            'resource_requirements': {},
            'success_metrics': []
        }
        # Define phases
        phases = [
            {
                'phase': 'Foundation',
                'months': '0-3',
                'focus': 'Build capabilities and quick wins',
                'initiatives': []
            },
            {
                'phase': 'Acceleration',
                'months': '3-9',
                'focus': 'Execute core strategies',
                'initiatives': []
            },
            {
                'phase': 'Scale',
                'months': '9-18',
                'focus': 'Expand and optimize',
                'initiatives': []
            },
            {
                'phase': 'Transform',
                'months': '18+',
                'focus': 'Long-term transformation',
                'initiatives': []
            }
        ]
        # Assign initiatives to phases: highest-priority option goes earliest;
        # any option past the third lands in the Transform phase.
        for i, option in enumerate(options[:4]):
            if i == 0:
                phases[0]['initiatives'].append(option['name'])
            elif i == 1:
                phases[1]['initiatives'].append(option['name'])
            elif i == 2:
                phases[2]['initiatives'].append(option['name'])
            else:
                phases[3]['initiatives'].append(option['name'])
        roadmap['phases'] = phases
        # Define key milestones
        roadmap['milestones'] = [
            {'month': 3, 'milestone': 'Complete foundation phase', 'success_criteria': 'Core team hired, processes defined'},
            {'month': 6, 'milestone': 'First major initiative launch', 'success_criteria': 'KPIs showing positive trend'},
            {'month': 12, 'milestone': 'Strategic review', 'success_criteria': 'ROI demonstrated, strategy validated'},
            {'month': 18, 'milestone': 'Scale achievement', 'success_criteria': 'Market position improved, financial targets met'}
        ]
        # Resource requirements
        roadmap['resource_requirements'] = {
            'leadership': 'C-suite alignment and commitment',
            'financial': '$X million investment over 18 months',
            'human': 'Additional 20-30 FTEs across functions',
            'technology': 'Platform upgrades and new tools',
            'external': 'Consultants and advisors as needed'
        }
        # Success metrics
        roadmap['success_metrics'] = [
            'Revenue growth: 25% YoY',
            'Market share: +5 percentage points',
            'EBITDA margin: +8 percentage points',
            'Customer NPS: >70',
            'Employee engagement: >80%'
        ]
        return roadmap
    def _generate_recommendations(self, results: Dict) -> List[str]:
        """Generate strategic recommendations from the assembled results."""
        recommendations = []
        # Based on overall score
        score = results['strategic_health_score']
        if score < 40:
            recommendations.append('π¨ URGENT: Immediate turnaround required - consider bringing in crisis management team')
            recommendations.append('Focus on cash preservation and core business stabilization')
        elif score < 60:
            recommendations.append('β οΈ Strategic repositioning needed - prioritize 2-3 key initiatives')
            recommendations.append('Strengthen weak pillars before pursuing growth')
        elif score < 80:
            recommendations.append('β Solid position - focus on selective improvements and growth')
            recommendations.append('Invest in innovation and market expansion')
        else:
            recommendations.append('β Excellent position - maintain momentum and explore bold moves')
            recommendations.append('Consider industry disruption or category creation')
        # Based on specific weaknesses (pillar score below 50)
        for pillar, analysis in results['pillar_analysis'].items():
            if analysis['score'] < 50:
                if pillar == 'market_position':
                    recommendations.append(f'Strengthen {pillar}: Launch competitive differentiation program')
                elif pillar == 'financial_health':
                    recommendations.append(f'Improve {pillar}: Implement profitability improvement plan')
                elif pillar == 'organizational_capability':
                    recommendations.append(f'Build {pillar}: Invest in talent and culture transformation')
        # Based on opportunities
        if results['framework_analysis']['porter_analysis']['overall_attractiveness'] > 70:
            recommendations.append('Industry is attractive - consider aggressive expansion')
        # Risk-based recommendations
        if results['risk_assessment']['overall_risk'] > 60:
            recommendations.append('High risk profile - implement comprehensive risk management')
        return recommendations
def analyze_strategy(company_data: Dict) -> str:
    """Run the full strategic assessment and render it as a text report.

    Args:
        company_data: Company inputs as consumed by
            StrategyAnalyzer.analyze_strategic_position.

    Returns:
        A multi-line report covering pillars, options, risks, roadmap and
        recommendations.
    """
    report = StrategyAnalyzer().analyze_strategic_position(company_data)
    lines = [
        "=== Strategic Analysis Report ===",
        f"Company: {report['company']}",
        f"Date: {report['timestamp'][:10]}",
        "",
        f"STRATEGIC HEALTH SCORE: {report['strategic_health_score']}/100",
        "",
        "Strategic Pillars:",
    ]
    for pillar_name, pillar in report['pillar_analysis'].items():
        lines.append(f"  {pillar_name.replace('_', ' ').title()}: {pillar['score']:.1f} ({pillar['level']})")
        for detail in pillar['factors'][:2]:  # Show top 2 factors
            lines.append(f"    β’ {detail['factor']}: {detail['status']}")
    lines += ["", "Strategic Options:"]
    for rank, option in enumerate(report['strategic_options'][:3], 1):
        lines.append(f"\n{rank}. {option['name']} (Priority: {option['priority']}/10)")
        lines.append(f"   Type: {option['type']}")
        lines.append(f"   Investment: {option['investment']}")
        lines.append(f"   Timeframe: {option['timeframe']}")
        lines.append(f"   Impact: {option['expected_impact']}")
    risk = report['risk_assessment']
    lines += [
        "",
        "Risk Assessment:",
        f"  Overall Risk: {risk['overall_risk']:.1f}%",
        f"  Execution Risk: {risk['execution_risk']:.1f}%",
        f"  Market Risk: {risk['market_risk']:.1f}%",
        f"  Financial Risk: {risk['financial_risk']:.1f}%",
        "",
        "Strategic Roadmap:",
    ]
    for phase in report['roadmap']['phases'][:3]:
        lines.append(f"  {phase['phase']} ({phase['months']}): {phase['focus']}")
        for initiative in phase['initiatives']:
            lines.append(f"    β’ {initiative}")
    lines += ["", "Key Recommendations:"]
    for recommendation in report['recommendations'][:5]:
        lines.append(f"  β’ {recommendation}")
    return '\n'.join(lines)
if __name__ == "__main__":
    # Example usage
    # All factor scores below are on a 0-100 scale; 'operational_excellence'
    # is intentionally omitted and defaults to a neutral 50.
    example_company = {
        'name': 'TechCorp Inc.',
        'market_position': {
            'market_share': 35,
            'brand_strength': 65,
            'competitive_advantage': 70,
            'customer_loyalty': 60
        },
        'financial_health': {
            'revenue_growth': 45,
            'profitability': 40,
            'cash_flow': 55,
            'unit_economics': 60
        },
        'organizational_capability': {
            'talent': 70,
            'culture': 65,
            'leadership': 75,
            'agility': 60
        },
        'growth_potential': {
            'market_size': 80,
            'expansion_opportunities': 70,
            'product_pipeline': 60,
            'partnerships': 55
        },
        'competitive_forces': {
            'rivalry': 70,
            'suppliers': 40,
            'buyers': 60,
            'substitutes': 50,
            'new_entrants': 45
        },
        # Context flags that unlock extra strategic options.
        'context': {
            'industry_disruption': True,
            'cash_available': 150000000
        },
        'timeline': 18
    }
    print(analyze_strategy(example_company))
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/business-marketing/ceo-advisor/scripts/strategy_analyzer.py",
"license": "MIT License",
"lines": 527,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/business-marketing/content-creator/scripts/brand_voice_analyzer.py | #!/usr/bin/env python3
"""
Brand Voice Analyzer - Analyzes content to establish and maintain brand voice consistency
"""
import re
from typing import Dict, List, Tuple
import json
class BrandVoiceAnalyzer:
    """Analyzes text for brand-voice characteristics.

    Scores three voice dimensions (formality, tone, perspective) by counting
    marker keywords, estimates readability with the Flesch Reading Ease
    formula, and summarizes sentence structure.
    """

    def __init__(self):
        # Marker keywords per dimension; within a dimension, the category
        # with the most hits becomes the dominant voice.
        self.voice_dimensions = {
            'formality': {
                'formal': ['hereby', 'therefore', 'furthermore', 'pursuant', 'regarding'],
                'casual': ['hey', 'cool', 'awesome', 'stuff', 'yeah', 'gonna']
            },
            'tone': {
                'professional': ['expertise', 'solution', 'optimize', 'leverage', 'strategic'],
                'friendly': ['happy', 'excited', 'love', 'enjoy', 'together', 'share']
            },
            'perspective': {
                'authoritative': ['proven', 'research shows', 'experts agree', 'data indicates'],
                'conversational': ['you might', 'let\'s explore', 'we think', 'imagine if']
            }
        }

    def analyze_text(self, text: str) -> Dict:
        """Analyze text for brand voice characteristics.

        Args:
            text: The content to analyze.

        Returns:
            Dict with word count, readability score, per-dimension voice
            profile, sentence statistics and writing recommendations.
        """
        text_lower = text.lower()
        word_count = len(text.split())
        results = {
            'word_count': word_count,
            'readability_score': self._calculate_readability(text),
            'voice_profile': {},
            'sentence_analysis': self._analyze_sentences(text),
            'recommendations': []
        }
        # Analyze voice dimensions by counting marker-keyword hits.
        for dimension, categories in self.voice_dimensions.items():
            dim_scores = {}
            for category, keywords in categories.items():
                score = sum(1 for keyword in keywords if keyword in text_lower)
                dim_scores[category] = score
            # Determine dominant voice; dimensions with no hits are omitted.
            if sum(dim_scores.values()) > 0:
                dominant = max(dim_scores, key=dim_scores.get)
                results['voice_profile'][dimension] = {
                    'dominant': dominant,
                    'scores': dim_scores
                }
        # Generate recommendations
        results['recommendations'] = self._generate_recommendations(results)
        return results

    def _calculate_readability(self, text: str) -> float:
        """Calculate the Flesch Reading Ease score (0-100, higher = easier).

        Fix: re.split leaves empty strings around terminal punctuation
        (e.g. "Hi." -> ['Hi', '']); these previously inflated the sentence
        count and understated average sentence length, so blank/whitespace
        fragments are filtered out before computing the score.
        """
        sentences = [s for s in re.split(r'[.!?]+', text) if s.strip()]
        words = text.split()
        if not sentences or not words:
            return 0
        syllables = sum(self._count_syllables(word) for word in words)
        avg_sentence_length = len(words) / len(sentences)
        avg_syllables_per_word = syllables / len(words)
        # Flesch Reading Ease formula
        score = 206.835 - 1.015 * avg_sentence_length - 84.6 * avg_syllables_per_word
        return max(0, min(100, score))

    def _count_syllables(self, word: str) -> int:
        """Count syllables in a word (simplified vowel-group heuristic)."""
        word = word.lower()
        vowels = 'aeiou'
        syllable_count = 0
        previous_was_vowel = False
        # Each maximal run of vowels counts as one syllable.
        for char in word:
            is_vowel = char in vowels
            if is_vowel and not previous_was_vowel:
                syllable_count += 1
            previous_was_vowel = is_vowel
        # Adjust for silent e
        if word.endswith('e'):
            syllable_count -= 1
        # Every word has at least one syllable.
        return max(1, syllable_count)

    def _analyze_sentences(self, text: str) -> Dict:
        """Summarize sentence structure: average length, variety, count."""
        sentences = re.split(r'[.!?]+', text)
        sentences = [s.strip() for s in sentences if s.strip()]
        if not sentences:
            # Include 'count' so the result shape is consistent for empty input.
            return {'average_length': 0, 'variety': 'low', 'count': 0}
        lengths = [len(s.split()) for s in sentences]
        avg_length = sum(lengths) / len(lengths) if lengths else 0
        # Variety is judged by how many distinct sentence lengths appear.
        if len(set(lengths)) < 3:
            variety = 'low'
        elif len(set(lengths)) < 5:
            variety = 'medium'
        else:
            variety = 'high'
        return {
            'average_length': round(avg_length, 1),
            'variety': variety,
            'count': len(sentences)
        }

    def _generate_recommendations(self, analysis: Dict) -> List[str]:
        """Generate writing recommendations from the analysis results."""
        recommendations = []
        # Readability recommendations
        if analysis['readability_score'] < 30:
            recommendations.append("Consider simplifying language for better readability")
        elif analysis['readability_score'] > 70:
            recommendations.append("Content is very easy to read - consider if this matches your audience")
        # Sentence variety
        if analysis['sentence_analysis']['variety'] == 'low':
            recommendations.append("Vary sentence length for better flow and engagement")
        # Voice consistency
        if analysis['voice_profile']:
            recommendations.append("Maintain consistent voice across all content")
        return recommendations
def analyze_content(content: str, output_format: str = 'json') -> str:
    """Analyze `content` and return the report as JSON or plain text.

    Args:
        content: Text to analyze.
        output_format: 'json' for a JSON dump, anything else for text.
    """
    report = BrandVoiceAnalyzer().analyze_text(content)
    if output_format == 'json':
        return json.dumps(report, indent=2)
    # Human-readable format
    lines = [
        "=== Brand Voice Analysis ===",
        f"Word Count: {report['word_count']}",
        f"Readability Score: {report['readability_score']:.1f}/100",
        "",
        "Voice Profile:",
    ]
    for dimension, profile in report['voice_profile'].items():
        lines.append(f"  {dimension.title()}: {profile['dominant']}")
    sentence_stats = report['sentence_analysis']
    lines += [
        "",
        "Sentence Analysis:",
        f"  Average Length: {sentence_stats['average_length']} words",
        f"  Variety: {sentence_stats['variety']}",
        f"  Total Sentences: {sentence_stats['count']}",
        "",
        "Recommendations:",
    ]
    for recommendation in report['recommendations']:
        lines.append(f"  β’ {recommendation}")
    return '\n'.join(lines)
if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1:
        # First CLI argument: path of the file to analyze.
        with open(sys.argv[1], 'r') as f:
            content = f.read()
        # Optional second argument selects the output format (default: text).
        output_format = sys.argv[2] if len(sys.argv) > 2 else 'text'
        print(analyze_content(content, output_format))
    else:
        print("Usage: python brand_voice_analyzer.py <file> [json|text]")
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/business-marketing/content-creator/scripts/brand_voice_analyzer.py",
"license": "MIT License",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/business-marketing/content-creator/scripts/seo_optimizer.py | #!/usr/bin/env python3
"""
SEO Content Optimizer - Analyzes and optimizes content for SEO
"""
import re
from typing import Dict, List, Set
import json
class SEOOptimizer:
    """Analyzes text content against common on-page SEO guidelines."""
    def __init__(self):
        # Common stop words to filter
        self.stop_words = {
            'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
            'of', 'with', 'by', 'from', 'as', 'is', 'was', 'are', 'were', 'be',
            'been', 'being', 'have', 'has', 'had', 'do', 'does', 'did', 'will',
            'would', 'could', 'should', 'may', 'might', 'must', 'can', 'shall'
        }
        # SEO best practices: (min, max) ranges; keyword_density is a
        # fraction of total words.
        self.best_practices = {
            'title_length': (50, 60),
            'meta_description_length': (150, 160),
            'url_length': (50, 60),
            'paragraph_length': (40, 150),
            'heading_keyword_placement': True,
            'keyword_density': (0.01, 0.03)  # 1-3%
        }
def analyze(self, content: str, target_keyword: str = None,
secondary_keywords: List[str] = None) -> Dict:
"""Analyze content for SEO optimization"""
analysis = {
'content_length': len(content.split()),
'keyword_analysis': {},
'structure_analysis': self._analyze_structure(content),
'readability': self._analyze_readability(content),
'meta_suggestions': {},
'optimization_score': 0,
'recommendations': []
}
# Keyword analysis
if target_keyword:
analysis['keyword_analysis'] = self._analyze_keywords(
content, target_keyword, secondary_keywords or []
)
# Generate meta suggestions
analysis['meta_suggestions'] = self._generate_meta_suggestions(
content, target_keyword
)
# Calculate optimization score
analysis['optimization_score'] = self._calculate_seo_score(analysis)
# Generate recommendations
analysis['recommendations'] = self._generate_recommendations(analysis)
return analysis
def _analyze_keywords(self, content: str, primary: str,
secondary: List[str]) -> Dict:
"""Analyze keyword usage and density"""
content_lower = content.lower()
word_count = len(content.split())
results = {
'primary_keyword': {
'keyword': primary,
'count': content_lower.count(primary.lower()),
'density': 0,
'in_title': False,
'in_headings': False,
'in_first_paragraph': False
},
'secondary_keywords': [],
'lsi_keywords': []
}
# Calculate primary keyword metrics
if word_count > 0:
results['primary_keyword']['density'] = (
results['primary_keyword']['count'] / word_count
)
# Check keyword placement
first_para = content.split('\n\n')[0] if '\n\n' in content else content[:200]
results['primary_keyword']['in_first_paragraph'] = (
primary.lower() in first_para.lower()
)
# Analyze secondary keywords
for keyword in secondary:
count = content_lower.count(keyword.lower())
results['secondary_keywords'].append({
'keyword': keyword,
'count': count,
'density': count / word_count if word_count > 0 else 0
})
# Extract potential LSI keywords
results['lsi_keywords'] = self._extract_lsi_keywords(content, primary)
return results
def _analyze_structure(self, content: str) -> Dict:
"""Analyze content structure for SEO"""
lines = content.split('\n')
structure = {
'headings': {'h1': 0, 'h2': 0, 'h3': 0, 'total': 0},
'paragraphs': 0,
'lists': 0,
'images': 0,
'links': {'internal': 0, 'external': 0},
'avg_paragraph_length': 0
}
paragraphs = []
current_para = []
for line in lines:
# Count headings
if line.startswith('# '):
structure['headings']['h1'] += 1
structure['headings']['total'] += 1
elif line.startswith('## '):
structure['headings']['h2'] += 1
structure['headings']['total'] += 1
elif line.startswith('### '):
structure['headings']['h3'] += 1
structure['headings']['total'] += 1
# Count lists
if line.strip().startswith(('- ', '* ', '1. ')):
structure['lists'] += 1
# Count links
internal_links = len(re.findall(r'\[.*?\]\(/.*?\)', line))
external_links = len(re.findall(r'\[.*?\]\(https?://.*?\)', line))
structure['links']['internal'] += internal_links
structure['links']['external'] += external_links
# Track paragraphs
if line.strip() and not line.startswith('#'):
current_para.append(line)
elif current_para:
paragraphs.append(' '.join(current_para))
current_para = []
if current_para:
paragraphs.append(' '.join(current_para))
structure['paragraphs'] = len(paragraphs)
if paragraphs:
avg_length = sum(len(p.split()) for p in paragraphs) / len(paragraphs)
structure['avg_paragraph_length'] = round(avg_length, 1)
return structure
def _analyze_readability(self, content: str) -> Dict:
"""Analyze content readability"""
sentences = re.split(r'[.!?]+', content)
words = content.split()
if not sentences or not words:
return {'score': 0, 'level': 'Unknown'}
avg_sentence_length = len(words) / len(sentences)
# Simple readability scoring
if avg_sentence_length < 15:
level = 'Easy'
score = 90
elif avg_sentence_length < 20:
level = 'Moderate'
score = 70
elif avg_sentence_length < 25:
level = 'Difficult'
score = 50
else:
level = 'Very Difficult'
score = 30
return {
'score': score,
'level': level,
'avg_sentence_length': round(avg_sentence_length, 1)
}
def _extract_lsi_keywords(self, content: str, primary_keyword: str) -> List[str]:
"""Extract potential LSI (semantically related) keywords"""
words = re.findall(r'\b[a-z]+\b', content.lower())
word_freq = {}
# Count word frequencies
for word in words:
if word not in self.stop_words and len(word) > 3:
word_freq[word] = word_freq.get(word, 0) + 1
# Sort by frequency and return top related terms
sorted_words = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)
# Filter out the primary keyword and return top 10
lsi_keywords = []
for word, count in sorted_words:
if word != primary_keyword.lower() and count > 1:
lsi_keywords.append(word)
if len(lsi_keywords) >= 10:
break
return lsi_keywords
def _generate_meta_suggestions(self, content: str, keyword: str = None) -> Dict:
"""Generate SEO meta tag suggestions"""
# Extract first sentence for description base
sentences = re.split(r'[.!?]+', content)
first_sentence = sentences[0] if sentences else content[:160]
suggestions = {
'title': '',
'meta_description': '',
'url_slug': '',
'og_title': '',
'og_description': ''
}
if keyword:
# Title suggestion
suggestions['title'] = f"{keyword.title()} - Complete Guide"
if len(suggestions['title']) > 60:
suggestions['title'] = keyword.title()[:57] + "..."
# Meta description
desc_base = f"Learn everything about {keyword}. {first_sentence}"
if len(desc_base) > 160:
desc_base = desc_base[:157] + "..."
suggestions['meta_description'] = desc_base
# URL slug
suggestions['url_slug'] = re.sub(r'[^a-z0-9-]+', '-',
keyword.lower()).strip('-')
# Open Graph tags
suggestions['og_title'] = suggestions['title']
suggestions['og_description'] = suggestions['meta_description']
return suggestions
def _calculate_seo_score(self, analysis: Dict) -> int:
"""Calculate overall SEO optimization score"""
score = 0
max_score = 100
# Content length scoring (20 points)
if 300 <= analysis['content_length'] <= 2500:
score += 20
elif 200 <= analysis['content_length'] < 300:
score += 10
elif analysis['content_length'] > 2500:
score += 15
# Keyword optimization (30 points)
if analysis['keyword_analysis']:
kw_data = analysis['keyword_analysis']['primary_keyword']
# Density scoring
if 0.01 <= kw_data['density'] <= 0.03:
score += 15
elif 0.005 <= kw_data['density'] < 0.01:
score += 8
# Placement scoring
if kw_data['in_first_paragraph']:
score += 10
if kw_data.get('in_headings'):
score += 5
# Structure scoring (25 points)
struct = analysis['structure_analysis']
if struct['headings']['total'] > 0:
score += 10
if struct['paragraphs'] >= 3:
score += 10
if struct['links']['internal'] > 0 or struct['links']['external'] > 0:
score += 5
# Readability scoring (25 points)
readability_score = analysis['readability']['score']
score += int(readability_score * 0.25)
return min(score, max_score)
def _generate_recommendations(self, analysis: Dict) -> List[str]:
"""Generate SEO improvement recommendations"""
recommendations = []
# Content length recommendations
if analysis['content_length'] < 300:
recommendations.append(
f"Increase content length to at least 300 words (currently {analysis['content_length']})"
)
elif analysis['content_length'] > 3000:
recommendations.append(
"Consider breaking long content into multiple pages or adding a table of contents"
)
# Keyword recommendations
if analysis['keyword_analysis']:
kw_data = analysis['keyword_analysis']['primary_keyword']
if kw_data['density'] < 0.01:
recommendations.append(
f"Increase keyword density for '{kw_data['keyword']}' (currently {kw_data['density']:.2%})"
)
elif kw_data['density'] > 0.03:
recommendations.append(
f"Reduce keyword density to avoid over-optimization (currently {kw_data['density']:.2%})"
)
if not kw_data['in_first_paragraph']:
recommendations.append(
"Include primary keyword in the first paragraph"
)
# Structure recommendations
struct = analysis['structure_analysis']
if struct['headings']['total'] == 0:
recommendations.append("Add headings (H1, H2, H3) to improve content structure")
if struct['links']['internal'] == 0:
recommendations.append("Add internal links to related content")
if struct['avg_paragraph_length'] > 150:
recommendations.append("Break up long paragraphs for better readability")
# Readability recommendations
if analysis['readability']['avg_sentence_length'] > 20:
recommendations.append("Simplify sentences for better readability")
return recommendations
def optimize_content(content: str, keyword: str = None,
                     secondary_keywords: List[str] = None) -> str:
    """Analyze *content* with SEOOptimizer and render a plain-text report.

    Args:
        content: Raw article text.
        keyword: Optional primary keyword to optimize for.
        secondary_keywords: Either a list of keywords or a single
            comma-separated string (as passed from the CLI).

    Returns:
        Multi-line report string with score, structure, readability,
        keyword analysis, meta-tag suggestions and recommendations.
    """
    optimizer = SEOOptimizer()
    # Parse secondary keywords from comma-separated string if provided
    # (the CLI passes a single string; programmatic callers pass a list).
    if secondary_keywords and isinstance(secondary_keywords, str):
        secondary_keywords = [kw.strip() for kw in secondary_keywords.split(',')]
    results = optimizer.analyze(content, keyword, secondary_keywords)
    # Format output: fixed header plus structure/readability summary.
    output = [
        "=== SEO Content Analysis ===",
        f"Overall SEO Score: {results['optimization_score']}/100",
        f"Content Length: {results['content_length']} words",
        f"",
        "Content Structure:",
        f"  Headings: {results['structure_analysis']['headings']['total']}",
        f"  Paragraphs: {results['structure_analysis']['paragraphs']}",
        f"  Avg Paragraph Length: {results['structure_analysis']['avg_paragraph_length']} words",
        f"  Internal Links: {results['structure_analysis']['links']['internal']}",
        f"  External Links: {results['structure_analysis']['links']['external']}",
        f"",
        f"Readability: {results['readability']['level']} (Score: {results['readability']['score']})",
        f""
    ]
    # Keyword section is present only when a primary keyword was analyzed.
    if results['keyword_analysis']:
        kw = results['keyword_analysis']['primary_keyword']
        output.extend([
            "Keyword Analysis:",
            f"  Primary Keyword: {kw['keyword']}",
            f"  Count: {kw['count']}",
            f"  Density: {kw['density']:.2%}",
            f"  In First Paragraph: {'Yes' if kw['in_first_paragraph'] else 'No'}",
            f""
        ])
        if results['keyword_analysis']['lsi_keywords']:
            output.append("  Related Keywords Found:")
            # Show at most the top five related keywords.
            for lsi in results['keyword_analysis']['lsi_keywords'][:5]:
                output.append(f"    β’ {lsi}")
            output.append("")
    # NOTE(review): the suggestions dict is always truthy (it holds empty
    # strings without a keyword), so this branch effectively always runs.
    if results['meta_suggestions']:
        output.extend([
            "Meta Tag Suggestions:",
            f"  Title: {results['meta_suggestions']['title']}",
            f"  Description: {results['meta_suggestions']['meta_description']}",
            f"  URL Slug: {results['meta_suggestions']['url_slug']}",
            f""
        ])
    output.extend([
        "Recommendations:",
    ])
    for rec in results['recommendations']:
        output.append(f"  β’ {rec}")
    return '\n'.join(output)
if __name__ == "__main__":
    import sys

    # CLI: seo_optimizer.py <file> [primary_keyword] [secondary_keywords]
    if len(sys.argv) <= 1:
        print("Usage: python seo_optimizer.py <file> [primary_keyword] [secondary_keywords]")
    else:
        with open(sys.argv[1], 'r') as f:
            content = f.read()
        keyword = sys.argv[2] if len(sys.argv) > 2 else None
        secondary = sys.argv[3] if len(sys.argv) > 3 else None
        print(optimize_content(content, keyword, secondary))
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/business-marketing/content-creator/scripts/seo_optimizer.py",
"license": "MIT License",
"lines": 344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/business-marketing/cto-advisor/scripts/team_scaling_calculator.py | #!/usr/bin/env python3
"""
Engineering Team Scaling Calculator - Optimize team growth and structure
"""
import json
import math
from typing import Dict, List, Tuple
class TeamScalingCalculator:
    """Plan engineering-team growth: hiring, org structure, budget and risk.

    The heuristics used here (role ratios, onboarding caps, salary bands)
    are industry rules of thumb kept as instance data so they can be tuned.
    """

    def __init__(self):
        # Reserved tuning factors; not used by the visible calculations.
        self.conway_factor = 1.5   # Conway's Law impact factor
        self.brooks_factor = 0.75  # Brooks' Law diminishing returns

        # Organizational model appropriate for each headcount band.
        self.team_structures = {
            'startup': {'min': 1, 'max': 10, 'structure': 'flat'},
            'growth': {'min': 11, 'max': 50, 'structure': 'team_leads'},
            'scale': {'min': 51, 'max': 150, 'structure': 'departments'},
            'enterprise': {'min': 151, 'max': 9999, 'structure': 'divisions'}
        }

        # Target share of each role in a balanced engineering org.
        self.role_ratios = {
            'engineering_manager': 0.125,  # 1:8 manager-to-IC ratio
            'tech_lead': 0.167,            # 1:6 lead-to-IC ratio
            'senior_engineer': 0.3,
            'mid_engineer': 0.4,
            'junior_engineer': 0.2,
            'devops': 0.1,
            'qa': 0.15,
            'product_manager': 0.1,
            'designer': 0.08,
            'data_engineer': 0.05
        }

    def calculate_scaling_plan(self, current_state: Dict, growth_targets: Dict) -> Dict:
        """Build the complete scaling plan.

        Args:
            current_state: Current team, e.g. keys 'headcount', 'velocity',
                'roles', 'attrition_rate', 'location'.
            growth_targets: Goals, e.g. 'target_headcount', 'timeline_quarters'.

        Returns:
            Dict with current analysis, quarterly timeline, hiring plan,
            team structure, budget projection, risks and recommendations.
        """
        results = {
            'current_analysis': self._analyze_current_state(current_state),
            'growth_timeline': self._create_growth_timeline(current_state, growth_targets),
            'hiring_plan': {},
            'team_structure': {},
            'budget_projection': {},
            'risk_factors': [],
            'recommendations': []
        }
        results['hiring_plan'] = self._generate_hiring_plan(
            current_state,
            growth_targets
        )
        results['team_structure'] = self._design_team_structure(
            growth_targets['target_headcount']
        )
        results['budget_projection'] = self._calculate_budget(
            results['hiring_plan'],
            current_state.get('location', 'US')
        )
        results['risk_factors'] = self._assess_scaling_risks(
            current_state,
            growth_targets
        )
        results['recommendations'] = self._generate_recommendations(results)
        return results

    def _analyze_current_state(self, current_state: Dict) -> Dict:
        """Score the current team's productivity and role balance."""
        total_engineers = current_state.get('headcount', 0)
        analysis = {
            'total_headcount': total_engineers,
            'team_stage': self._get_team_stage(total_engineers),
            'productivity_index': 0,
            'balance_score': 0,
            'issues': []
        }
        # Productivity: actual velocity vs a 20-points-per-engineer baseline.
        if total_engineers > 0:
            velocity = current_state.get('velocity', 100)
            expected_velocity = total_engineers * 20
            analysis['productivity_index'] = (velocity / expected_velocity) * 100
        roles = current_state.get('roles', {})
        analysis['balance_score'] = self._calculate_balance_score(roles, total_engineers)
        # Flag structural problems.
        if analysis['productivity_index'] < 70:
            analysis['issues'].append('Low productivity - possible process or tooling issues')
        if analysis['balance_score'] < 60:
            analysis['issues'].append('Team imbalance - review role distribution')
        # NOTE(review): reads roles['managers'], while role_ratios uses the
        # key 'engineering_manager' -- confirm which key callers populate.
        manager_ratio = roles.get('managers', 0) / max(total_engineers, 1)
        if manager_ratio > 0.2:
            analysis['issues'].append('Over-managed - too many managers')
        elif manager_ratio < 0.08 and total_engineers > 20:
            analysis['issues'].append('Under-managed - need more engineering managers')
        return analysis

    def _get_team_stage(self, headcount: int) -> str:
        """Map a headcount onto a growth stage; defaults to 'startup'."""
        for stage, config in self.team_structures.items():
            if config['min'] <= headcount <= config['max']:
                return stage
        return 'startup'

    def _calculate_balance_score(self, roles: Dict, total: int) -> float:
        """Score (0-100) how closely the role mix matches self.role_ratios."""
        if total == 0:
            return 0
        score = 100
        for role, ideal_ratio in self.role_ratios.items():
            actual_ratio = roles.get(role, 0) / total
            # Each role can cost at most 20 points for deviating from ideal.
            penalty = abs(actual_ratio - ideal_ratio) * 100
            score -= min(penalty, 20)
        return max(0, score)

    def _create_growth_timeline(self, current: Dict, targets: Dict) -> List[Dict]:
        """Lay out headcount growth per quarter, capped by onboarding capacity.

        Hiring is front-loaded (40% of the gap in Q1) but each quarter's
        intake is capped at 25% of the existing team size, reflecting
        Brooks' Law-style onboarding limits.
        """
        current_headcount = current.get('headcount', 0)
        target_headcount = targets.get('target_headcount', current_headcount)
        timeline_quarters = targets.get('timeline_quarters', 4)
        growth_needed = target_headcount - current_headcount
        timeline = []
        for quarter in range(1, timeline_quarters + 1):
            if quarter == 1:
                quarterly_growth = math.ceil(growth_needed * 0.4)  # front-load hiring
            else:
                # Spread the remaining gap evenly across remaining quarters.
                remaining_growth = target_headcount - current_headcount
                quarters_left = timeline_quarters - quarter + 1
                quarterly_growth = math.ceil(remaining_growth / quarters_left)
            # Onboarding capacity: at most 25% team growth per quarter.
            max_onboarding = math.ceil(current_headcount * 0.25)
            quarterly_growth = min(quarterly_growth, max_onboarding)
            current_headcount += quarterly_growth
            timeline.append({
                'quarter': f'Q{quarter}',
                'headcount': current_headcount,
                'new_hires': quarterly_growth,
                'onboarding_capacity': max_onboarding,
                # Productivity dips proportionally to the share of new hires.
                'productivity_factor': 1.0 - (0.2 * (quarterly_growth / max(current_headcount, 1)))
            })
        return timeline

    def _generate_hiring_plan(self, current: Dict, targets: Dict) -> Dict:
        """Break the hiring goal down by role and by quarter."""
        current_roles = current.get('roles', {})
        target_headcount = targets.get('target_headcount', 0)
        hiring_plan = {
            'total_hires_needed': target_headcount - current.get('headcount', 0),
            'by_role': {},
            'by_quarter': {},
            'interview_capacity_needed': 0,
            'recruiting_resources': 0
        }
        # Gap per role vs the ideal distribution at target size.
        for role, ideal_ratio in self.role_ratios.items():
            ideal_count = math.ceil(target_headcount * ideal_ratio)
            current_count = current_roles.get(role, 0)
            hires_needed = max(0, ideal_count - current_count)
            if hires_needed > 0:
                hiring_plan['by_role'][role] = {
                    'current': current_count,
                    'target': ideal_count,
                    'hires_needed': hires_needed,
                    'priority': self._get_role_priority(role, current_roles, target_headcount)
                }
        # Distribute hires across the quarterly timeline.
        timeline = self._create_growth_timeline(current, targets)
        for quarter_data in timeline:
            quarter = quarter_data['quarter']
            hires = quarter_data['new_hires']
            hiring_plan['by_quarter'][quarter] = {
                'total_hires': hires,
                'breakdown': self._distribute_quarterly_hires(hires, hiring_plan['by_role'])
            }
        # Interview capacity: ~5 interviews per hire on average.
        hiring_plan['interview_capacity_needed'] = hiring_plan['total_hires_needed'] * 5
        # Recruiting resources: ~1 recruiter per 50 hires per year.
        annual_hires = hiring_plan['total_hires_needed'] * (4 / max(targets.get('timeline_quarters', 4), 1))
        hiring_plan['recruiting_resources'] = math.ceil(annual_hires / 50)
        return hiring_plan

    def _get_role_priority(self, role: str, current_roles: Dict, target_size: int) -> int:
        """Hiring priority (1-10) based on role criticality and current gaps."""
        priorities = {
            # Managers matter more once the org is big enough to need them.
            'engineering_manager': 10 if target_size > 20 else 5,
            'tech_lead': 9,
            'senior_engineer': 8,
            # First DevOps hire is urgent; subsequent ones less so.
            'devops': 7 if current_roles.get('devops', 0) == 0 else 5,
            'qa': 6,
            'mid_engineer': 5,
            'product_manager': 6,
            'designer': 5,
            'data_engineer': 4,
            'junior_engineer': 3
        }
        return priorities.get(role, 5)

    def _distribute_quarterly_hires(self, total_hires: int, role_needs: Dict) -> Dict:
        """Split one quarter's hires across roles, highest priority first."""
        distribution = {}
        sorted_roles = sorted(
            role_needs.items(),
            key=lambda x: x[1]['priority'],
            reverse=True
        )
        remaining_hires = total_hires
        for role, needs in sorted_roles:
            if remaining_hires <= 0:
                break
            # Give each role up to a third of the remainder (at least one).
            hires = min(needs['hires_needed'], max(1, remaining_hires // 3))
            distribution[role] = hires
            remaining_hires -= hires
        return distribution

    def _design_team_structure(self, target_headcount: int) -> Dict:
        """Propose teams/departments appropriate for the target size."""
        stage = self._get_team_stage(target_headcount)
        structure = {
            'organizational_model': self.team_structures[stage]['structure'],
            'teams': [],
            'reporting_structure': {},
            'communication_paths': 0
        }
        if stage == 'startup':
            structure['teams'] = [{
                'name': 'Core Team',
                'size': target_headcount,
                'focus': 'Full-stack'
            }]
        elif stage == 'growth':
            # Several ~6-person teams with rotating focus areas.
            team_size = 6
            num_teams = math.ceil(target_headcount / team_size)
            structure['teams'] = [
                {
                    'name': f'Team {i+1}',
                    'size': team_size,
                    'focus': ['Platform', 'Product', 'Infrastructure', 'Growth'][i % 4]
                }
                for i in range(num_teams)
            ]
        elif stage == 'scale':
            # Departments with multiple teams; headcounts are fractional shares.
            structure['departments'] = [
                {'name': 'Platform', 'teams': 3, 'headcount': target_headcount * 0.3},
                {'name': 'Product', 'teams': 4, 'headcount': target_headcount * 0.4},
                {'name': 'Infrastructure', 'teams': 2, 'headcount': target_headcount * 0.2},
                {'name': 'Data', 'teams': 1, 'headcount': target_headcount * 0.1}
            ]
        # Potential pairwise communication paths: n*(n-1)/2.
        structure['communication_paths'] = (target_headcount * (target_headcount - 1)) // 2
        # Management layers assuming ~7 direct reports per manager.
        # Guarded with max() so a zero headcount cannot hit a log domain error.
        structure['management_layers'] = math.ceil(math.log(max(target_headcount, 1), 7))
        return structure

    def _calculate_budget(self, hiring_plan: Dict, location: str) -> Dict:
        """Project the total cost of executing *hiring_plan* in *location*."""
        # Average salaries by role and location (in USD).
        # BUG FIX: the original dict literal referenced `salary_bands` while
        # it was still being defined ("salary_bands.get('US', {})" inside its
        # own literal), which raised NameError on every call. Build the US
        # band first, then derive the regional bands from it.
        us_salaries = {
            'engineering_manager': 200000,
            'tech_lead': 180000,
            'senior_engineer': 160000,
            'mid_engineer': 120000,
            'junior_engineer': 85000,
            'devops': 150000,
            'qa': 100000,
            'product_manager': 150000,
            'designer': 120000,
            'data_engineer': 140000
        }
        salary_bands = {
            'US': us_salaries,
            'EU': {k: v * 0.8 for k, v in us_salaries.items()},
            'APAC': {k: v * 0.6 for k, v in us_salaries.items()}
        }
        location_salaries = salary_bands.get(location, salary_bands['US'])
        budget = {
            'annual_salary_cost': 0,
            'benefits_cost': 0,    # 30% of salary
            'equipment_cost': 0,   # $5k per hire
            'recruiting_cost': 0,  # 20% of first-year salary
            'onboarding_cost': 0,  # $10k per hire
            'total_cost': 0,
            'cost_per_hire': 0
        }
        for role, details in hiring_plan['by_role'].items():
            hires = details['hires_needed']
            salary = location_salaries.get(role, 100000)
            budget['annual_salary_cost'] += hires * salary
            budget['recruiting_cost'] += hires * salary * 0.2
        budget['benefits_cost'] = budget['annual_salary_cost'] * 0.3
        budget['equipment_cost'] = hiring_plan['total_hires_needed'] * 5000
        budget['onboarding_cost'] = hiring_plan['total_hires_needed'] * 10000
        budget['total_cost'] = sum([
            budget['annual_salary_cost'],
            budget['benefits_cost'],
            budget['equipment_cost'],
            budget['recruiting_cost'],
            budget['onboarding_cost']
        ])
        if hiring_plan['total_hires_needed'] > 0:
            budget['cost_per_hire'] = budget['total_cost'] / hiring_plan['total_hires_needed']
        return budget

    def _assess_scaling_risks(self, current: Dict, targets: Dict) -> List[Dict]:
        """List the main risks of the proposed growth, each with a mitigation."""
        risks = []
        # Robustness: use .get like the rest of the class so sparse input
        # dicts cannot raise KeyError here.
        current_headcount = current.get('headcount', 0)
        target_headcount = targets.get('target_headcount', current_headcount)
        growth_rate = (target_headcount - current_headcount) / max(current_headcount, 1)
        if growth_rate > 1.0:  # More than 100% growth
            risks.append({
                'risk': 'Rapid growth dilution',
                'impact': 'High',
                'mitigation': 'Implement strong onboarding and mentorship programs'
            })
        if current.get('attrition_rate', 0) > 15:
            risks.append({
                'risk': 'High attrition during scaling',
                'impact': 'High',
                'mitigation': 'Address retention issues before aggressive hiring'
            })
        if targets.get('timeline_quarters', 4) < 4:
            risks.append({
                'risk': 'Compressed timeline',
                'impact': 'Medium',
                'mitigation': 'Consider extending timeline or increasing recruiting resources'
            })
        return risks

    def _generate_recommendations(self, results: Dict) -> List[str]:
        """Derive strategic recommendations from the assembled plan."""
        recommendations = []
        total_hires = results['hiring_plan']['total_hires_needed']
        current_size = results['current_analysis']['total_headcount']
        if current_size > 0:
            growth_rate = total_hires / current_size
            if growth_rate > 0.5:
                recommendations.append('Consider hiring a dedicated recruiting team')
                recommendations.append('Implement scalable onboarding processes')
                recommendations.append('Establish clear team charters and boundaries')
            if growth_rate > 1.0:
                recommendations.append('β οΈ High growth risk - consider slowing timeline')
                recommendations.append('Focus on senior hires first to establish culture')
                recommendations.append('Implement continuous integration practices early')
        # Based on structure
        if results['team_structure']['communication_paths'] > 1000:
            recommendations.append('Implement clear communication channels and tools')
            recommendations.append('Consider platform teams to reduce dependencies')
        # Based on balance
        if results['current_analysis']['balance_score'] < 70:
            recommendations.append('Prioritize hiring for underrepresented roles')
            recommendations.append('Consider role rotation for skill development')
        return recommendations
def calculate_team_scaling(current_state: Dict, growth_targets: Dict) -> str:
    """Run TeamScalingCalculator and render the plan as a plain-text report.

    Args:
        current_state: Current team description (headcount, roles, ...).
        growth_targets: Growth goals ('target_headcount', 'timeline_quarters').

    Returns:
        Multi-line report string covering current state, timeline, hiring
        priorities, budget, structure and recommendations.
    """
    calculator = TeamScalingCalculator()
    results = calculator.calculate_scaling_plan(current_state, growth_targets)
    # Format output: header plus current-state and growth-plan summaries.
    output = [
        "=== Engineering Team Scaling Plan ===",
        f"",
        f"Current State Analysis:",
        f"  Current Headcount: {results['current_analysis']['total_headcount']}",
        f"  Team Stage: {results['current_analysis']['team_stage']}",
        f"  Productivity Index: {results['current_analysis']['productivity_index']:.1f}%",
        f"  Team Balance Score: {results['current_analysis']['balance_score']:.1f}/100",
        f"",
        f"Growth Plan:",
        f"  Target Headcount: {growth_targets['target_headcount']}",
        f"  Total Hires Needed: {results['hiring_plan']['total_hires_needed']}",
        f"  Timeline: {growth_targets['timeline_quarters']} quarters",
        f"",
        "Quarterly Timeline:"
    ]
    # One line per quarter: cumulative headcount, intake and productivity dip.
    for quarter in results['growth_timeline']:
        output.append(
            f"  {quarter['quarter']}: {quarter['headcount']} total "
            f"(+{quarter['new_hires']} hires, "
            f"{quarter['productivity_factor']:.0%} productivity)"
        )
    output.extend([
        f"",
        "Hiring Priorities:"
    ])
    # Show only the five highest-priority roles.
    sorted_roles = sorted(
        results['hiring_plan']['by_role'].items(),
        key=lambda x: x[1]['priority'],
        reverse=True
    )
    for role, details in sorted_roles[:5]:
        output.append(
            f"  {role}: {details['hires_needed']} hires "
            f"(Priority: {details['priority']}/10)"
        )
    output.extend([
        f"",
        f"Budget Projection:",
        f"  Annual Salary Cost: ${results['budget_projection']['annual_salary_cost']:,.0f}",
        f"  Total Investment: ${results['budget_projection']['total_cost']:,.0f}",
        f"  Cost per Hire: ${results['budget_projection']['cost_per_hire']:,.0f}",
        f"",
        f"Team Structure:",
        f"  Model: {results['team_structure']['organizational_model']}",
        f"  Management Layers: {results['team_structure']['management_layers']}",
        f"  Communication Paths: {results['team_structure']['communication_paths']:,}",
        f"",
        "Key Recommendations:"
    ])
    for rec in results['recommendations']:
        output.append(f"  β’ {rec}")
    return '\n'.join(output)
if __name__ == "__main__":
    # Demo run: a 25-person US org growing to 75 over four quarters.
    demo_state = {
        'headcount': 25,
        'velocity': 450,
        'roles': {
            'engineering_manager': 2,
            'tech_lead': 3,
            'senior_engineer': 8,
            'mid_engineer': 10,
            'junior_engineer': 2
        },
        'attrition_rate': 12,
        'location': 'US'
    }
    demo_targets = {
        'target_headcount': 75,
        'timeline_quarters': 4
    }
    print(calculate_team_scaling(demo_state, demo_targets))
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/business-marketing/cto-advisor/scripts/team_scaling_calculator.py",
"license": "MIT License",
"lines": 424,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/business-marketing/cto-advisor/scripts/tech_debt_analyzer.py | #!/usr/bin/env python3
"""
Technical Debt Analyzer - Assess and prioritize technical debt across systems
"""
import json
from typing import Dict, List, Tuple
from datetime import datetime
import math
class TechDebtAnalyzer:
def __init__(self):
    """Initialise the scoring tables used by the analyzer."""
    # Debt categories: each has a weight (weights sum to 1.0) and the
    # indicator keys expected in the matching sub-dict of the input.
    # Indicator values are read as 0 (no debt) .. 100 (severe debt).
    self.debt_categories = {
        'architecture': {
            'weight': 0.25,
            'indicators': [
                'monolithic_design', 'tight_coupling', 'no_microservices',
                'legacy_patterns', 'no_api_gateway', 'synchronous_only'
            ]
        },
        'code_quality': {
            'weight': 0.20,
            'indicators': [
                'low_test_coverage', 'high_complexity', 'code_duplication',
                'no_documentation', 'inconsistent_standards', 'legacy_language'
            ]
        },
        'infrastructure': {
            'weight': 0.20,
            'indicators': [
                'manual_deployments', 'no_ci_cd', 'single_points_failure',
                'no_monitoring', 'no_auto_scaling', 'outdated_servers'
            ]
        },
        'security': {
            'weight': 0.20,
            'indicators': [
                'outdated_dependencies', 'no_security_scans', 'plain_text_secrets',
                'no_encryption', 'missing_auth', 'no_audit_logs'
            ]
        },
        'performance': {
            'weight': 0.15,
            'indicators': [
                'slow_response_times', 'no_caching', 'inefficient_queries',
                'memory_leaks', 'no_optimization', 'blocking_operations'
            ]
        }
    }
    # Impact dimensions for prioritisation.
    # NOTE(review): the 'score' fields are initialised but never updated by
    # the visible code -- presumably reserved for future use; confirm.
    self.impact_matrix = {
        'user_impact': {'weight': 0.30, 'score': 0},
        'developer_velocity': {'weight': 0.25, 'score': 0},
        'system_reliability': {'weight': 0.20, 'score': 0},
        'scalability': {'weight': 0.15, 'score': 0},
        'maintenance_cost': {'weight': 0.10, 'score': 0}
    }
def analyze_system(self, system_data: Dict) -> Dict:
"""Analyze a system for technical debt"""
results = {
'timestamp': datetime.now().isoformat(),
'system_name': system_data.get('name', 'Unknown'),
'debt_score': 0,
'debt_level': '',
'category_scores': {},
'prioritized_actions': [],
'estimated_effort': {},
'risk_assessment': {},
'recommendations': []
}
# Calculate debt scores by category
total_debt_score = 0
for category, config in self.debt_categories.items():
category_score = self._calculate_category_score(
system_data.get(category, {}),
config['indicators']
)
weighted_score = category_score * config['weight']
results['category_scores'][category] = {
'raw_score': category_score,
'weighted_score': weighted_score,
'level': self._get_level(category_score)
}
total_debt_score += weighted_score
results['debt_score'] = round(total_debt_score, 2)
results['debt_level'] = self._get_level(total_debt_score)
# Calculate impact and prioritize
results['prioritized_actions'] = self._prioritize_actions(
results['category_scores'],
system_data.get('business_context', {})
)
# Estimate effort
results['estimated_effort'] = self._estimate_effort(
results['prioritized_actions'],
system_data.get('team_size', 5)
)
# Risk assessment
results['risk_assessment'] = self._assess_risks(
results['debt_score'],
system_data.get('system_criticality', 'medium')
)
# Generate recommendations
results['recommendations'] = self._generate_recommendations(results)
return results
def _calculate_category_score(self, category_data: Dict, indicators: List) -> float:
"""Calculate score for a specific category"""
if not category_data:
return 50.0 # Default middle score if no data
total_score = 0
count = 0
for indicator in indicators:
if indicator in category_data:
# Score from 0 (no debt) to 100 (high debt)
total_score += category_data[indicator]
count += 1
return (total_score / count) if count > 0 else 50.0
def _get_level(self, score: float) -> str:
"""Convert numerical score to level"""
if score < 20:
return 'Low'
elif score < 40:
return 'Medium-Low'
elif score < 60:
return 'Medium'
elif score < 80:
return 'Medium-High'
else:
return 'Critical'
def _prioritize_actions(self, category_scores: Dict, business_context: Dict) -> List:
"""Prioritize technical debt reduction actions"""
actions = []
for category, scores in category_scores.items():
if scores['raw_score'] > 60: # Focus on high debt areas
priority = self._calculate_priority(
scores['raw_score'],
category,
business_context
)
action = {
'category': category,
'priority': priority,
'score': scores['raw_score'],
'action_items': self._get_action_items(category, scores['level'])
}
actions.append(action)
# Sort by priority
actions.sort(key=lambda x: x['priority'], reverse=True)
return actions[:5] # Top 5 priorities
def _calculate_priority(self, score: float, category: str, context: Dict) -> float:
"""Calculate priority based on score and business context"""
base_priority = score
# Adjust based on business context
if context.get('growth_phase') == 'rapid' and category in ['scalability', 'performance']:
base_priority *= 1.5
if context.get('compliance_required') and category == 'security':
base_priority *= 2.0
if context.get('cost_pressure') and category == 'infrastructure':
base_priority *= 1.3
return min(100, base_priority)
def _get_action_items(self, category: str, level: str) -> List[str]:
    """Return the playbook of action items for a (category, level) pair.

    Only 'Critical' and 'Medium-High' levels have curated playbooks;
    any other combination falls back to ['Create action plan'].
    """
    # Static lookup table: category -> severity level -> ordered action list.
    actions = {
        'architecture': {
            'Critical': [
                'Immediate: Create architecture migration roadmap',
                'Week 1: Identify service boundaries for decomposition',
                'Month 1: Begin extracting first microservice',
                'Month 2: Implement API gateway',
                'Quarter: Complete critical service separation'
            ],
            'Medium-High': [
                'Month 1: Document current architecture',
                'Month 2: Design target architecture',
                'Quarter: Begin gradual migration',
                'Monitor: Track coupling metrics'
            ]
        },
        'code_quality': {
            'Critical': [
                'Immediate: Implement code quality gates',
                'Week 1: Set up automated testing pipeline',
                'Month 1: Achieve 40% test coverage',
                'Month 2: Refactor critical modules',
                'Quarter: Reach 70% test coverage'
            ],
            'Medium-High': [
                'Month 1: Establish coding standards',
                'Month 2: Implement code review process',
                'Quarter: Gradual refactoring plan'
            ]
        },
        'infrastructure': {
            'Critical': [
                'Immediate: Implement basic CI/CD',
                'Week 1: Set up monitoring and alerts',
                'Month 1: Automate critical deployments',
                'Month 2: Implement disaster recovery',
                'Quarter: Full infrastructure as code'
            ],
            'Medium-High': [
                'Month 1: Document infrastructure',
                'Month 2: Begin automation',
                'Quarter: Modernize critical components'
            ]
        },
        'security': {
            'Critical': [
                'Immediate: Security audit and patching',
                'Week 1: Implement secrets management',
                'Month 1: Set up vulnerability scanning',
                'Month 2: Implement security training',
                'Quarter: Achieve compliance standards'
            ],
            'Medium-High': [
                'Month 1: Security assessment',
                'Month 2: Implement security tools',
                'Quarter: Regular security reviews'
            ]
        },
        'performance': {
            'Critical': [
                'Immediate: Performance profiling',
                'Week 1: Implement caching strategy',
                'Month 1: Optimize database queries',
                'Month 2: Implement CDN',
                'Quarter: Re-architect bottlenecks'
            ],
            'Medium-High': [
                'Month 1: Performance baseline',
                'Month 2: Optimization plan',
                'Quarter: Incremental improvements'
            ]
        }
    }
    # Unknown category or level degrades gracefully to a generic action.
    return actions.get(category, {}).get(level, ['Create action plan'])
def _estimate_effort(self, actions: List, team_size: int) -> Dict:
"""Estimate effort required for debt reduction"""
total_story_points = 0
effort_breakdown = {}
for action in actions:
# Estimate based on category and score
base_points = action['score'] * 2 # Higher debt = more effort
if action['category'] == 'architecture':
points = base_points * 1.5 # Architecture changes are complex
elif action['category'] == 'security':
points = base_points * 1.2 # Security requires careful work
else:
points = base_points
effort_breakdown[action['category']] = {
'story_points': round(points),
'sprints': math.ceil(points / (team_size * 20)), # 20 points per dev per sprint
'developers_needed': math.ceil(points / 100)
}
total_story_points += points
return {
'total_story_points': round(total_story_points),
'estimated_sprints': math.ceil(total_story_points / (team_size * 20)),
'recommended_team_size': max(team_size, math.ceil(total_story_points / 200)),
'breakdown': effort_breakdown
}
def _assess_risks(self, debt_score: float, criticality: str) -> Dict:
"""Assess risks associated with technical debt"""
risk_level = 'Low'
if debt_score > 70 and criticality == 'high':
risk_level = 'Critical'
elif debt_score > 60 or criticality == 'high':
risk_level = 'High'
elif debt_score > 40:
risk_level = 'Medium'
risks = {
'overall_risk': risk_level,
'specific_risks': []
}
if debt_score > 60:
risks['specific_risks'].extend([
'System failure risk increasing',
'Developer productivity declining',
'Innovation velocity blocked',
'Maintenance costs escalating'
])
if debt_score > 80:
risks['specific_risks'].extend([
'Competitive disadvantage emerging',
'Talent retention risk',
'Customer satisfaction impact',
'Potential data breach vulnerability'
])
return risks
def _generate_recommendations(self, results: Dict) -> List[str]:
"""Generate strategic recommendations"""
recommendations = []
# Overall strategy based on debt level
if results['debt_level'] == 'Critical':
recommendations.append('π¨ URGENT: Dedicate 40% of engineering capacity to debt reduction')
recommendations.append('Create dedicated debt reduction team')
recommendations.append('Implement weekly debt reduction reviews')
recommendations.append('Consider temporary feature freeze')
elif results['debt_level'] in ['Medium-High', 'High']:
recommendations.append('Allocate 25-30% of sprints to debt reduction')
recommendations.append('Establish technical debt budget')
recommendations.append('Implement debt prevention practices')
else:
recommendations.append('Maintain 15-20% ongoing debt reduction allocation')
recommendations.append('Focus on prevention over correction')
# Category-specific recommendations
for category, scores in results['category_scores'].items():
if scores['raw_score'] > 70:
if category == 'architecture':
recommendations.append(f'Consider hiring architecture specialist')
elif category == 'security':
recommendations.append(f'Engage security audit firm')
elif category == 'performance':
recommendations.append(f'Implement performance SLA monitoring')
# Team recommendations
effort = results.get('estimated_effort', {})
if effort.get('recommended_team_size', 0) > effort.get('total_story_points', 0) / 200:
recommendations.append(f"Scale team to {effort['recommended_team_size']} engineers")
return recommendations
def analyze_technical_debt(system_config: Dict) -> str:
    """Analyze a system's technical debt and render a plain-text report.

    Runs TechDebtAnalyzer over ``system_config`` and formats the overall
    score, per-category breakdown, risk assessment, effort estimate, top
    three priority actions and the strategic recommendations.
    """
    results = TechDebtAnalyzer().analyze_system(system_config)
    report = [
        "=== Technical Debt Analysis Report ===",
        f"System: {results['system_name']}",
        f"Analysis Date: {results['timestamp'][:10]}",  # date part only
        "",
        f"OVERALL DEBT SCORE: {results['debt_score']}/100 ({results['debt_level']})",
        "",
        "Category Breakdown:",
    ]
    for name, scores in results['category_scores'].items():
        report.append(f"  {name.title()}: {scores['raw_score']:.1f} ({scores['level']})")
    report += [
        "",
        "Risk Assessment:",
        f"  Overall Risk: {results['risk_assessment']['overall_risk']}",
    ]
    for risk in results['risk_assessment']['specific_risks']:
        report.append(f"  β’ {risk}")
    effort = results['estimated_effort']
    report += [
        "",
        "Effort Estimation:",
        f"  Total Story Points: {effort['total_story_points']}",
        f"  Estimated Sprints: {effort['estimated_sprints']}",
        f"  Recommended Team Size: {effort['recommended_team_size']}",
        "",
        "Top Priority Actions:",
    ]
    for idx, action in enumerate(results['prioritized_actions'][:3], 1):
        report.append(f"\n{idx}. {action['category'].title()} (Priority: {action['priority']:.0f})")
        for item in action['action_items'][:3]:
            report.append(f"   - {item}")
    report += [
        "",
        "Strategic Recommendations:",
    ]
    for rec in results['recommendations']:
        report.append(f"  β’ {rec}")
    return '\n'.join(report)
if __name__ == "__main__":
    # Demo run: a deliberately debt-heavy legacy system. Each inner value
    # is a 0-100 debt-signal score (higher = worse).
    demo_system = {
        'name': 'Legacy E-commerce Platform',
        # Structural debt signals.
        'architecture': {
            'monolithic_design': 80,
            'tight_coupling': 70,
            'no_microservices': 90,
            'legacy_patterns': 60
        },
        # Code health signals.
        'code_quality': {
            'low_test_coverage': 75,
            'high_complexity': 65,
            'code_duplication': 55
        },
        # Delivery / operations signals.
        'infrastructure': {
            'manual_deployments': 70,
            'no_ci_cd': 60,
            'no_monitoring': 40
        },
        'security': {
            'outdated_dependencies': 85,
            'no_security_scans': 70
        },
        'performance': {
            'slow_response_times': 60,
            'no_caching': 50
        },
        # Context used for effort/risk estimation.
        'team_size': 8,
        'system_criticality': 'high',
        'business_context': {
            'growth_phase': 'rapid',
            'compliance_required': True,
            'cost_pressure': False
        }
    }
    print(analyze_technical_debt(demo_system))
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/business-marketing/cto-advisor/scripts/tech_debt_analyzer.py",
"license": "MIT License",
"lines": 395,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/business-marketing/marketing-demand-acquisition/scripts/calculate_cac.py | #!/usr/bin/env python3
"""
CAC (Customer Acquisition Cost) Calculator
Calculate blended and channel-specific CAC for marketing campaigns.
Supports multiple time periods and channel breakdowns.
"""
import sys
from typing import Dict, List
def calculate_cac(total_spend: float, customers_acquired: int) -> float:
    """Return customer acquisition cost (spend / customers), rounded to cents.

    A zero customer count yields 0.0 instead of raising ZeroDivisionError.
    """
    if customers_acquired:
        return round(total_spend / customers_acquired, 2)
    return 0.0
def calculate_channel_cac(channel_data: List[Dict]) -> Dict:
    """
    Calculate CAC per channel plus a blended CAC across all channels.

    Args:
        channel_data: List of dicts with 'channel', 'spend', 'customers' keys

    Returns:
        Dict keyed by channel name, each entry carrying spend/customers/cac,
        plus a 'blended' entry with the totals and overall CAC.
    """
    breakdown = {}
    spend_total = 0
    customer_total = 0
    for entry in channel_data:
        spend = entry['spend']
        customers = entry['customers']
        breakdown[entry['channel']] = {
            'spend': spend,
            'customers': customers,
            'cac': calculate_cac(spend, customers),
        }
        spend_total += spend
        customer_total += customers
    # Blended CAC treats all channels as one pooled budget.
    breakdown['blended'] = {
        'total_spend': spend_total,
        'total_customers': customer_total,
        'blended_cac': calculate_cac(spend_total, customer_total),
    }
    return breakdown
def print_results(results: Dict):
    """Pretty-print the channel/blended CAC breakdown to stdout."""
    rule = "=" * 60
    print("\n" + rule)
    print("CAC CALCULATION RESULTS")
    print(rule + "\n")
    for name, stats in results.items():
        if name == 'blended':
            # Summary row across all channels; no trailing blank line.
            print("-" * 60)
            print("BLENDED CAC")
            print(f"  Total Spend: ${stats['total_spend']:,.2f}")
            print(f"  Total Customers: {stats['total_customers']:,}")
            print(f"  Blended CAC: ${stats['blended_cac']:,.2f}")
        else:
            print(name.upper())
            print(f"  Spend: ${stats['spend']:,.2f}")
            print(f"  Customers: {stats['customers']:,}")
            print(f"  CAC: ${stats['cac']:,.2f}")
            print()
def main():
    """Run the calculator on example data and print benchmark ranges."""
    # Example data - replace with your actual numbers
    sample_channels = [
        {'channel': 'LinkedIn Ads', 'spend': 15000, 'customers': 10},
        {'channel': 'Google Search', 'spend': 12000, 'customers': 20},
        {'channel': 'SEO/Organic', 'spend': 5000, 'customers': 15},
        {'channel': 'Partnerships', 'spend': 3000, 'customers': 5},
    ]
    print("Marketing CAC Calculator")
    print("Edit the script to input your actual channel data\n")
    print_results(calculate_channel_cac(sample_channels))
    # Reference CAC ranges so the numbers above have context.
    print("\n" + "=" * 60)
    print("B2B SAAS BENCHMARKS (Series A)")
    print("=" * 60)
    for benchmark in (
        "LinkedIn Ads: $150-$400",
        "Google Search: $80-$250",
        "SEO/Organic: $50-$150",
        "Partnerships: $100-$300",
        "Blended Target: <$300",
    ):
        print(benchmark)


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/business-marketing/marketing-demand-acquisition/scripts/calculate_cac.py",
"license": "MIT License",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/business-marketing/product-manager-toolkit/scripts/customer_interview_analyzer.py | #!/usr/bin/env python3
"""
Customer Interview Analyzer
Extracts insights, patterns, and opportunities from user interviews
"""
import re
from typing import Dict, List, Tuple, Set
from collections import Counter, defaultdict
import json
class InterviewAnalyzer:
    """Analyze customer interviews for insights and patterns.

    Purely heuristic, keyword/regex driven: indicator substrings flag pain
    points, delights and feature requests sentence-by-sentence, and a small
    set of regexes pulls out Jobs-to-be-Done style statements. No external
    NLP dependencies are used, so results are fast but approximate.
    """
    def __init__(self):
        # Pain point indicators — stored as stems so a plain substring test
        # matches inflections ('frustrat' hits 'frustrated'/'frustrating').
        self.pain_indicators = [
            'frustrat', 'annoy', 'difficult', 'hard', 'confus', 'slow',
            'problem', 'issue', 'struggle', 'challeng', 'pain', 'waste',
            'manual', 'repetitive', 'tedious', 'boring', 'time-consuming',
            'complicated', 'complex', 'unclear', 'wish', 'need', 'want'
        ]
        # Positive indicators
        self.delight_indicators = [
            'love', 'great', 'awesome', 'amazing', 'perfect', 'easy',
            'simple', 'quick', 'fast', 'helpful', 'useful', 'valuable',
            'save', 'efficient', 'convenient', 'intuitive', 'clear'
        ]
        # Feature request indicators (multi-word phrases allowed; some, like
        # 'wish'/'need'/'want', intentionally overlap with pain indicators)
        self.request_indicators = [
            'would be nice', 'wish', 'hope', 'want', 'need', 'should',
            'could', 'would love', 'if only', 'it would help', 'suggest',
            'recommend', 'idea', 'what if', 'have you considered'
        ]
        # Jobs to be done patterns — plain regex strings applied (with
        # IGNORECASE) to the lowercased transcript in _extract_jtbd.
        self.jtbd_patterns = [
            r'when i\s+(.+?),\s+i want to\s+(.+?)\s+so that\s+(.+)',
            r'i need to\s+(.+?)\s+because\s+(.+)',
            r'my goal is to\s+(.+)',
            r'i\'m trying to\s+(.+)',
            r'i use \w+ to\s+(.+)',
            r'helps me\s+(.+)',
        ]
    def analyze_interview(self, text: str) -> Dict:
        """Analyze a single interview transcript.

        Returns a dict with pain points, delights, feature requests,
        jobs-to-be-done, a sentiment summary, key themes, notable quotes,
        metrics and competitor mentions extracted from ``text``.
        """
        # Lowercased copy feeds the corpus-wide passes; original-cased
        # sentences are kept so extracted quotes stay readable.
        text_lower = text.lower()
        sentences = self._split_sentences(text)
        analysis = {
            'pain_points': self._extract_pain_points(sentences),
            'delights': self._extract_delights(sentences),
            'feature_requests': self._extract_requests(sentences),
            'jobs_to_be_done': self._extract_jtbd(text_lower),
            'sentiment_score': self._calculate_sentiment(text_lower),
            'key_themes': self._extract_themes(text_lower),
            'quotes': self._extract_key_quotes(sentences),
            'metrics_mentioned': self._extract_metrics(text),
            'competitors_mentioned': self._extract_competitors(text)
        }
        return analysis
    def _split_sentences(self, text: str) -> List[str]:
        """Split text into sentences (naive ./!/? splitting, no
        abbreviation handling — adequate for transcript prose)."""
        # Simple sentence splitting
        sentences = re.split(r'[.!?]+', text)
        return [s.strip() for s in sentences if s.strip()]
    def _extract_pain_points(self, sentences: List[str]) -> List[Dict]:
        """Extract pain points from sentences.

        Each sentence contributes at most one pain point (the first
        matching indicator wins via ``break``); capped at 10 entries.
        """
        pain_points = []
        for sentence in sentences:
            sentence_lower = sentence.lower()
            for indicator in self.pain_indicators:
                if indicator in sentence_lower:
                    # Extract context around the pain point
                    pain_points.append({
                        'quote': sentence,
                        'indicator': indicator,
                        'severity': self._assess_severity(sentence_lower)
                    })
                    break
        return pain_points[:10]  # Return top 10
    def _extract_delights(self, sentences: List[str]) -> List[Dict]:
        """Extract positive feedback (one delight per matching sentence,
        capped at 10)."""
        delights = []
        for sentence in sentences:
            sentence_lower = sentence.lower()
            for indicator in self.delight_indicators:
                if indicator in sentence_lower:
                    delights.append({
                        'quote': sentence,
                        'indicator': indicator,
                        'strength': self._assess_strength(sentence_lower)
                    })
                    break
        return delights[:10]
    def _extract_requests(self, sentences: List[str]) -> List[Dict]:
        """Extract feature requests and suggestions (one per matching
        sentence, capped at 10)."""
        requests = []
        for sentence in sentences:
            sentence_lower = sentence.lower()
            for indicator in self.request_indicators:
                if indicator in sentence_lower:
                    requests.append({
                        'quote': sentence,
                        'type': self._classify_request(sentence_lower),
                        'priority': self._assess_request_priority(sentence_lower)
                    })
                    break
        return requests[:10]
    def _extract_jtbd(self, text: str) -> List[Dict]:
        """Extract Jobs to Be Done statements via the jtbd_patterns regexes
        (first 5 matches across all patterns)."""
        jobs = []
        for pattern in self.jtbd_patterns:
            matches = re.findall(pattern, text, re.IGNORECASE)
            for match in matches:
                # Multi-group patterns yield tuples; join the captured parts
                # into one statement. NOTE(review): the 'β' separator looks
                # like a mis-encoded arrow character — left untouched.
                if isinstance(match, tuple):
                    job = ' β '.join(match)
                else:
                    job = match
                jobs.append({
                    'job': job,
                    # NOTE(review): the patterns are plain strings, so the
                    # hasattr branch never fires; kept as-is.
                    'pattern': pattern.pattern if hasattr(pattern, 'pattern') else pattern
                })
        return jobs[:5]
    def _calculate_sentiment(self, text: str) -> Dict:
        """Calculate overall sentiment of the interview.

        Counts indicator *presence* (not occurrences) across the whole
        text, then maps the normalized difference to a label with a ±0.3
        neutral band.
        """
        positive_count = sum(1 for ind in self.delight_indicators if ind in text)
        negative_count = sum(1 for ind in self.pain_indicators if ind in text)
        total = positive_count + negative_count
        if total == 0:
            sentiment_score = 0
        else:
            sentiment_score = (positive_count - negative_count) / total
        if sentiment_score > 0.3:
            sentiment_label = 'positive'
        elif sentiment_score < -0.3:
            sentiment_label = 'negative'
        else:
            sentiment_label = 'neutral'
        return {
            'score': round(sentiment_score, 2),
            'label': sentiment_label,
            'positive_signals': positive_count,
            'negative_signals': negative_count
        }
    def _extract_themes(self, text: str) -> List[str]:
        """Extract key themes using word frequency.

        Considers lowercase words of 4+ letters, drops stop words, and
        keeps the top-10 words that appear at least 3 times.
        """
        # Remove common words
        stop_words = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at',
                      'to', 'for', 'of', 'with', 'by', 'from', 'as', 'is',
                      'was', 'are', 'were', 'been', 'be', 'have', 'has',
                      'had', 'do', 'does', 'did', 'will', 'would', 'could',
                      'should', 'may', 'might', 'must', 'can', 'shall',
                      'it', 'i', 'you', 'we', 'they', 'them', 'their'}
        # Extract meaningful words
        words = re.findall(r'\b[a-z]{4,}\b', text)
        meaningful_words = [w for w in words if w not in stop_words]
        # Count frequency
        word_freq = Counter(meaningful_words)
        # Extract themes (top frequent meaningful words)
        themes = [word for word, count in word_freq.most_common(10) if count >= 3]
        return themes
    def _extract_key_quotes(self, sentences: List[str]) -> List[str]:
        """Extract the most insightful quotes.

        Sentences of 20-200 chars are scored: +2 for pain or request
        indicators, +1 each for 'because', 'but', and '?'. Top 5 returned.
        """
        scored_sentences = []
        for sentence in sentences:
            if len(sentence) < 20 or len(sentence) > 200:
                continue
            score = 0
            sentence_lower = sentence.lower()
            # Score based on insight indicators
            if any(ind in sentence_lower for ind in self.pain_indicators):
                score += 2
            if any(ind in sentence_lower for ind in self.request_indicators):
                score += 2
            if 'because' in sentence_lower:
                score += 1
            if 'but' in sentence_lower:
                score += 1
            if '?' in sentence:
                score += 1
            if score > 0:
                scored_sentences.append((score, sentence))
        # Sort by score and return top quotes. Ties fall back to reverse
        # lexicographic order of the sentence text (tuple comparison).
        scored_sentences.sort(reverse=True)
        return [s[1] for s in scored_sentences[:5]]
    def _extract_metrics(self, text: str) -> List[str]:
        """Extract any metrics or numbers mentioned (percentages, time
        spans, dollar amounts, and number+word pairs); de-duplicated and
        capped at 10, so ordering is not stable."""
        metrics = []
        # Find percentages
        percentages = re.findall(r'\d+%', text)
        metrics.extend(percentages)
        # Find time metrics
        time_metrics = re.findall(r'\d+\s*(?:hours?|minutes?|days?|weeks?|months?)', text, re.IGNORECASE)
        metrics.extend(time_metrics)
        # Find money metrics
        money_metrics = re.findall(r'\$[\d,]+', text)
        metrics.extend(money_metrics)
        # Find general numbers with context
        number_contexts = re.findall(r'(\d+)\s+(\w+)', text)
        for num, context in number_contexts:
            if context.lower() not in ['the', 'a', 'an', 'and', 'or', 'of']:
                metrics.append(f"{num} {context}")
        return list(set(metrics))[:10]
    def _extract_competitors(self, text: str) -> List[str]:
        """Extract competitor mentions via phrasing patterns ('switched
        from X', 'compared to X', ...); capped at 5. Word-level heuristics,
        so false positives are expected."""
        # Common competitor indicators
        competitor_patterns = [
            r'(?:use|used|using|tried|trying|switch from|switched from|instead of)\s+(\w+)',
            r'(\w+)\s+(?:is better|works better|is easier)',
            r'compared to\s+(\w+)',
            r'like\s+(\w+)',
            r'similar to\s+(\w+)',
        ]
        competitors = set()
        for pattern in competitor_patterns:
            matches = re.findall(pattern, text, re.IGNORECASE)
            competitors.update(matches)
        # Filter out common words
        common_words = {'this', 'that', 'it', 'them', 'other', 'another', 'something'}
        competitors = [c for c in competitors if c.lower() not in common_words and len(c) > 2]
        return list(competitors)[:5]
    def _assess_severity(self, text: str) -> str:
        """Assess severity of a pain point from intensity words
        ('very' etc. -> high, 'somewhat' etc. -> low, else medium)."""
        if any(word in text for word in ['very', 'extremely', 'really', 'totally', 'completely']):
            return 'high'
        elif any(word in text for word in ['somewhat', 'bit', 'little', 'slightly']):
            return 'low'
        return 'medium'
    def _assess_strength(self, text: str) -> str:
        """Assess strength of positive feedback ('strong' if emphasized,
        otherwise 'moderate')."""
        if any(word in text for word in ['absolutely', 'definitely', 'really', 'very']):
            return 'strong'
        return 'moderate'
    def _classify_request(self, text: str) -> str:
        """Classify a request into ui_improvement / new_feature / bug_fix /
        performance / general based on keyword hits (first match wins)."""
        if any(word in text for word in ['ui', 'design', 'look', 'color', 'layout']):
            return 'ui_improvement'
        elif any(word in text for word in ['feature', 'add', 'new', 'build']):
            return 'new_feature'
        elif any(word in text for word in ['fix', 'bug', 'broken', 'work']):
            return 'bug_fix'
        elif any(word in text for word in ['faster', 'slow', 'performance', 'speed']):
            return 'performance'
        return 'general'
    def _assess_request_priority(self, text: str) -> str:
        """Assess request priority from urgency wording (critical / high /
        low; medium when nothing matches)."""
        if any(word in text for word in ['critical', 'urgent', 'asap', 'immediately', 'blocking']):
            return 'critical'
        elif any(word in text for word in ['need', 'important', 'should', 'must']):
            return 'high'
        elif any(word in text for word in ['nice', 'would', 'could', 'maybe']):
            return 'low'
        return 'medium'
def aggregate_interviews(interviews: List[Dict]) -> Dict:
    """Combine per-interview analyses into portfolio-level insights.

    Groups pain points by indicator and feature requests by type, tallies
    sentiment labels, theme frequencies and competitor mentions, collects
    all jobs-to-be-done, and unions the metrics seen across interviews.
    """
    pain_by_indicator = defaultdict(list)
    requests_by_type = defaultdict(list)
    jobs = []
    sentiment_tally = {'positive': 0, 'negative': 0, 'neutral': 0}
    theme_counts = Counter()
    metrics = set()
    competitor_counts = Counter()

    for item in interviews:
        for pain in item.get('pain_points', []):
            pain_by_indicator[pain.get('indicator', 'unknown')].append(pain['quote'])
        for req in item.get('feature_requests', []):
            requests_by_type[req.get('type', 'general')].append(req['quote'])
        jobs.extend(item.get('jobs_to_be_done', []))
        label = item.get('sentiment_score', {}).get('label', 'neutral')
        sentiment_tally[label] += 1
        theme_counts.update(item.get('key_themes', []))
        metrics.update(item.get('metrics_mentioned', []))
        competitor_counts.update(item.get('competitors_mentioned', []))

    # Convert the accumulators to plain containers for the caller.
    return {
        'total_interviews': len(interviews),
        'common_pain_points': dict(pain_by_indicator),
        'common_requests': dict(requests_by_type),
        'jobs_to_be_done': jobs,
        'overall_sentiment': sentiment_tally,
        'top_themes': dict(theme_counts.most_common(10)),
        'metrics_summary': list(metrics),
        'competitors_mentioned': dict(competitor_counts),
    }
def format_single_interview(analysis: Dict) -> str:
    """Render one interview analysis as a sectioned, human-readable report.

    Empty sections are omitted; the sentiment block is always present.
    """
    rule = "=" * 60
    lines = [rule, "CUSTOMER INTERVIEW ANALYSIS", rule]

    # Sentiment (always emitted).
    mood = analysis['sentiment_score']
    lines += [
        f"\nπ Overall Sentiment: {mood['label'].upper()}",
        f"  Score: {mood['score']}",
        f"  Positive signals: {mood['positive_signals']}",
        f"  Negative signals: {mood['negative_signals']}",
    ]

    # Pain points (top 5).
    if analysis['pain_points']:
        lines.append("\nπ₯ Pain Points Identified:")
        for idx, pain in enumerate(analysis['pain_points'][:5], 1):
            lines.append(f"\n{idx}. [{pain['severity'].upper()}] {pain['quote'][:100]}...")

    # Feature requests (top 5).
    if analysis['feature_requests']:
        lines.append("\nπ‘ Feature Requests:")
        for idx, req in enumerate(analysis['feature_requests'][:5], 1):
            lines.append(f"\n{idx}. [{req['type']}] Priority: {req['priority']}")
            lines.append(f"   \"{req['quote'][:100]}...\"")

    # Jobs to be done (all of them).
    if analysis['jobs_to_be_done']:
        lines.append("\nπ― Jobs to Be Done:")
        for idx, job in enumerate(analysis['jobs_to_be_done'], 1):
            lines.append(f"{idx}. {job['job']}")

    # Themes.
    if analysis['key_themes']:
        lines.append("\nπ·οΈ Key Themes:")
        lines.append(", ".join(analysis['key_themes']))

    # Quotes (top 3).
    if analysis['quotes']:
        lines.append("\n㪠Key Quotes:")
        for idx, quote in enumerate(analysis['quotes'][:3], 1):
            lines.append(f'{idx}. "{quote}"')

    # Metrics.
    if analysis['metrics_mentioned']:
        lines.append("\nπ Metrics Mentioned:")
        lines.append(", ".join(analysis['metrics_mentioned']))

    # Competitors.
    if analysis['competitors_mentioned']:
        lines.append("\nπ’ Competitors Mentioned:")
        lines.append(", ".join(analysis['competitors_mentioned']))

    return "\n".join(lines)
def main():
    """CLI entry point: analyze a transcript file given on argv."""
    import sys

    # Require a transcript path; print usage and bail out otherwise.
    if len(sys.argv) < 2:
        for line in (
            "Usage: python customer_interview_analyzer.py <interview_file.txt>",
            "\nThis tool analyzes customer interview transcripts to extract:",
            "  - Pain points and frustrations",
            "  - Feature requests and suggestions",
            "  - Jobs to be done",
            "  - Sentiment analysis",
            "  - Key themes and quotes",
        ):
            print(line)
        sys.exit(1)

    # Read interview transcript
    with open(sys.argv[1], 'r') as handle:
        transcript = handle.read()

    analysis = InterviewAnalyzer().analyze_interview(transcript)

    # Optional second argument 'json' switches to machine-readable output.
    wants_json = len(sys.argv) > 2 and sys.argv[2] == 'json'
    if wants_json:
        print(json.dumps(analysis, indent=2))
    else:
        print(format_single_interview(analysis))


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/business-marketing/product-manager-toolkit/scripts/customer_interview_analyzer.py",
"license": "MIT License",
"lines": 363,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/business-marketing/product-manager-toolkit/scripts/rice_prioritizer.py | #!/usr/bin/env python3
"""
RICE Prioritization Framework
Calculates RICE scores for feature prioritization
RICE = (Reach x Impact x Confidence) / Effort
"""
import json
import csv
from typing import List, Dict, Tuple
import argparse
class RICECalculator:
    """Calculate RICE scores for feature prioritization.

    RICE = (Reach x Impact x Confidence) / Effort. Impact, confidence and
    effort come in as qualitative labels and are mapped to numbers via the
    tables built in __init__; unknown labels fall back to medium-ish
    defaults inside calculate_rice.
    """
    def __init__(self):
        # Impact label -> score multiplier.
        self.impact_map = {
            'massive': 3.0,
            'high': 2.0,
            'medium': 1.0,
            'low': 0.5,
            'minimal': 0.25
        }
        # Confidence label -> percentage (divided by 100 when scoring).
        self.confidence_map = {
            'high': 100,
            'medium': 80,
            'low': 50
        }
        # T-shirt effort size -> person-months.
        self.effort_map = {
            'xl': 13,
            'l': 8,
            'm': 5,
            's': 3,
            'xs': 1
        }
    def calculate_rice(self, reach: int, impact: str, confidence: str, effort: str) -> float:
        """
        Calculate RICE score

        Args:
            reach: Number of users/customers affected per quarter
            impact: massive/high/medium/low/minimal (unknown -> 1.0)
            confidence: high/medium/low (unknown -> 50%)
            effort: xl/l/m/s/xs (person-months; unknown -> 5)

        Returns:
            RICE score rounded to 2 decimals (0 if effort resolves to 0).
        """
        impact_score = self.impact_map.get(impact.lower(), 1.0)
        confidence_score = self.confidence_map.get(confidence.lower(), 50) / 100
        effort_score = self.effort_map.get(effort.lower(), 5)
        if effort_score == 0:
            return 0
        rice_score = (reach * impact_score * confidence_score) / effort_score
        return round(rice_score, 2)
    def prioritize_features(self, features: List[Dict]) -> List[Dict]:
        """
        Calculate RICE scores and rank features (highest score first).

        Mutates each feature dict in place by adding a 'rice_score' key.

        Args:
            features: List of feature dictionaries with RICE components
        """
        for feature in features:
            feature['rice_score'] = self.calculate_rice(
                feature.get('reach', 0),
                feature.get('impact', 'medium'),
                feature.get('confidence', 'medium'),
                feature.get('effort', 'm')
            )
        # Sort by RICE score descending
        return sorted(features, key=lambda x: x['rice_score'], reverse=True)
    def analyze_portfolio(self, features: List[Dict]) -> Dict:
        """
        Analyze the feature portfolio for balance and insights.

        Expects features that already carry 'rice_score' (i.e. the output
        of prioritize_features). Returns {} for an empty portfolio.
        """
        if not features:
            return {}
        total_effort = sum(
            self.effort_map.get(f.get('effort', 'm').lower(), 5)
            for f in features
        )
        total_reach = sum(f.get('reach', 0) for f in features)
        effort_distribution = {}
        impact_distribution = {}
        for feature in features:
            effort = feature.get('effort', 'm').lower()
            impact = feature.get('impact', 'medium').lower()
            effort_distribution[effort] = effort_distribution.get(effort, 0) + 1
            impact_distribution[impact] = impact_distribution.get(impact, 0) + 1
        # Quick wins: high impact at small effort.
        quick_wins = [
            f for f in features
            if f.get('impact', '').lower() in ['massive', 'high']
            and f.get('effort', '').lower() in ['xs', 's']
        ]
        # Big bets: high impact but large effort.
        big_bets = [
            f for f in features
            if f.get('impact', '').lower() in ['massive', 'high']
            and f.get('effort', '').lower() in ['l', 'xl']
        ]
        return {
            'total_features': len(features),
            'total_effort_months': total_effort,
            'total_reach': total_reach,
            'average_rice': round(sum(f['rice_score'] for f in features) / len(features), 2),
            'effort_distribution': effort_distribution,
            'impact_distribution': impact_distribution,
            'quick_wins': len(quick_wins),
            'big_bets': len(big_bets),
            'quick_wins_list': quick_wins[:3],  # Top 3 quick wins
            'big_bets_list': big_bets[:3]  # Top 3 big bets
        }
    def generate_roadmap(self, features: List[Dict], team_capacity: int = 10) -> List[Dict]:
        """
        Generate a quarterly roadmap based on team capacity.

        Features are packed in the given (priority) order; a feature that
        does not fit in the current quarter starts the next one. A single
        feature larger than the entire quarterly capacity is scheduled
        alone in its own (overbooked) quarter.

        Args:
            features: Prioritized feature list
            team_capacity: Person-months available per quarter
        """
        quarters = []
        current_quarter = {
            'quarter': 1,
            'features': [],
            'capacity_used': 0,
            'capacity_available': team_capacity
        }
        for feature in features:
            effort = self.effort_map.get(feature.get('effort', 'm').lower(), 5)
            # Accept the feature if it fits — or if the quarter is still
            # empty, so an oversized feature is scheduled (overbooked)
            # instead of emitting a bogus empty quarter. (Bug fix: the old
            # code appended the empty quarter and produced e.g. an empty Q1
            # when the top-priority feature alone exceeded team_capacity.)
            if current_quarter['capacity_used'] + effort <= team_capacity or not current_quarter['features']:
                current_quarter['features'].append(feature)
                current_quarter['capacity_used'] += effort
            else:
                # Close the current quarter and start the next one.
                current_quarter['capacity_available'] = team_capacity - current_quarter['capacity_used']
                quarters.append(current_quarter)
                current_quarter = {
                    'quarter': len(quarters) + 1,
                    'features': [feature],
                    'capacity_used': effort,
                    'capacity_available': team_capacity - effort
                }
        if current_quarter['features']:
            current_quarter['capacity_available'] = team_capacity - current_quarter['capacity_used']
            quarters.append(current_quarter)
        return quarters
def format_output(features: List[Dict], analysis: Dict, roadmap: List[Dict]) -> str:
    """Format prioritized features, portfolio analysis and roadmap as text.

    Bug fix: the roadmap section header literal was split across two
    physical lines (a broken/garbled string literal, i.e. a syntax error);
    it is now a single literal.
    """
    output = ["=" * 60]
    output.append("RICE PRIORITIZATION RESULTS")
    output.append("=" * 60)
    # Top prioritized features (at most 10 shown).
    output.append("\nπ TOP PRIORITIZED FEATURES\n")
    for i, feature in enumerate(features[:10], 1):
        output.append(f"{i}. {feature.get('name', 'Unnamed')}")
        output.append(f"   RICE Score: {feature['rice_score']}")
        output.append(f"   Reach: {feature.get('reach', 0)} | Impact: {feature.get('impact', 'medium')} | "
                      f"Confidence: {feature.get('confidence', 'medium')} | Effort: {feature.get('effort', 'm')}")
        output.append("")
    # Portfolio analysis summary.
    output.append("\nπ PORTFOLIO ANALYSIS\n")
    output.append(f"Total Features: {analysis.get('total_features', 0)}")
    output.append(f"Total Effort: {analysis.get('total_effort_months', 0)} person-months")
    output.append(f"Total Reach: {analysis.get('total_reach', 0):,} users")
    output.append(f"Average RICE Score: {analysis.get('average_rice', 0)}")
    output.append(f"\nπ― Quick Wins: {analysis.get('quick_wins', 0)} features")
    for qw in analysis.get('quick_wins_list', []):
        output.append(f"  β’ {qw.get('name', 'Unnamed')} (RICE: {qw['rice_score']})")
    output.append(f"\nπ Big Bets: {analysis.get('big_bets', 0)} features")
    for bb in analysis.get('big_bets_list', []):
        output.append(f"  β’ {bb.get('name', 'Unnamed')} (RICE: {bb['rice_score']})")
    # Roadmap — header repaired into one string literal.
    output.append("\n\nπ SUGGESTED ROADMAP\n")
    for quarter in roadmap:
        output.append(f"\nQ{quarter['quarter']} - Capacity: {quarter['capacity_used']}/{quarter['capacity_used'] + quarter['capacity_available']} person-months")
        for feature in quarter['features']:
            output.append(f"  β’ {feature.get('name', 'Unnamed')} (RICE: {feature['rice_score']})")
    return "\n".join(output)
def load_features_from_csv(filepath: str) -> List[Dict]:
    """Load features from a CSV file into the dict shape the calculator uses.

    Missing columns fall back to sensible defaults; 'reach' is coerced to int.
    """
    loaded = []
    with open(filepath, 'r') as handle:
        for row in csv.DictReader(handle):
            loaded.append({
                'name': row.get('name', ''),
                'reach': int(row.get('reach', 0)),
                'impact': row.get('impact', 'medium'),
                'confidence': row.get('confidence', 'medium'),
                'effort': row.get('effort', 'm'),
                'description': row.get('description', ''),
            })
    return loaded
def create_sample_csv(filepath: str):
    """Write a small example feature CSV (header row + 10 features) for testing."""
    header = ['name', 'reach', 'impact', 'confidence', 'effort', 'description']
    rows = [
        ['User Dashboard Redesign', '5000', 'high', 'high', 'l', 'Complete redesign of user dashboard'],
        ['Mobile Push Notifications', '10000', 'massive', 'medium', 'm', 'Add push notification support'],
        ['Dark Mode', '8000', 'medium', 'high', 's', 'Implement dark mode theme'],
        ['API Rate Limiting', '2000', 'low', 'high', 'xs', 'Add rate limiting to API'],
        ['Social Login', '12000', 'high', 'medium', 'm', 'Add Google/Facebook login'],
        ['Export to PDF', '3000', 'medium', 'low', 's', 'Export reports as PDF'],
        ['Team Collaboration', '4000', 'massive', 'low', 'xl', 'Real-time collaboration features'],
        ['Search Improvements', '15000', 'high', 'high', 'm', 'Enhance search functionality'],
        ['Onboarding Flow', '20000', 'massive', 'high', 's', 'Improve new user onboarding'],
        ['Analytics Dashboard', '6000', 'high', 'medium', 'l', 'Advanced analytics for users'],
    ]
    with open(filepath, 'w', newline='') as handle:
        writer = csv.writer(handle)
        writer.writerow(header)
        writer.writerows(rows)
    print(f"Sample CSV created at: {filepath}")
def main():
    """CLI entry point: load features, score them, and print the results."""
    parser = argparse.ArgumentParser(description='RICE Framework for Feature Prioritization')
    parser.add_argument('input', nargs='?', help='CSV file with features or "sample" to create sample')
    parser.add_argument('--capacity', type=int, default=10, help='Team capacity per quarter (person-months)')
    parser.add_argument('--output', choices=['text', 'json', 'csv'], default='text', help='Output format')
    args = parser.parse_args()

    # Special input value: just create a sample CSV and exit.
    if args.input == 'sample':
        create_sample_csv('sample_features.csv')
        return

    if args.input:
        features = load_features_from_csv(args.input)
    else:
        # Built-in demo data when no CSV is supplied.
        features = [
            {'name': 'User Dashboard', 'reach': 5000, 'impact': 'high', 'confidence': 'high', 'effort': 'l'},
            {'name': 'Push Notifications', 'reach': 10000, 'impact': 'massive', 'confidence': 'medium', 'effort': 'm'},
            {'name': 'Dark Mode', 'reach': 8000, 'impact': 'medium', 'confidence': 'high', 'effort': 's'},
            {'name': 'API Rate Limiting', 'reach': 2000, 'impact': 'low', 'confidence': 'high', 'effort': 'xs'},
            {'name': 'Social Login', 'reach': 12000, 'impact': 'high', 'confidence': 'medium', 'effort': 'm'},
        ]

    # Score, analyze, and plan.
    calculator = RICECalculator()
    prioritized = calculator.prioritize_features(features)
    analysis = calculator.analyze_portfolio(prioritized)
    roadmap = calculator.generate_roadmap(prioritized, args.capacity)

    # Emit in the requested format.
    if args.output == 'json':
        print(json.dumps({'features': prioritized, 'analysis': analysis, 'roadmap': roadmap}, indent=2))
    elif args.output == 'csv':
        # Output prioritized features as CSV (header from the first row's keys).
        if prioritized:
            keys = prioritized[0].keys()
            print(','.join(keys))
            for feature in prioritized:
                print(','.join(str(feature.get(k, '')) for k in keys))
    else:
        print(format_output(prioritized, analysis, roadmap))


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/business-marketing/product-manager-toolkit/scripts/rice_prioritizer.py",
"license": "MIT License",
"lines": 250,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/business-marketing/product-strategist/scripts/okr_cascade_generator.py | #!/usr/bin/env python3
"""
OKR Cascade Generator
Creates aligned OKRs from company strategy down to team level
"""
import json
from typing import Dict, List
from datetime import datetime, timedelta
class OKRGenerator:
"""Generate and cascade OKRs across the organization"""
def __init__(self):
    # Template library keyed by strategy name. Each template supplies
    # candidate objective titles plus key-result template strings whose
    # {current}/{target} placeholders are filled later by _fill_metrics().
    self.okr_templates = {
        'growth': {
            'objectives': [
                'Accelerate user acquisition and market expansion',
                'Achieve product-market fit in new segments',
                'Build sustainable growth engine'
            ],
            'key_results': [
                'Increase MAU from {current} to {target}',
                'Achieve {target}% MoM growth rate',
                'Expand to {target} new markets',
                'Reduce CAC by {target}%',
                'Improve activation rate to {target}%'
            ]
        },
        'retention': {
            'objectives': [
                'Create lasting customer value and loyalty',
                'Build best-in-class user experience',
                'Maximize customer lifetime value'
            ],
            'key_results': [
                'Improve retention from {current}% to {target}%',
                'Increase NPS from {current} to {target}',
                'Reduce churn to below {target}%',
                'Achieve {target}% product stickiness',
                'Increase LTV/CAC ratio to {target}'
            ]
        },
        'revenue': {
            'objectives': [
                'Drive sustainable revenue growth',
                'Optimize monetization strategy',
                'Expand revenue per customer'
            ],
            'key_results': [
                'Grow ARR from ${current}M to ${target}M',
                'Increase ARPU by {target}%',
                'Launch {target} new revenue streams',
                'Achieve {target}% gross margin',
                'Reduce revenue churn to {target}%'
            ]
        },
        'innovation': {
            'objectives': [
                'Pioneer next-generation product capabilities',
                'Establish market leadership through innovation',
                'Build competitive moat'
            ],
            'key_results': [
                'Launch {target} breakthrough features',
                'Achieve {target}% of revenue from new products',
                'File {target} patents/IP',
                'Reduce time-to-market by {target}%',
                'Achieve {target} innovation score'
            ]
        },
        'operational': {
            'objectives': [
                'Build world-class product organization',
                'Achieve operational excellence',
                'Scale efficiently'
            ],
            'key_results': [
                'Improve velocity by {target}%',
                'Reduce cycle time to {target} days',
                'Achieve {target}% automation',
                'Improve team NPS to {target}',
                'Reduce incidents by {target}%'
            ]
        }
    }
def generate_company_okrs(self, strategy: str, metrics: Dict) -> Dict:
"""Generate company-level OKRs based on strategy"""
if strategy not in self.okr_templates:
strategy = 'growth' # Default
template = self.okr_templates[strategy]
company_okrs = {
'level': 'Company',
'quarter': self._get_current_quarter(),
'strategy': strategy,
'objectives': []
}
# Generate 3 objectives
for i in range(min(3, len(template['objectives']))):
obj = {
'id': f'CO-{i+1}',
'title': template['objectives'][i],
'key_results': [],
'owner': 'CEO',
'status': 'draft'
}
# Add 3-5 key results per objective
for j in range(3):
if j < len(template['key_results']):
kr_template = template['key_results'][j]
kr = {
'id': f'CO-{i+1}-KR{j+1}',
'title': self._fill_metrics(kr_template, metrics),
'current': metrics.get('current', 0),
'target': metrics.get('target', 100),
'unit': self._extract_unit(kr_template),
'status': 'not_started'
}
obj['key_results'].append(kr)
company_okrs['objectives'].append(obj)
return company_okrs
def cascade_to_product(self, company_okrs: Dict) -> Dict:
"""Cascade company OKRs to product organization"""
product_okrs = {
'level': 'Product',
'quarter': company_okrs['quarter'],
'parent': 'Company',
'objectives': []
}
# Map company objectives to product objectives
for company_obj in company_okrs['objectives']:
product_obj = {
'id': f'PO-{company_obj["id"].split("-")[1]}',
'title': self._translate_to_product(company_obj['title']),
'parent_objective': company_obj['id'],
'key_results': [],
'owner': 'Head of Product',
'status': 'draft'
}
# Generate product-specific key results
for kr in company_obj['key_results']:
product_kr = {
'id': f'PO-{product_obj["id"].split("-")[1]}-KR{kr["id"].split("KR")[1]}',
'title': self._translate_kr_to_product(kr['title']),
'contributes_to': kr['id'],
'current': kr['current'],
'target': kr['target'] * 0.3, # Product typically contributes 30%
'unit': kr['unit'],
'status': 'not_started'
}
product_obj['key_results'].append(product_kr)
product_okrs['objectives'].append(product_obj)
return product_okrs
def cascade_to_teams(self, product_okrs: Dict) -> List[Dict]:
"""Cascade product OKRs to individual teams"""
teams = ['Growth', 'Platform', 'Mobile', 'Data']
team_okrs = []
for team in teams:
team_okr = {
'level': 'Team',
'team': team,
'quarter': product_okrs['quarter'],
'parent': 'Product',
'objectives': []
}
# Each team takes relevant objectives
for product_obj in product_okrs['objectives']:
if self._is_relevant_for_team(product_obj['title'], team):
team_obj = {
'id': f'{team[:3].upper()}-{product_obj["id"].split("-")[1]}',
'title': self._translate_to_team(product_obj['title'], team),
'parent_objective': product_obj['id'],
'key_results': [],
'owner': f'{team} PM',
'status': 'draft'
}
# Add team-specific key results
for kr in product_obj['key_results'][:2]: # Each team takes 2 KRs
team_kr = {
'id': f'{team[:3].upper()}-{team_obj["id"].split("-")[1]}-KR{kr["id"].split("KR")[1]}',
'title': self._translate_kr_to_team(kr['title'], team),
'contributes_to': kr['id'],
'current': kr['current'],
'target': kr['target'] / len(teams),
'unit': kr['unit'],
'status': 'not_started'
}
team_obj['key_results'].append(team_kr)
team_okr['objectives'].append(team_obj)
if team_okr['objectives']:
team_okrs.append(team_okr)
return team_okrs
def generate_okr_dashboard(self, all_okrs: Dict) -> str:
    """Render the cascade as a plain-text dashboard.

    *all_okrs* may contain 'company', 'product' and 'teams' sections;
    missing sections are skipped. The alignment-matrix header is always
    appended, but matrix rows require both company and product data.
    """
    dashboard = ["=" * 60]
    dashboard.append("OKR CASCADE DASHBOARD")
    dashboard.append(f"Quarter: {all_okrs.get('quarter', 'Q1 2025')}")
    dashboard.append("=" * 60)
    # Company OKRs
    if 'company' in all_okrs:
        dashboard.append("\nπ’ COMPANY OKRS\n")
        for obj in all_okrs['company']['objectives']:
            dashboard.append(f"π {obj['id']}: {obj['title']}")
            for kr in obj['key_results']:
                dashboard.append(f" ββ {kr['id']}: {kr['title']}")
    # Product OKRs
    if 'product' in all_okrs:
        dashboard.append("\nπ PRODUCT OKRS\n")
        for obj in all_okrs['product']['objectives']:
            dashboard.append(f"π {obj['id']}: {obj['title']}")
            # Show traceability to the parent company objective.
            dashboard.append(f" β³ Supports: {obj.get('parent_objective', 'N/A')}")
            for kr in obj['key_results']:
                dashboard.append(f" ββ {kr['id']}: {kr['title']}")
    # Team OKRs
    if 'teams' in all_okrs:
        dashboard.append("\nπ₯ TEAM OKRS\n")
        for team_okr in all_okrs['teams']:
            dashboard.append(f"\n{team_okr['team']} Team:")
            for obj in team_okr['objectives']:
                dashboard.append(f" π {obj['id']}: {obj['title']}")
                for kr in obj['key_results']:
                    dashboard.append(f" ββ {kr['id']}: {kr['title']}")
    # Alignment Matrix: company -> product -> team objective ids,
    # linked via each child's 'parent_objective' field.
    dashboard.append("\n\nπ ALIGNMENT MATRIX\n")
    dashboard.append("Company β Product β Teams")
    dashboard.append("-" * 40)
    if 'company' in all_okrs and 'product' in all_okrs:
        for c_obj in all_okrs['company']['objectives']:
            dashboard.append(f"\n{c_obj['id']}")
            for p_obj in all_okrs['product']['objectives']:
                if p_obj.get('parent_objective') == c_obj['id']:
                    dashboard.append(f" ββ {p_obj['id']}")
                    if 'teams' in all_okrs:
                        for team_okr in all_okrs['teams']:
                            for t_obj in team_okr['objectives']:
                                if t_obj.get('parent_objective') == p_obj['id']:
                                    dashboard.append(f" ββ {t_obj['id']} ({team_okr['team']})")
    return "\n".join(dashboard)
def calculate_alignment_score(self, all_okrs: Dict) -> Dict:
"""Calculate alignment score across OKR cascade"""
scores = {
'vertical_alignment': 0,
'horizontal_alignment': 0,
'coverage': 0,
'balance': 0,
'overall': 0
}
# Vertical alignment: How well each level supports the above
total_objectives = 0
aligned_objectives = 0
if 'product' in all_okrs:
for obj in all_okrs['product']['objectives']:
total_objectives += 1
if 'parent_objective' in obj:
aligned_objectives += 1
if 'teams' in all_okrs:
for team in all_okrs['teams']:
for obj in team['objectives']:
total_objectives += 1
if 'parent_objective' in obj:
aligned_objectives += 1
if total_objectives > 0:
scores['vertical_alignment'] = round((aligned_objectives / total_objectives) * 100, 1)
# Horizontal alignment: How well teams coordinate
if 'teams' in all_okrs and len(all_okrs['teams']) > 1:
shared_objectives = set()
for team in all_okrs['teams']:
for obj in team['objectives']:
parent = obj.get('parent_objective')
if parent:
shared_objectives.add(parent)
scores['horizontal_alignment'] = min(100, len(shared_objectives) * 25)
# Coverage: How much of company OKRs are covered
if 'company' in all_okrs and 'product' in all_okrs:
company_krs = sum(len(obj['key_results']) for obj in all_okrs['company']['objectives'])
covered_krs = sum(len(obj['key_results']) for obj in all_okrs['product']['objectives'])
if company_krs > 0:
scores['coverage'] = round((covered_krs / company_krs) * 100, 1)
# Balance: Distribution across teams
if 'teams' in all_okrs:
objectives_per_team = [len(team['objectives']) for team in all_okrs['teams']]
if objectives_per_team:
avg_objectives = sum(objectives_per_team) / len(objectives_per_team)
variance = sum((x - avg_objectives) ** 2 for x in objectives_per_team) / len(objectives_per_team)
scores['balance'] = round(max(0, 100 - variance * 10), 1)
# Overall score
scores['overall'] = round(sum([
scores['vertical_alignment'] * 0.4,
scores['horizontal_alignment'] * 0.2,
scores['coverage'] * 0.2,
scores['balance'] * 0.2
]), 1)
return scores
def _get_current_quarter(self) -> str:
"""Get current quarter"""
now = datetime.now()
quarter = (now.month - 1) // 3 + 1
return f"Q{quarter} {now.year}"
def _fill_metrics(self, template: str, metrics: Dict) -> str:
"""Fill template with actual metrics"""
result = template
for key, value in metrics.items():
result = result.replace(f'{{{key}}}', str(value))
return result
def _extract_unit(self, kr_template: str) -> str:
"""Extract measurement unit from KR template"""
if '%' in kr_template:
return '%'
elif '$' in kr_template:
return '$'
elif 'days' in kr_template.lower():
return 'days'
elif 'score' in kr_template.lower():
return 'points'
return 'count'
def _translate_to_product(self, company_objective: str) -> str:
"""Translate company objective to product objective"""
translations = {
'Accelerate user acquisition': 'Build viral product features',
'Achieve product-market fit': 'Validate product hypotheses',
'Build sustainable growth': 'Create product-led growth loops',
'Create lasting customer value': 'Design sticky user experiences',
'Drive sustainable revenue': 'Optimize product monetization',
'Pioneer next-generation': 'Ship innovative features',
'Build world-class': 'Elevate product excellence'
}
for key, value in translations.items():
if key in company_objective:
return company_objective.replace(key, value)
return f"Product: {company_objective}"
def _translate_kr_to_product(self, kr: str) -> str:
"""Translate KR to product context"""
product_terms = {
'MAU': 'product MAU',
'growth rate': 'feature adoption rate',
'CAC': 'product onboarding efficiency',
'retention': 'product retention',
'NPS': 'product NPS',
'ARR': 'product-driven revenue',
'churn': 'product churn'
}
result = kr
for term, replacement in product_terms.items():
if term in result:
result = result.replace(term, replacement)
break
return result
def _translate_to_team(self, objective: str, team: str) -> str:
"""Translate objective to team context"""
team_focus = {
'Growth': 'acquisition and activation',
'Platform': 'infrastructure and reliability',
'Mobile': 'mobile experience',
'Data': 'analytics and insights'
}
focus = team_focus.get(team, 'delivery')
return f"{objective} through {focus}"
def _translate_kr_to_team(self, kr: str, team: str) -> str:
    """Tag a key result with the owning team's name, e.g. '[Data] KR'."""
    return f"[{team}] {kr}"
def _is_relevant_for_team(self, objective: str, team: str) -> bool:
"""Check if objective is relevant for team"""
relevance = {
'Growth': ['acquisition', 'growth', 'activation', 'viral'],
'Platform': ['infrastructure', 'reliability', 'scale', 'performance'],
'Mobile': ['mobile', 'app', 'ios', 'android'],
'Data': ['analytics', 'metrics', 'insights', 'data']
}
keywords = relevance.get(team, [])
objective_lower = objective.lower()
return any(keyword in objective_lower for keyword in keywords) or team == 'Platform'
def main():
    """CLI entry point: build a demo OKR cascade and print it.

    argv[1] optionally selects the strategy template; argv[2] == 'json'
    additionally dumps the raw cascade as JSON.
    """
    import sys

    # Demo metrics used to fill KR templates.
    metrics = {
        'current': 100000,
        'target': 150000,
        'current_revenue': 10,
        'target_revenue': 15,
        'current_nps': 40,
        'target_nps': 60
    }

    strategy = sys.argv[1] if len(sys.argv) > 1 else 'growth'

    generator = OKRGenerator()
    company_okrs = generator.generate_company_okrs(strategy, metrics)
    product_okrs = generator.cascade_to_product(company_okrs)
    all_okrs = {
        'company': company_okrs,
        'product': product_okrs,
        'teams': generator.cascade_to_teams(product_okrs),
    }

    print(generator.generate_okr_dashboard(all_okrs))

    print("\n\nπ― ALIGNMENT SCORES\n" + "-" * 40)
    for metric, score in generator.calculate_alignment_score(all_okrs).items():
        print(f"{metric.replace('_', ' ').title()}: {score}%")

    if len(sys.argv) > 2 and sys.argv[2] == 'json':
        print("\n\nJSON Output:")
        print(json.dumps(all_okrs, indent=2))


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/business-marketing/product-strategist/scripts/okr_cascade_generator.py",
"license": "MIT License",
"lines": 409,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/creative-design/ui-design-system/scripts/design_token_generator.py | #!/usr/bin/env python3
"""
Design Token Generator
Creates consistent design system tokens for colors, typography, spacing, and more
"""
import json
from typing import Dict, List, Tuple
import colorsys
class DesignTokenGenerator:
"""Generate comprehensive design system tokens"""
def __init__(self):
self.base_unit = 8 # 8pt grid system
self.type_scale_ratio = 1.25 # Major third
self.base_font_size = 16
def generate_complete_system(self, brand_color: str = "#0066CC",
style: str = "modern") -> Dict:
"""Generate complete design token system"""
tokens = {
'meta': {
'version': '1.0.0',
'style': style,
'generated': 'auto-generated'
},
'colors': self.generate_color_palette(brand_color),
'typography': self.generate_typography_system(style),
'spacing': self.generate_spacing_system(),
'sizing': self.generate_sizing_tokens(),
'borders': self.generate_border_tokens(style),
'shadows': self.generate_shadow_tokens(style),
'animation': self.generate_animation_tokens(),
'breakpoints': self.generate_breakpoints(),
'z-index': self.generate_z_index_scale()
}
return tokens
def generate_color_palette(self, brand_color: str) -> Dict:
"""Generate comprehensive color palette from brand color"""
# Convert hex to RGB
brand_rgb = self._hex_to_rgb(brand_color)
brand_hsv = colorsys.rgb_to_hsv(*[c/255 for c in brand_rgb])
palette = {
'primary': self._generate_color_scale(brand_color, 'primary'),
'secondary': self._generate_color_scale(
self._adjust_hue(brand_color, 180), 'secondary'
),
'neutral': self._generate_neutral_scale(),
'semantic': {
'success': {
'base': '#10B981',
'light': '#34D399',
'dark': '#059669',
'contrast': '#FFFFFF'
},
'warning': {
'base': '#F59E0B',
'light': '#FBB
D24',
'dark': '#D97706',
'contrast': '#FFFFFF'
},
'error': {
'base': '#EF4444',
'light': '#F87171',
'dark': '#DC2626',
'contrast': '#FFFFFF'
},
'info': {
'base': '#3B82F6',
'light': '#60A5FA',
'dark': '#2563EB',
'contrast': '#FFFFFF'
}
},
'surface': {
'background': '#FFFFFF',
'foreground': '#111827',
'card': '#FFFFFF',
'overlay': 'rgba(0, 0, 0, 0.5)',
'divider': '#E5E7EB'
}
}
return palette
def _generate_color_scale(self, base_color: str, name: str) -> Dict:
"""Generate color scale from base color"""
scale = {}
rgb = self._hex_to_rgb(base_color)
h, s, v = colorsys.rgb_to_hsv(*[c/255 for c in rgb])
# Generate scale from 50 to 900
steps = [50, 100, 200, 300, 400, 500, 600, 700, 800, 900]
for step in steps:
# Adjust lightness based on step
factor = (1000 - step) / 1000
new_v = 0.95 if step < 500 else v * (1 - (step - 500) / 500)
new_s = s * (0.3 + 0.7 * (step / 900))
new_rgb = colorsys.hsv_to_rgb(h, new_s, new_v)
scale[str(step)] = self._rgb_to_hex([int(c * 255) for c in new_rgb])
scale['DEFAULT'] = base_color
return scale
def _generate_neutral_scale(self) -> Dict:
"""Generate neutral color scale"""
return {
'50': '#F9FAFB',
'100': '#F3F4F6',
'200': '#E5E7EB',
'300': '#D1D5DB',
'400': '#9CA3AF',
'500': '#6B7280',
'600': '#4B5563',
'700': '#374151',
'800': '#1F2937',
'900': '#111827',
'DEFAULT': '#6B7280'
}
def generate_typography_system(self, style: str) -> Dict:
"""Generate typography system"""
# Font families based on style
font_families = {
'modern': {
'sans': 'Inter, system-ui, -apple-system, sans-serif',
'serif': 'Merriweather, Georgia, serif',
'mono': 'Fira Code, Monaco, monospace'
},
'classic': {
'sans': 'Helvetica, Arial, sans-serif',
'serif': 'Times New Roman, Times, serif',
'mono': 'Courier New, monospace'
},
'playful': {
'sans': 'Poppins, Roboto, sans-serif',
'serif': 'Playfair Display, Georgia, serif',
'mono': 'Source Code Pro, monospace'
}
}
typography = {
'fontFamily': font_families.get(style, font_families['modern']),
'fontSize': self._generate_type_scale(),
'fontWeight': {
'thin': 100,
'light': 300,
'normal': 400,
'medium': 500,
'semibold': 600,
'bold': 700,
'extrabold': 800,
'black': 900
},
'lineHeight': {
'none': 1,
'tight': 1.25,
'snug': 1.375,
'normal': 1.5,
'relaxed': 1.625,
'loose': 2
},
'letterSpacing': {
'tighter': '-0.05em',
'tight': '-0.025em',
'normal': '0',
'wide': '0.025em',
'wider': '0.05em',
'widest': '0.1em'
},
'textStyles': self._generate_text_styles()
}
return typography
def _generate_type_scale(self) -> Dict:
"""Generate modular type scale"""
scale = {}
sizes = ['xs', 'sm', 'base', 'lg', 'xl', '2xl', '3xl', '4xl', '5xl']
for i, size in enumerate(sizes):
if size == 'base':
scale[size] = f'{self.base_font_size}px'
elif i < sizes.index('base'):
factor = self.type_scale_ratio ** (sizes.index('base') - i)
scale[size] = f'{round(self.base_font_size / factor)}px'
else:
factor = self.type_scale_ratio ** (i - sizes.index('base'))
scale[size] = f'{round(self.base_font_size * factor)}px'
return scale
def _generate_text_styles(self) -> Dict:
    """Pre-composed text styles (h1-h6, body, small, caption).

    Note: these pixel sizes are fixed and independent of the modular
    scale produced by _generate_type_scale().
    """
    return {
        'h1': {
            'fontSize': '48px',
            'fontWeight': 700,
            'lineHeight': 1.2,
            'letterSpacing': '-0.02em'
        },
        'h2': {
            'fontSize': '36px',
            'fontWeight': 700,
            'lineHeight': 1.3,
            'letterSpacing': '-0.01em'
        },
        'h3': {
            'fontSize': '28px',
            'fontWeight': 600,
            'lineHeight': 1.4,
            'letterSpacing': '0'
        },
        'h4': {
            'fontSize': '24px',
            'fontWeight': 600,
            'lineHeight': 1.4,
            'letterSpacing': '0'
        },
        'h5': {
            'fontSize': '20px',
            'fontWeight': 600,
            'lineHeight': 1.5,
            'letterSpacing': '0'
        },
        'h6': {
            'fontSize': '16px',
            'fontWeight': 600,
            'lineHeight': 1.5,
            'letterSpacing': '0.01em'
        },
        'body': {
            'fontSize': '16px',
            'fontWeight': 400,
            'lineHeight': 1.5,
            'letterSpacing': '0'
        },
        'small': {
            'fontSize': '14px',
            'fontWeight': 400,
            'lineHeight': 1.5,
            'letterSpacing': '0'
        },
        'caption': {
            'fontSize': '12px',
            'fontWeight': 400,
            'lineHeight': 1.5,
            'letterSpacing': '0.01em'
        }
    }
def generate_spacing_system(self) -> Dict:
"""Generate spacing system based on 8pt grid"""
spacing = {}
multipliers = [0, 0.5, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 20, 24, 32, 40, 48, 56, 64]
for i, mult in enumerate(multipliers):
spacing[str(i)] = f'{int(self.base_unit * mult)}px'
# Add semantic spacing
spacing.update({
'xs': spacing['1'], # 4px
'sm': spacing['2'], # 8px
'md': spacing['4'], # 16px
'lg': spacing['6'], # 24px
'xl': spacing['8'], # 32px
'2xl': spacing['12'], # 48px
'3xl': spacing['16'] # 64px
})
return spacing
def generate_sizing_tokens(self) -> Dict:
    """Fixed sizing tokens: container max-widths and component dimensions.

    Buttons and inputs share the same sm/md/lg height/padding values;
    icons get an additional xl size.
    """
    return {
        'container': {
            'sm': '640px',
            'md': '768px',
            'lg': '1024px',
            'xl': '1280px',
            '2xl': '1536px'
        },
        'components': {
            'button': {
                'sm': {'height': '32px', 'paddingX': '12px'},
                'md': {'height': '40px', 'paddingX': '16px'},
                'lg': {'height': '48px', 'paddingX': '20px'}
            },
            'input': {
                'sm': {'height': '32px', 'paddingX': '12px'},
                'md': {'height': '40px', 'paddingX': '16px'},
                'lg': {'height': '48px', 'paddingX': '20px'}
            },
            'icon': {
                'sm': '16px',
                'md': '20px',
                'lg': '24px',
                'xl': '32px'
            }
        }
    }
def generate_border_tokens(self, style: str) -> Dict:
"""Generate border tokens"""
radius_values = {
'modern': {
'none': '0',
'sm': '4px',
'DEFAULT': '8px',
'md': '12px',
'lg': '16px',
'xl': '24px',
'full': '9999px'
},
'classic': {
'none': '0',
'sm': '2px',
'DEFAULT': '4px',
'md': '6px',
'lg': '8px',
'xl': '12px',
'full': '9999px'
},
'playful': {
'none': '0',
'sm': '8px',
'DEFAULT': '16px',
'md': '20px',
'lg': '24px',
'xl': '32px',
'full': '9999px'
}
}
return {
'radius': radius_values.get(style, radius_values['modern']),
'width': {
'none': '0',
'thin': '1px',
'DEFAULT': '1px',
'medium': '2px',
'thick': '4px'
}
}
def generate_shadow_tokens(self, style: str) -> Dict:
"""Generate shadow tokens"""
shadow_styles = {
'modern': {
'none': 'none',
'sm': '0 1px 2px 0 rgba(0, 0, 0, 0.05)',
'DEFAULT': '0 1px 3px 0 rgba(0, 0, 0, 0.1), 0 1px 2px 0 rgba(0, 0, 0, 0.06)',
'md': '0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06)',
'lg': '0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05)',
'xl': '0 20px 25px -5px rgba(0, 0, 0, 0.1), 0 10px 10px -5px rgba(0, 0, 0, 0.04)',
'2xl': '0 25px 50px -12px rgba(0, 0, 0, 0.25)',
'inner': 'inset 0 2px 4px 0 rgba(0, 0, 0, 0.06)'
},
'classic': {
'none': 'none',
'sm': '0 1px 2px rgba(0, 0, 0, 0.1)',
'DEFAULT': '0 2px 4px rgba(0, 0, 0, 0.1)',
'md': '0 4px 8px rgba(0, 0, 0, 0.1)',
'lg': '0 8px 16px rgba(0, 0, 0, 0.1)',
'xl': '0 16px 32px rgba(0, 0, 0, 0.1)'
}
}
return shadow_styles.get(style, shadow_styles['modern'])
def generate_animation_tokens(self) -> Dict:
    """Motion tokens: durations, easing curves and named keyframes."""
    return {
        'duration': {
            'instant': '0ms',
            'fast': '150ms',
            'DEFAULT': '250ms',
            'slow': '350ms',
            'slower': '500ms'
        },
        'easing': {
            'linear': 'linear',
            'ease': 'ease',
            'easeIn': 'ease-in',
            'easeOut': 'ease-out',
            'easeInOut': 'ease-in-out',
            # Overshooting curve for springy, playful transitions.
            'spring': 'cubic-bezier(0.68, -0.55, 0.265, 1.55)'
        },
        'keyframes': {
            'fadeIn': {
                'from': {'opacity': 0},
                'to': {'opacity': 1}
            },
            'slideUp': {
                'from': {'transform': 'translateY(10px)', 'opacity': 0},
                'to': {'transform': 'translateY(0)', 'opacity': 1}
            },
            'scale': {
                'from': {'transform': 'scale(0.95)'},
                'to': {'transform': 'scale(1)'}
            }
        }
    }
def generate_breakpoints(self) -> Dict:
    """Responsive breakpoint tokens (min-widths, mobile-first)."""
    widths = [('xs', 480), ('sm', 640), ('md', 768),
              ('lg', 1024), ('xl', 1280), ('2xl', 1536)]
    return {name: f'{px}px' for name, px in widths}
def generate_z_index_scale(self) -> Dict:
    """Stacking-order tokens from hidden (-1) up to notifications (1070)."""
    scale = {'hide': -1, 'base': 0, 'dropdown': 1000}
    # Everything above the dropdown layer climbs in steps of 10.
    for offset, layer in enumerate(['sticky', 'overlay', 'modal',
                                    'popover', 'tooltip', 'notification']):
        scale[layer] = 1020 + offset * 10
    return scale
def export_tokens(self, tokens: Dict, format: str = 'json') -> str:
"""Export tokens in various formats"""
if format == 'json':
return json.dumps(tokens, indent=2)
elif format == 'css':
return self._export_as_css(tokens)
elif format == 'scss':
return self._export_as_scss(tokens)
else:
return json.dumps(tokens, indent=2)
def _export_as_css(self, tokens: Dict) -> str:
    """Flatten the nested token dict into CSS custom properties on :root.

    Nested keys are joined with '-' (e.g. colors/primary/500 becomes
    --colors-primary-500). NOTE(review): a scalar at the top level gets
    an empty prefix and renders with an extra leading dash — confirm
    whether that is intended.
    """
    css = [':root {']
    def flatten_dict(obj, prefix=''):
        # Recursively walk nested dicts, accumulating the hyphenated path.
        for key, value in obj.items():
            if isinstance(value, dict):
                flatten_dict(value, f'{prefix}-{key}' if prefix else key)
            else:
                css.append(f' --{prefix}-{key}: {value};')
    flatten_dict(tokens)
    css.append('}')
    return '\n'.join(css)
def _hex_to_rgb(self, hex_color: str) -> Tuple[int, int, int]:
"""Convert hex to RGB"""
hex_color = hex_color.lstrip('#')
return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))
def _rgb_to_hex(self, rgb: List[int]) -> str:
"""Convert RGB to hex"""
return '#{:02x}{:02x}{:02x}'.format(*rgb)
def _adjust_hue(self, hex_color: str, degrees: int) -> str:
"""Adjust hue of color"""
rgb = self._hex_to_rgb(hex_color)
h, s, v = colorsys.rgb_to_hsv(*[c/255 for c in rgb])
h = (h + degrees/360) % 1
new_rgb = colorsys.hsv_to_rgb(h, s, v)
return self._rgb_to_hex([int(c * 255) for c in new_rgb])
def main():
    """CLI entry point: generate and print a design token system.

    Usage: design_token_generator.py [brand_color] [style] [format]
    where format is 'json' (default), 'css', 'scss' or 'summary'.
    """
    import sys
    generator = DesignTokenGenerator()
    # Get parameters
    brand_color = sys.argv[1] if len(sys.argv) > 1 else "#0066CC"
    style = sys.argv[2] if len(sys.argv) > 2 else "modern"
    output_format = sys.argv[3] if len(sys.argv) > 3 else "json"
    # Generate tokens
    tokens = generator.generate_complete_system(brand_color, style)
    # Output
    if output_format == 'summary':
        # Human-readable overview instead of a raw token dump.
        print("=" * 60)
        print("DESIGN SYSTEM TOKENS")
        print("=" * 60)
        print(f"\nπ¨ Style: {style}")
        print(f"π¨ Brand Color: {brand_color}")
        print("\nπ Generated Tokens:")
        print(f" β’ Colors: {len(tokens['colors'])} palettes")
        print(f" β’ Typography: {len(tokens['typography'])} categories")
        print(f" β’ Spacing: {len(tokens['spacing'])} values")
        print(f" β’ Shadows: {len(tokens['shadows'])} styles")
        print(f" β’ Breakpoints: {len(tokens['breakpoints'])} sizes")
        print("\nπΎ Export formats available: json, css, scss")
    else:
        print(generator.export_tokens(tokens, output_format))


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/creative-design/ui-design-system/scripts/design_token_generator.py",
"license": "MIT License",
"lines": 465,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/creative-design/ux-researcher-designer/scripts/persona_generator.py | #!/usr/bin/env python3
"""
Data-Driven Persona Generator
Creates research-backed user personas from user data and interviews
"""
import json
from typing import Dict, List, Tuple
from collections import Counter, defaultdict
import random
class PersonaGenerator:
"""Generate data-driven personas from user research"""
def __init__(self):
    # Checklist of attributes a complete persona should cover.
    # NOTE(review): not referenced by the methods visible in this
    # portion of the file — presumably consumed by helpers further
    # down; verify before removing.
    self.persona_components = {
        'demographics': ['age', 'location', 'occupation', 'education', 'income'],
        'psychographics': ['goals', 'frustrations', 'motivations', 'values'],
        'behaviors': ['tech_savviness', 'usage_frequency', 'preferred_devices', 'key_activities'],
        'needs': ['functional', 'emotional', 'social']
    }
    # Canned traits and fallback quotes for each of the four archetypes
    # that _identify_archetype() can return.
    self.archetype_templates = {
        'power_user': {
            'characteristics': ['tech-savvy', 'frequent user', 'early adopter', 'efficiency-focused'],
            'goals': ['maximize productivity', 'automate workflows', 'access advanced features'],
            'frustrations': ['slow performance', 'limited customization', 'lack of shortcuts'],
            'quote': "I need tools that can keep up with my workflow"
        },
        'casual_user': {
            'characteristics': ['occasional user', 'basic needs', 'prefers simplicity'],
            'goals': ['accomplish specific tasks', 'easy to use', 'minimal learning curve'],
            'frustrations': ['complexity', 'too many options', 'unclear navigation'],
            'quote': "I just want it to work without having to think about it"
        },
        'business_user': {
            'characteristics': ['professional context', 'ROI-focused', 'team collaboration'],
            'goals': ['improve team efficiency', 'track metrics', 'integrate with tools'],
            'frustrations': ['lack of reporting', 'poor collaboration features', 'no enterprise features'],
            'quote': "I need to show clear value to my stakeholders"
        },
        'mobile_first': {
            'characteristics': ['primarily mobile', 'on-the-go usage', 'quick interactions'],
            'goals': ['access anywhere', 'quick actions', 'offline capability'],
            'frustrations': ['poor mobile experience', 'desktop-only features', 'slow loading'],
            'quote': "My phone is my primary computing device"
        }
    }
def generate_persona_from_data(self, user_data: List[Dict],
                               interview_insights: List[Dict] = None) -> Dict:
    """Build one persona dict from usage records plus optional interviews.

    First mines *user_data* for aggregate patterns, picks the closest
    archetype, then fills every persona section from the patterns and
    (where provided) the interview insights.
    """
    patterns = self._analyze_user_patterns(user_data)
    archetype = self._identify_archetype(patterns)

    persona = {'name': self._generate_name(archetype), 'archetype': archetype}
    persona['tagline'] = self._generate_tagline(patterns)
    persona['demographics'] = self._aggregate_demographics(user_data)
    persona['psychographics'] = self._extract_psychographics(patterns, interview_insights)
    persona['behaviors'] = self._analyze_behaviors(user_data)
    persona['needs_and_goals'] = self._identify_needs(patterns, interview_insights)
    persona['frustrations'] = self._extract_frustrations(patterns, interview_insights)
    persona['scenarios'] = self._generate_scenarios(archetype, patterns)
    persona['quote'] = self._select_quote(interview_insights, archetype)
    persona['data_points'] = self._calculate_data_points(user_data)
    persona['design_implications'] = self._derive_design_implications(patterns)
    return persona
def _analyze_user_patterns(self, user_data: List[Dict]) -> Dict:
"""Analyze patterns in user data"""
patterns = {
'usage_frequency': defaultdict(int),
'feature_usage': defaultdict(int),
'devices': defaultdict(int),
'contexts': defaultdict(int),
'pain_points': [],
'success_metrics': []
}
for user in user_data:
# Frequency patterns
freq = user.get('usage_frequency', 'medium')
patterns['usage_frequency'][freq] += 1
# Feature usage
for feature in user.get('features_used', []):
patterns['feature_usage'][feature] += 1
# Device patterns
device = user.get('primary_device', 'desktop')
patterns['devices'][device] += 1
# Context patterns
context = user.get('usage_context', 'work')
patterns['contexts'][context] += 1
# Pain points
if 'pain_points' in user:
patterns['pain_points'].extend(user['pain_points'])
return patterns
def _identify_archetype(self, patterns: Dict) -> str:
"""Identify persona archetype based on patterns"""
# Simple heuristic-based archetype identification
freq_pattern = max(patterns['usage_frequency'].items(), key=lambda x: x[1])[0] if patterns['usage_frequency'] else 'medium'
device_pattern = max(patterns['devices'].items(), key=lambda x: x[1])[0] if patterns['devices'] else 'desktop'
if freq_pattern == 'daily' and len(patterns['feature_usage']) > 10:
return 'power_user'
elif device_pattern in ['mobile', 'tablet']:
return 'mobile_first'
elif patterns['contexts'].get('work', 0) > patterns['contexts'].get('personal', 0):
return 'business_user'
else:
return 'casual_user'
def _generate_name(self, archetype: str) -> str:
"""Generate persona name based on archetype"""
names = {
'power_user': ['Alex', 'Sam', 'Jordan', 'Morgan'],
'casual_user': ['Pat', 'Jamie', 'Casey', 'Riley'],
'business_user': ['Taylor', 'Cameron', 'Avery', 'Blake'],
'mobile_first': ['Quinn', 'Skylar', 'River', 'Sage']
}
name_pool = names.get(archetype, names['casual_user'])
first_name = random.choice(name_pool)
roles = {
'power_user': 'the Power User',
'casual_user': 'the Casual User',
'business_user': 'the Business Professional',
'mobile_first': 'the Mobile Native'
}
return f"{first_name} {roles[archetype]}"
def _generate_tagline(self, patterns: Dict) -> str:
"""Generate persona tagline"""
freq = max(patterns['usage_frequency'].items(), key=lambda x: x[1])[0] if patterns['usage_frequency'] else 'regular'
context = max(patterns['contexts'].items(), key=lambda x: x[1])[0] if patterns['contexts'] else 'general'
return f"A {freq} user who primarily uses the product for {context} purposes"
def _aggregate_demographics(self, user_data: List[Dict]) -> Dict:
"""Aggregate demographic information"""
demographics = {
'age_range': '',
'location_type': '',
'occupation_category': '',
'education_level': '',
'tech_proficiency': ''
}
if not user_data:
return demographics
# Age range
ages = [u.get('age', 30) for u in user_data if 'age' in u]
if ages:
avg_age = sum(ages) / len(ages)
if avg_age < 25:
demographics['age_range'] = '18-24'
elif avg_age < 35:
demographics['age_range'] = '25-34'
elif avg_age < 45:
demographics['age_range'] = '35-44'
else:
demographics['age_range'] = '45+'
# Location type
locations = [u.get('location_type', 'urban') for u in user_data if 'location_type' in u]
if locations:
demographics['location_type'] = Counter(locations).most_common(1)[0][0]
# Tech proficiency
tech_scores = [u.get('tech_proficiency', 5) for u in user_data if 'tech_proficiency' in u]
if tech_scores:
avg_tech = sum(tech_scores) / len(tech_scores)
if avg_tech < 3:
demographics['tech_proficiency'] = 'Beginner'
elif avg_tech < 7:
demographics['tech_proficiency'] = 'Intermediate'
else:
demographics['tech_proficiency'] = 'Advanced'
return demographics
def _extract_psychographics(self, patterns: Dict, interviews: List[Dict] = None) -> Dict:
"""Extract psychographic information"""
psychographics = {
'motivations': [],
'values': [],
'attitudes': [],
'lifestyle': ''
}
# Extract from patterns
if patterns['usage_frequency'].get('daily', 0) > 0:
psychographics['motivations'].append('Efficiency')
psychographics['values'].append('Time-saving')
if patterns['devices'].get('mobile', 0) > patterns['devices'].get('desktop', 0):
psychographics['lifestyle'] = 'On-the-go, mobile-first'
psychographics['values'].append('Flexibility')
# Extract from interviews if available
if interviews:
for interview in interviews:
if 'motivations' in interview:
psychographics['motivations'].extend(interview['motivations'])
if 'values' in interview:
psychographics['values'].extend(interview['values'])
# Deduplicate
psychographics['motivations'] = list(set(psychographics['motivations']))[:5]
psychographics['values'] = list(set(psychographics['values']))[:5]
return psychographics
def _analyze_behaviors(self, user_data: List[Dict]) -> Dict:
"""Analyze user behaviors"""
behaviors = {
'usage_patterns': [],
'feature_preferences': [],
'interaction_style': '',
'learning_preference': ''
}
if not user_data:
return behaviors
# Usage patterns
frequencies = [u.get('usage_frequency', 'medium') for u in user_data]
freq_counter = Counter(frequencies)
behaviors['usage_patterns'] = [f"{freq}: {count} users" for freq, count in freq_counter.most_common(3)]
# Feature preferences
all_features = []
for user in user_data:
all_features.extend(user.get('features_used', []))
feature_counter = Counter(all_features)
behaviors['feature_preferences'] = [feat for feat, count in feature_counter.most_common(5)]
# Interaction style
if len(behaviors['feature_preferences']) > 10:
behaviors['interaction_style'] = 'Exploratory - uses many features'
else:
behaviors['interaction_style'] = 'Focused - uses core features'
return behaviors
def _identify_needs(self, patterns: Dict, interviews: List[Dict] = None) -> Dict:
"""Identify user needs and goals"""
needs = {
'primary_goals': [],
'secondary_goals': [],
'functional_needs': [],
'emotional_needs': []
}
# Derive from usage patterns
if patterns['usage_frequency'].get('daily', 0) > 0:
needs['primary_goals'].append('Complete tasks efficiently')
needs['functional_needs'].append('Speed and performance')
if patterns['contexts'].get('work', 0) > 0:
needs['primary_goals'].append('Professional productivity')
needs['functional_needs'].append('Integration with work tools')
# Common emotional needs
needs['emotional_needs'] = [
'Feel confident using the product',
'Trust the system with data',
'Feel supported when issues arise'
]
# Extract from interviews
if interviews:
for interview in interviews:
if 'goals' in interview:
needs['primary_goals'].extend(interview['goals'][:2])
if 'needs' in interview:
needs['functional_needs'].extend(interview['needs'][:3])
return needs
def _extract_frustrations(self, patterns: Dict, interviews: List[Dict] = None) -> List[str]:
"""Extract user frustrations"""
frustrations = []
# Common frustrations from patterns
if patterns['pain_points']:
frustration_counter = Counter(patterns['pain_points'])
frustrations = [pain for pain, count in frustration_counter.most_common(5)]
# Add archetype-specific frustrations if not enough from data
if len(frustrations) < 3:
frustrations.extend([
'Slow loading times',
'Confusing navigation',
'Lack of mobile optimization'
])
return frustrations[:5]
def _generate_scenarios(self, archetype: str, patterns: Dict) -> List[Dict]:
"""Generate usage scenarios"""
scenarios = []
# Common scenarios based on archetype
scenario_templates = {
'power_user': [
{
'title': 'Bulk Processing',
'context': 'Monday morning, needs to process week\'s data',
'goal': 'Complete batch operations quickly',
'steps': ['Import data', 'Apply bulk actions', 'Export results'],
'pain_points': ['No keyboard shortcuts', 'Slow processing']
}
],
'casual_user': [
{
'title': 'Quick Task',
'context': 'Needs to complete single task',
'goal': 'Get in, complete task, get out',
'steps': ['Find feature', 'Complete task', 'Save/Exit'],
'pain_points': ['Can\'t find feature', 'Too many steps']
}
],
'business_user': [
{
'title': 'Team Collaboration',
'context': 'Working with team on project',
'goal': 'Share and collaborate efficiently',
'steps': ['Create content', 'Share with team', 'Track feedback'],
'pain_points': ['No real-time collaboration', 'Poor permission management']
}
],
'mobile_first': [
{
'title': 'On-the-Go Access',
'context': 'Commuting, needs quick access',
'goal': 'Complete task on mobile',
'steps': ['Open mobile app', 'Quick action', 'Sync with desktop'],
'pain_points': ['Feature parity issues', 'Poor mobile UX']
}
]
}
return scenario_templates.get(archetype, scenario_templates['casual_user'])
def _select_quote(self, interviews: List[Dict] = None, archetype: str = 'casual_user') -> str:
"""Select representative quote"""
if interviews:
# Try to find a real quote
for interview in interviews:
if 'quotes' in interview and interview['quotes']:
return interview['quotes'][0]
# Use archetype default
return self.archetype_templates[archetype]['quote']
def _calculate_data_points(self, user_data: List[Dict]) -> Dict:
"""Calculate supporting data points"""
return {
'sample_size': len(user_data),
'confidence_level': 'High' if len(user_data) > 50 else 'Medium' if len(user_data) > 20 else 'Low',
'last_updated': 'Current',
'validation_method': 'Quantitative analysis + Qualitative interviews'
}
def _derive_design_implications(self, patterns: Dict) -> List[str]:
"""Derive design implications from persona"""
implications = []
# Based on frequency
if patterns['usage_frequency'].get('daily', 0) > patterns['usage_frequency'].get('weekly', 0):
implications.append('Optimize for speed and efficiency')
implications.append('Provide keyboard shortcuts and power features')
else:
implications.append('Focus on discoverability and guidance')
implications.append('Simplify onboarding experience')
# Based on device
if patterns['devices'].get('mobile', 0) > 0:
implications.append('Mobile-first responsive design')
implications.append('Touch-optimized interactions')
# Based on context
if patterns['contexts'].get('work', 0) > patterns['contexts'].get('personal', 0):
implications.append('Professional visual design')
implications.append('Enterprise features (SSO, audit logs)')
return implications[:5]
def format_persona_output(self, persona: Dict) -> str:
    """Format persona for display.

    Renders the persona dict (as built by the generator's other methods)
    into a sectioned, human-readable text card and returns it as one
    newline-joined string.

    NOTE(review): the section-header glyphs below appear mojibake-encoded
    (emoji garbled by a charset round-trip) — confirm the intended
    characters against the original file before editing any of them.
    """
    output = []
    output.append("=" * 60)
    output.append(f"PERSONA: {persona['name']}")
    output.append("=" * 60)
    output.append(f"\nπ {persona['tagline']}\n")
    output.append(f"Archetype: {persona['archetype'].replace('_', ' ').title()}")
    output.append(f"Quote: \"{persona['quote']}\"\n")
    # Demographics: only render fields that were actually populated.
    output.append("π€ Demographics:")
    for key, value in persona['demographics'].items():
        if value:
            output.append(f" β’ {key.replace('_', ' ').title()}: {value}")
    output.append("\nπ§ Psychographics:")
    if persona['psychographics']['motivations']:
        output.append(f" Motivations: {', '.join(persona['psychographics']['motivations'])}")
    if persona['psychographics']['values']:
        output.append(f" Values: {', '.join(persona['psychographics']['values'])}")
    # Each list section is capped (3 entries) to keep the card scannable.
    output.append("\nπ― Goals & Needs:")
    for goal in persona['needs_and_goals'].get('primary_goals', [])[:3]:
        output.append(f" β’ {goal}")
    output.append("\nπ€ Frustrations:")
    for frustration in persona['frustrations'][:3]:
        output.append(f" β’ {frustration}")
    output.append("\nπ Behaviors:")
    for pref in persona['behaviors'].get('feature_preferences', [])[:3]:
        output.append(f" β’ Frequently uses: {pref}")
    # Design implications are printed in full (already capped upstream).
    output.append("\nπ‘ Design Implications:")
    for implication in persona['design_implications']:
        output.append(f" β {implication}")
    output.append(f"\nπ Data: Based on {persona['data_points']['sample_size']} users")
    output.append(f" Confidence: {persona['data_points']['confidence_level']}")
    return "\n".join(output)
def create_sample_user_data():
    """Generate 30 deterministic synthetic user records for testing.

    Field values cycle through small pools keyed off the user index, so the
    output is identical on every call.
    """
    frequencies = ['daily', 'weekly', 'monthly']
    feature_pool = ['dashboard', 'reports', 'settings', 'sharing', 'export']
    devices = ['desktop', 'mobile', 'tablet']
    contexts = ['work', 'personal']
    pains = ['slow loading', 'confusing UI', 'missing features']
    sample = []
    for i in range(30):
        sample.append({
            'user_id': f'user_{i}',
            'age': 25 + (i % 30),
            'usage_frequency': frequencies[i % 3],
            'features_used': feature_pool[:3 + (i % 3)],
            'primary_device': devices[i % 3],
            'usage_context': contexts[i % 2],
            'tech_proficiency': 3 + (i % 7),
            'pain_points': pains[:(i % 3) + 1]
        })
    return sample
def main():
    """Demo entry point: build a persona from sample data and print it.

    Pass 'json' as the first CLI argument for machine-readable output.
    """
    import sys

    generator = PersonaGenerator()
    user_data = create_sample_user_data()
    # Minimal qualitative input to exercise the interview-driven code paths.
    interview_insights = [
        {
            'quotes': ["I need to see all my data in one place"],
            'motivations': ['Efficiency', 'Control'],
            'goals': ['Save time', 'Make better decisions']
        }
    ]
    persona = generator.generate_persona_from_data(user_data, interview_insights)
    if len(sys.argv) > 1 and sys.argv[1] == 'json':
        print(json.dumps(persona, indent=2))
    else:
        print(generator.format_persona_output(persona))


if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/creative-design/ux-researcher-designer/scripts/persona_generator.py",
"license": "MIT License",
"lines": 412,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/code-reviewer/scripts/code_quality_checker.py | #!/usr/bin/env python3
"""
Code Quality Checker
Automated tool for code reviewer tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class CodeQualityChecker:
    """Main class for code quality checker functionality

    Template pipeline: validate the target path, run a (placeholder)
    analysis, then print a plain-text report.
    """

    def __init__(self, target_path: str, verbose: bool = False):
        # Wrap in Path so validate_target() can use pathlib's exists().
        self.target_path = Path(target_path)
        self.verbose = verbose
        # Populated by analyze(); returned verbatim from run().
        self.results = {}

    def run(self) -> Dict:
        """Execute the main functionality

        Runs validate -> analyze -> report; any exception aborts the
        process with exit code 1 instead of propagating to the caller.
        """
        print(f"π Running {self.__class__.__name__}...")
        print(f"π Target: {self.target_path}")
        try:
            self.validate_target()
            self.analyze()
            self.generate_report()
            # NOTE(review): this literal appears corrupted by an encoding
            # round-trip (split across lines) — confirm against the
            # original source before reformatting.
            print("β
Completed successfully!")
            return self.results
        except Exception as e:
            print(f"β Error: {e}")
            sys.exit(1)

    def validate_target(self):
        """Validate the target path exists and is accessible"""
        if not self.target_path.exists():
            raise ValueError(f"Target path does not exist: {self.target_path}")
        if self.verbose:
            print(f"β Target validated: {self.target_path}")

    def analyze(self):
        """Perform the main analysis or operation"""
        if self.verbose:
            print("π Analyzing...")
        # Main logic here
        # Placeholder: records status/target and an empty findings list;
        # concrete checks are expected to append to 'findings'.
        self.results['status'] = 'success'
        self.results['target'] = str(self.target_path)
        self.results['findings'] = []
        # Add analysis results
        if self.verbose:
            print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")

    def generate_report(self):
        """Generate and display the report"""
        # Plain-text summary banner printed to stdout.
        print("\n" + "="*50)
        print("REPORT")
        print("="*50)
        print(f"Target: {self.results.get('target')}")
        print(f"Status: {self.results.get('status')}")
        print(f"Findings: {len(self.results.get('findings', []))}")
        print("="*50 + "\n")
def main():
    """Command-line entry point: parse args, run the tool, emit results."""
    parser = argparse.ArgumentParser(description="Code Quality Checker")
    parser.add_argument('target', help='Target path to analyze or process')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Enable verbose output')
    parser.add_argument('--json', action='store_true',
                        help='Output results as JSON')
    parser.add_argument('--output', '-o', help='Output file path')
    args = parser.parse_args()

    tool = CodeQualityChecker(args.target, verbose=args.verbose)
    results = tool.run()

    # Optional machine-readable output; written to a file when --output is set.
    if args.json:
        serialized = json.dumps(results, indent=2)
        if args.output:
            with open(args.output, 'w') as f:
                f.write(serialized)
            print(f"Results written to {args.output}")
        else:
            print(serialized)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/code-reviewer/scripts/code_quality_checker.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/code-reviewer/scripts/pr_analyzer.py | #!/usr/bin/env python3
"""
Pr Analyzer
Automated tool for code reviewer tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class PrAnalyzer:
    """Main class for pr analyzer functionality

    Template pipeline: validate the target path, run a (placeholder)
    analysis, then print a plain-text report.
    """

    def __init__(self, target_path: str, verbose: bool = False):
        # Wrap in Path so validate_target() can use pathlib's exists().
        self.target_path = Path(target_path)
        self.verbose = verbose
        # Populated by analyze(); returned verbatim from run().
        self.results = {}

    def run(self) -> Dict:
        """Execute the main functionality

        Runs validate -> analyze -> report; any exception aborts the
        process with exit code 1 instead of propagating to the caller.
        """
        print(f"π Running {self.__class__.__name__}...")
        print(f"π Target: {self.target_path}")
        try:
            self.validate_target()
            self.analyze()
            self.generate_report()
            # NOTE(review): this literal appears corrupted by an encoding
            # round-trip (split across lines) — confirm against the
            # original source before reformatting.
            print("β
Completed successfully!")
            return self.results
        except Exception as e:
            print(f"β Error: {e}")
            sys.exit(1)

    def validate_target(self):
        """Validate the target path exists and is accessible"""
        if not self.target_path.exists():
            raise ValueError(f"Target path does not exist: {self.target_path}")
        if self.verbose:
            print(f"β Target validated: {self.target_path}")

    def analyze(self):
        """Perform the main analysis or operation"""
        if self.verbose:
            print("π Analyzing...")
        # Main logic here
        # Placeholder: records status/target and an empty findings list;
        # concrete checks are expected to append to 'findings'.
        self.results['status'] = 'success'
        self.results['target'] = str(self.target_path)
        self.results['findings'] = []
        # Add analysis results
        if self.verbose:
            print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")

    def generate_report(self):
        """Generate and display the report"""
        # Plain-text summary banner printed to stdout.
        print("\n" + "="*50)
        print("REPORT")
        print("="*50)
        print(f"Target: {self.results.get('target')}")
        print(f"Status: {self.results.get('status')}")
        print(f"Findings: {len(self.results.get('findings', []))}")
        print("="*50 + "\n")
def main():
    """Command-line entry point: parse args, run the tool, emit results."""
    parser = argparse.ArgumentParser(description="Pr Analyzer")
    parser.add_argument('target', help='Target path to analyze or process')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Enable verbose output')
    parser.add_argument('--json', action='store_true',
                        help='Output results as JSON')
    parser.add_argument('--output', '-o', help='Output file path')
    args = parser.parse_args()

    tool = PrAnalyzer(args.target, verbose=args.verbose)
    results = tool.run()

    # Optional machine-readable output; written to a file when --output is set.
    if args.json:
        serialized = json.dumps(results, indent=2)
        if args.output:
            with open(args.output, 'w') as f:
                f.write(serialized)
            print(f"Results written to {args.output}")
        else:
            print(serialized)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/code-reviewer/scripts/pr_analyzer.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/code-reviewer/scripts/review_report_generator.py | #!/usr/bin/env python3
"""
Review Report Generator
Automated tool for code reviewer tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class ReviewReportGenerator:
    """Main class for review report generator functionality

    Template pipeline: validate the target path, run a (placeholder)
    analysis, then print a plain-text report.
    """

    def __init__(self, target_path: str, verbose: bool = False):
        # Wrap in Path so validate_target() can use pathlib's exists().
        self.target_path = Path(target_path)
        self.verbose = verbose
        # Populated by analyze(); returned verbatim from run().
        self.results = {}

    def run(self) -> Dict:
        """Execute the main functionality

        Runs validate -> analyze -> report; any exception aborts the
        process with exit code 1 instead of propagating to the caller.
        """
        print(f"π Running {self.__class__.__name__}...")
        print(f"π Target: {self.target_path}")
        try:
            self.validate_target()
            self.analyze()
            self.generate_report()
            # NOTE(review): this literal appears corrupted by an encoding
            # round-trip (split across lines) — confirm against the
            # original source before reformatting.
            print("β
Completed successfully!")
            return self.results
        except Exception as e:
            print(f"β Error: {e}")
            sys.exit(1)

    def validate_target(self):
        """Validate the target path exists and is accessible"""
        if not self.target_path.exists():
            raise ValueError(f"Target path does not exist: {self.target_path}")
        if self.verbose:
            print(f"β Target validated: {self.target_path}")

    def analyze(self):
        """Perform the main analysis or operation"""
        if self.verbose:
            print("π Analyzing...")
        # Main logic here
        # Placeholder: records status/target and an empty findings list;
        # concrete checks are expected to append to 'findings'.
        self.results['status'] = 'success'
        self.results['target'] = str(self.target_path)
        self.results['findings'] = []
        # Add analysis results
        if self.verbose:
            print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")

    def generate_report(self):
        """Generate and display the report"""
        # Plain-text summary banner printed to stdout.
        print("\n" + "="*50)
        print("REPORT")
        print("="*50)
        print(f"Target: {self.results.get('target')}")
        print(f"Status: {self.results.get('status')}")
        print(f"Findings: {len(self.results.get('findings', []))}")
        print("="*50 + "\n")
def main():
    """Command-line entry point: parse args, run the tool, emit results."""
    parser = argparse.ArgumentParser(description="Review Report Generator")
    parser.add_argument('target', help='Target path to analyze or process')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Enable verbose output')
    parser.add_argument('--json', action='store_true',
                        help='Output results as JSON')
    parser.add_argument('--output', '-o', help='Output file path')
    args = parser.parse_args()

    tool = ReviewReportGenerator(args.target, verbose=args.verbose)
    results = tool.run()

    # Optional machine-readable output; written to a file when --output is set.
    if args.json:
        serialized = json.dumps(results, indent=2)
        if args.output:
            with open(args.output, 'w') as f:
                f.write(serialized)
            print(f"Results written to {args.output}")
        else:
            print(serialized)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/code-reviewer/scripts/review_report_generator.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-architect/scripts/architecture_diagram_generator.py | #!/usr/bin/env python3
"""
Architecture Diagram Generator
Automated tool for senior architect tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class ArchitectureDiagramGenerator:
    """Main class for architecture diagram generator functionality

    Template pipeline: validate the target path, run a (placeholder)
    analysis, then print a plain-text report.
    """

    def __init__(self, target_path: str, verbose: bool = False):
        # Wrap in Path so validate_target() can use pathlib's exists().
        self.target_path = Path(target_path)
        self.verbose = verbose
        # Populated by analyze(); returned verbatim from run().
        self.results = {}

    def run(self) -> Dict:
        """Execute the main functionality

        Runs validate -> analyze -> report; any exception aborts the
        process with exit code 1 instead of propagating to the caller.
        """
        print(f"π Running {self.__class__.__name__}...")
        print(f"π Target: {self.target_path}")
        try:
            self.validate_target()
            self.analyze()
            self.generate_report()
            # NOTE(review): this literal appears corrupted by an encoding
            # round-trip (split across lines) — confirm against the
            # original source before reformatting.
            print("β
Completed successfully!")
            return self.results
        except Exception as e:
            print(f"β Error: {e}")
            sys.exit(1)

    def validate_target(self):
        """Validate the target path exists and is accessible"""
        if not self.target_path.exists():
            raise ValueError(f"Target path does not exist: {self.target_path}")
        if self.verbose:
            print(f"β Target validated: {self.target_path}")

    def analyze(self):
        """Perform the main analysis or operation"""
        if self.verbose:
            print("π Analyzing...")
        # Main logic here
        # Placeholder: records status/target and an empty findings list;
        # concrete checks are expected to append to 'findings'.
        self.results['status'] = 'success'
        self.results['target'] = str(self.target_path)
        self.results['findings'] = []
        # Add analysis results
        if self.verbose:
            print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")

    def generate_report(self):
        """Generate and display the report"""
        # Plain-text summary banner printed to stdout.
        print("\n" + "="*50)
        print("REPORT")
        print("="*50)
        print(f"Target: {self.results.get('target')}")
        print(f"Status: {self.results.get('status')}")
        print(f"Findings: {len(self.results.get('findings', []))}")
        print("="*50 + "\n")
def main():
    """Command-line entry point: parse args, run the tool, emit results."""
    parser = argparse.ArgumentParser(description="Architecture Diagram Generator")
    parser.add_argument('target', help='Target path to analyze or process')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Enable verbose output')
    parser.add_argument('--json', action='store_true',
                        help='Output results as JSON')
    parser.add_argument('--output', '-o', help='Output file path')
    args = parser.parse_args()

    tool = ArchitectureDiagramGenerator(args.target, verbose=args.verbose)
    results = tool.run()

    # Optional machine-readable output; written to a file when --output is set.
    if args.json:
        serialized = json.dumps(results, indent=2)
        if args.output:
            with open(args.output, 'w') as f:
                f.write(serialized)
            print(f"Results written to {args.output}")
        else:
            print(serialized)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-architect/scripts/architecture_diagram_generator.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-architect/scripts/dependency_analyzer.py | #!/usr/bin/env python3
"""
Dependency Analyzer
Automated tool for senior architect tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class DependencyAnalyzer:
    """Main class for dependency analyzer functionality

    Template pipeline: validate the target path, run a (placeholder)
    analysis, then print a plain-text report.
    """

    def __init__(self, target_path: str, verbose: bool = False):
        # Wrap in Path so validate_target() can use pathlib's exists().
        self.target_path = Path(target_path)
        self.verbose = verbose
        # Populated by analyze(); returned verbatim from run().
        self.results = {}

    def run(self) -> Dict:
        """Execute the main functionality

        Runs validate -> analyze -> report; any exception aborts the
        process with exit code 1 instead of propagating to the caller.
        """
        print(f"π Running {self.__class__.__name__}...")
        print(f"π Target: {self.target_path}")
        try:
            self.validate_target()
            self.analyze()
            self.generate_report()
            # NOTE(review): this literal appears corrupted by an encoding
            # round-trip (split across lines) — confirm against the
            # original source before reformatting.
            print("β
Completed successfully!")
            return self.results
        except Exception as e:
            print(f"β Error: {e}")
            sys.exit(1)

    def validate_target(self):
        """Validate the target path exists and is accessible"""
        if not self.target_path.exists():
            raise ValueError(f"Target path does not exist: {self.target_path}")
        if self.verbose:
            print(f"β Target validated: {self.target_path}")

    def analyze(self):
        """Perform the main analysis or operation"""
        if self.verbose:
            print("π Analyzing...")
        # Main logic here
        # Placeholder: records status/target and an empty findings list;
        # concrete checks are expected to append to 'findings'.
        self.results['status'] = 'success'
        self.results['target'] = str(self.target_path)
        self.results['findings'] = []
        # Add analysis results
        if self.verbose:
            print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")

    def generate_report(self):
        """Generate and display the report"""
        # Plain-text summary banner printed to stdout.
        print("\n" + "="*50)
        print("REPORT")
        print("="*50)
        print(f"Target: {self.results.get('target')}")
        print(f"Status: {self.results.get('status')}")
        print(f"Findings: {len(self.results.get('findings', []))}")
        print("="*50 + "\n")
def main():
    """Command-line entry point: parse args, run the tool, emit results."""
    parser = argparse.ArgumentParser(description="Dependency Analyzer")
    parser.add_argument('target', help='Target path to analyze or process')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Enable verbose output')
    parser.add_argument('--json', action='store_true',
                        help='Output results as JSON')
    parser.add_argument('--output', '-o', help='Output file path')
    args = parser.parse_args()

    tool = DependencyAnalyzer(args.target, verbose=args.verbose)
    results = tool.run()

    # Optional machine-readable output; written to a file when --output is set.
    if args.json:
        serialized = json.dumps(results, indent=2)
        if args.output:
            with open(args.output, 'w') as f:
                f.write(serialized)
            print(f"Results written to {args.output}")
        else:
            print(serialized)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-architect/scripts/dependency_analyzer.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-architect/scripts/project_architect.py | #!/usr/bin/env python3
"""
Project Architect
Automated tool for senior architect tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class ProjectArchitect:
    """Main class for project architect functionality

    Template pipeline: validate the target path, run a (placeholder)
    analysis, then print a plain-text report.
    """

    def __init__(self, target_path: str, verbose: bool = False):
        # Wrap in Path so validate_target() can use pathlib's exists().
        self.target_path = Path(target_path)
        self.verbose = verbose
        # Populated by analyze(); returned verbatim from run().
        self.results = {}

    def run(self) -> Dict:
        """Execute the main functionality

        Runs validate -> analyze -> report; any exception aborts the
        process with exit code 1 instead of propagating to the caller.
        """
        print(f"π Running {self.__class__.__name__}...")
        print(f"π Target: {self.target_path}")
        try:
            self.validate_target()
            self.analyze()
            self.generate_report()
            # NOTE(review): this literal appears corrupted by an encoding
            # round-trip (split across lines) — confirm against the
            # original source before reformatting.
            print("β
Completed successfully!")
            return self.results
        except Exception as e:
            print(f"β Error: {e}")
            sys.exit(1)

    def validate_target(self):
        """Validate the target path exists and is accessible"""
        if not self.target_path.exists():
            raise ValueError(f"Target path does not exist: {self.target_path}")
        if self.verbose:
            print(f"β Target validated: {self.target_path}")

    def analyze(self):
        """Perform the main analysis or operation"""
        if self.verbose:
            print("π Analyzing...")
        # Main logic here
        # Placeholder: records status/target and an empty findings list;
        # concrete checks are expected to append to 'findings'.
        self.results['status'] = 'success'
        self.results['target'] = str(self.target_path)
        self.results['findings'] = []
        # Add analysis results
        if self.verbose:
            print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")

    def generate_report(self):
        """Generate and display the report"""
        # Plain-text summary banner printed to stdout.
        print("\n" + "="*50)
        print("REPORT")
        print("="*50)
        print(f"Target: {self.results.get('target')}")
        print(f"Status: {self.results.get('status')}")
        print(f"Findings: {len(self.results.get('findings', []))}")
        print("="*50 + "\n")
def main():
    """Command-line entry point: parse args, run the tool, emit results."""
    parser = argparse.ArgumentParser(description="Project Architect")
    parser.add_argument('target', help='Target path to analyze or process')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Enable verbose output')
    parser.add_argument('--json', action='store_true',
                        help='Output results as JSON')
    parser.add_argument('--output', '-o', help='Output file path')
    args = parser.parse_args()

    tool = ProjectArchitect(args.target, verbose=args.verbose)
    results = tool.run()

    # Optional machine-readable output; written to a file when --output is set.
    if args.json:
        serialized = json.dumps(results, indent=2)
        if args.output:
            with open(args.output, 'w') as f:
                f.write(serialized)
            print(f"Results written to {args.output}")
        else:
            print(serialized)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-architect/scripts/project_architect.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-backend/scripts/api_load_tester.py | #!/usr/bin/env python3
"""
Api Load Tester
Automated tool for senior backend tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class ApiLoadTester:
    """Main class for api load tester functionality

    Template pipeline: validate the target path, run a (placeholder)
    analysis, then print a plain-text report.
    """

    def __init__(self, target_path: str, verbose: bool = False):
        # Wrap in Path so validate_target() can use pathlib's exists().
        self.target_path = Path(target_path)
        self.verbose = verbose
        # Populated by analyze(); returned verbatim from run().
        self.results = {}

    def run(self) -> Dict:
        """Execute the main functionality

        Runs validate -> analyze -> report; any exception aborts the
        process with exit code 1 instead of propagating to the caller.
        """
        print(f"π Running {self.__class__.__name__}...")
        print(f"π Target: {self.target_path}")
        try:
            self.validate_target()
            self.analyze()
            self.generate_report()
            # NOTE(review): this literal appears corrupted by an encoding
            # round-trip (split across lines) — confirm against the
            # original source before reformatting.
            print("β
Completed successfully!")
            return self.results
        except Exception as e:
            print(f"β Error: {e}")
            sys.exit(1)

    def validate_target(self):
        """Validate the target path exists and is accessible"""
        if not self.target_path.exists():
            raise ValueError(f"Target path does not exist: {self.target_path}")
        if self.verbose:
            print(f"β Target validated: {self.target_path}")

    def analyze(self):
        """Perform the main analysis or operation"""
        if self.verbose:
            print("π Analyzing...")
        # Main logic here
        # Placeholder: records status/target and an empty findings list;
        # concrete checks are expected to append to 'findings'.
        self.results['status'] = 'success'
        self.results['target'] = str(self.target_path)
        self.results['findings'] = []
        # Add analysis results
        if self.verbose:
            print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")

    def generate_report(self):
        """Generate and display the report"""
        # Plain-text summary banner printed to stdout.
        print("\n" + "="*50)
        print("REPORT")
        print("="*50)
        print(f"Target: {self.results.get('target')}")
        print(f"Status: {self.results.get('status')}")
        print(f"Findings: {len(self.results.get('findings', []))}")
        print("="*50 + "\n")
def main():
    """Command-line entry point: parse args, run the tool, emit results."""
    parser = argparse.ArgumentParser(description="Api Load Tester")
    parser.add_argument('target', help='Target path to analyze or process')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Enable verbose output')
    parser.add_argument('--json', action='store_true',
                        help='Output results as JSON')
    parser.add_argument('--output', '-o', help='Output file path')
    args = parser.parse_args()

    tool = ApiLoadTester(args.target, verbose=args.verbose)
    results = tool.run()

    # Optional machine-readable output; written to a file when --output is set.
    if args.json:
        serialized = json.dumps(results, indent=2)
        if args.output:
            with open(args.output, 'w') as f:
                f.write(serialized)
            print(f"Results written to {args.output}")
        else:
            print(serialized)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-backend/scripts/api_load_tester.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-backend/scripts/api_scaffolder.py | #!/usr/bin/env python3
"""
Api Scaffolder
Automated tool for senior backend tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class ApiScaffolder:
    """Main class for api scaffolder functionality

    Template pipeline: validate the target path, run a (placeholder)
    analysis, then print a plain-text report.
    """

    def __init__(self, target_path: str, verbose: bool = False):
        # Wrap in Path so validate_target() can use pathlib's exists().
        self.target_path = Path(target_path)
        self.verbose = verbose
        # Populated by analyze(); returned verbatim from run().
        self.results = {}

    def run(self) -> Dict:
        """Execute the main functionality

        Runs validate -> analyze -> report; any exception aborts the
        process with exit code 1 instead of propagating to the caller.
        """
        print(f"π Running {self.__class__.__name__}...")
        print(f"π Target: {self.target_path}")
        try:
            self.validate_target()
            self.analyze()
            self.generate_report()
            # NOTE(review): this literal appears corrupted by an encoding
            # round-trip (split across lines) — confirm against the
            # original source before reformatting.
            print("β
Completed successfully!")
            return self.results
        except Exception as e:
            print(f"β Error: {e}")
            sys.exit(1)

    def validate_target(self):
        """Validate the target path exists and is accessible"""
        if not self.target_path.exists():
            raise ValueError(f"Target path does not exist: {self.target_path}")
        if self.verbose:
            print(f"β Target validated: {self.target_path}")

    def analyze(self):
        """Perform the main analysis or operation"""
        if self.verbose:
            print("π Analyzing...")
        # Main logic here
        # Placeholder: records status/target and an empty findings list;
        # concrete checks are expected to append to 'findings'.
        self.results['status'] = 'success'
        self.results['target'] = str(self.target_path)
        self.results['findings'] = []
        # Add analysis results
        if self.verbose:
            print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")

    def generate_report(self):
        """Generate and display the report"""
        # Plain-text summary banner printed to stdout.
        print("\n" + "="*50)
        print("REPORT")
        print("="*50)
        print(f"Target: {self.results.get('target')}")
        print(f"Status: {self.results.get('status')}")
        print(f"Findings: {len(self.results.get('findings', []))}")
        print("="*50 + "\n")
def main():
    """Command-line entry point: parse args, run the tool, emit results."""
    parser = argparse.ArgumentParser(description="Api Scaffolder")
    parser.add_argument('target', help='Target path to analyze or process')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Enable verbose output')
    parser.add_argument('--json', action='store_true',
                        help='Output results as JSON')
    parser.add_argument('--output', '-o', help='Output file path')
    args = parser.parse_args()

    tool = ApiScaffolder(args.target, verbose=args.verbose)
    results = tool.run()

    # Optional machine-readable output; written to a file when --output is set.
    if args.json:
        serialized = json.dumps(results, indent=2)
        if args.output:
            with open(args.output, 'w') as f:
                f.write(serialized)
            print(f"Results written to {args.output}")
        else:
            print(serialized)


if __name__ == '__main__':
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-backend/scripts/api_scaffolder.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-backend/scripts/database_migration_tool.py | #!/usr/bin/env python3
"""
Database Migration Tool
Automated tool for senior backend tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class DatabaseMigrationTool:
"""Main class for database migration tool functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Database Migration Tool"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = DatabaseMigrationTool(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-backend/scripts/database_migration_tool.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-computer-vision/scripts/dataset_pipeline_builder.py | #!/usr/bin/env python3
"""
Dataset Pipeline Builder
Production-grade tool for senior computer vision engineer
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class DatasetPipelineBuilder:
"""Production-grade dataset pipeline builder"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Dataset Pipeline Builder"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = DatasetPipelineBuilder(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-computer-vision/scripts/dataset_pipeline_builder.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-computer-vision/scripts/inference_optimizer.py | #!/usr/bin/env python3
"""
Inference Optimizer
Production-grade tool for senior computer vision engineer
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class InferenceOptimizer:
"""Production-grade inference optimizer"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Inference Optimizer"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = InferenceOptimizer(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-computer-vision/scripts/inference_optimizer.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-computer-vision/scripts/vision_model_trainer.py | #!/usr/bin/env python3
"""
Vision Model Trainer
Production-grade tool for senior computer vision engineer
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class VisionModelTrainer:
"""Production-grade vision model trainer"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Vision Model Trainer"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = VisionModelTrainer(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-computer-vision/scripts/vision_model_trainer.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-data-engineer/scripts/data_quality_validator.py | #!/usr/bin/env python3
"""
Data Quality Validator
Production-grade tool for senior data engineer
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class DataQualityValidator:
"""Production-grade data quality validator"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Data Quality Validator"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = DataQualityValidator(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-data-engineer/scripts/data_quality_validator.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-data-engineer/scripts/etl_performance_optimizer.py | #!/usr/bin/env python3
"""
Etl Performance Optimizer
Production-grade tool for senior data engineer
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class EtlPerformanceOptimizer:
"""Production-grade etl performance optimizer"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Etl Performance Optimizer"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = EtlPerformanceOptimizer(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-data-engineer/scripts/etl_performance_optimizer.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-data-engineer/scripts/pipeline_orchestrator.py | #!/usr/bin/env python3
"""
Pipeline Orchestrator
Production-grade tool for senior data engineer
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class PipelineOrchestrator:
"""Production-grade pipeline orchestrator"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Pipeline Orchestrator"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = PipelineOrchestrator(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-data-engineer/scripts/pipeline_orchestrator.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-data-scientist/scripts/experiment_designer.py | #!/usr/bin/env python3
"""
Experiment Designer
Production-grade tool for senior data scientist
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class ExperimentDesigner:
"""Production-grade experiment designer"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Experiment Designer"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = ExperimentDesigner(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-data-scientist/scripts/experiment_designer.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-data-scientist/scripts/feature_engineering_pipeline.py | #!/usr/bin/env python3
"""
Feature Engineering Pipeline
Production-grade tool for senior data scientist
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class FeatureEngineeringPipeline:
"""Production-grade feature engineering pipeline"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Feature Engineering Pipeline"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = FeatureEngineeringPipeline(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-data-scientist/scripts/feature_engineering_pipeline.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-data-scientist/scripts/model_evaluation_suite.py | #!/usr/bin/env python3
"""
Model Evaluation Suite
Production-grade tool for senior data scientist
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class ModelEvaluationSuite:
"""Production-grade model evaluation suite"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Model Evaluation Suite"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = ModelEvaluationSuite(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-data-scientist/scripts/model_evaluation_suite.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-devops/scripts/deployment_manager.py | #!/usr/bin/env python3
"""
Deployment Manager
Automated tool for senior devops tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class DeploymentManager:
"""Main class for deployment manager functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Deployment Manager"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = DeploymentManager(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-devops/scripts/deployment_manager.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-devops/scripts/pipeline_generator.py | #!/usr/bin/env python3
"""
Pipeline Generator
Automated tool for senior devops tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class PipelineGenerator:
"""Main class for pipeline generator functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Pipeline Generator"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = PipelineGenerator(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-devops/scripts/pipeline_generator.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-devops/scripts/terraform_scaffolder.py | #!/usr/bin/env python3
"""
Terraform Scaffolder
Automated tool for senior devops tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class TerraformScaffolder:
"""Main class for terraform scaffolder functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Terraform Scaffolder"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = TerraformScaffolder(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-devops/scripts/terraform_scaffolder.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-frontend/scripts/bundle_analyzer.py | #!/usr/bin/env python3
"""
Bundle Analyzer
Automated tool for senior frontend tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class BundleAnalyzer:
"""Main class for bundle analyzer functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Bundle Analyzer"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = BundleAnalyzer(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-frontend/scripts/bundle_analyzer.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-frontend/scripts/component_generator.py | #!/usr/bin/env python3
"""
Component Generator
Automated tool for senior frontend tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class ComponentGenerator:
"""Main class for component generator functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Component Generator"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = ComponentGenerator(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-frontend/scripts/component_generator.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-frontend/scripts/frontend_scaffolder.py | #!/usr/bin/env python3
"""
Frontend Scaffolder
Automated tool for senior frontend tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class FrontendScaffolder:
"""Main class for frontend scaffolder functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Frontend Scaffolder"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = FrontendScaffolder(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-frontend/scripts/frontend_scaffolder.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-fullstack/scripts/code_quality_analyzer.py | #!/usr/bin/env python3
"""
Code Quality Analyzer
Automated tool for senior fullstack tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class CodeQualityAnalyzer:
"""Main class for code quality analyzer functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Code Quality Analyzer"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = CodeQualityAnalyzer(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-fullstack/scripts/code_quality_analyzer.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-fullstack/scripts/fullstack_scaffolder.py | #!/usr/bin/env python3
"""
Fullstack Scaffolder
Automated tool for senior fullstack tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class FullstackScaffolder:
"""Main class for fullstack scaffolder functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Fullstack Scaffolder"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = FullstackScaffolder(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-fullstack/scripts/fullstack_scaffolder.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-fullstack/scripts/project_scaffolder.py | #!/usr/bin/env python3
"""
Project Scaffolder
Automated tool for senior fullstack tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class ProjectScaffolder:
"""Main class for project scaffolder functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Project Scaffolder"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = ProjectScaffolder(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-fullstack/scripts/project_scaffolder.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-ml-engineer/scripts/ml_monitoring_suite.py | #!/usr/bin/env python3
"""
Ml Monitoring Suite
Production-grade tool for senior ml/ai engineer
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class MlMonitoringSuite:
"""Production-grade ml monitoring suite"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Ml Monitoring Suite"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = MlMonitoringSuite(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-ml-engineer/scripts/ml_monitoring_suite.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-ml-engineer/scripts/model_deployment_pipeline.py | #!/usr/bin/env python3
"""
Model Deployment Pipeline
Production-grade tool for senior ml/ai engineer
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class ModelDeploymentPipeline:
"""Production-grade model deployment pipeline"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Model Deployment Pipeline"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = ModelDeploymentPipeline(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-ml-engineer/scripts/model_deployment_pipeline.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-ml-engineer/scripts/rag_system_builder.py | #!/usr/bin/env python3
"""
Rag System Builder
Production-grade tool for senior ml/ai engineer
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class RagSystemBuilder:
"""Production-grade rag system builder"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Rag System Builder"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = RagSystemBuilder(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-ml-engineer/scripts/rag_system_builder.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-prompt-engineer/scripts/agent_orchestrator.py | #!/usr/bin/env python3
"""
Agent Orchestrator
Production-grade tool for senior prompt engineer
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class AgentOrchestrator:
"""Production-grade agent orchestrator"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Agent Orchestrator"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = AgentOrchestrator(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-prompt-engineer/scripts/agent_orchestrator.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-prompt-engineer/scripts/prompt_optimizer.py | #!/usr/bin/env python3
"""
Prompt Optimizer
Production-grade tool for senior prompt engineer
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class PromptOptimizer:
"""Production-grade prompt optimizer"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Prompt Optimizer"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = PromptOptimizer(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-prompt-engineer/scripts/prompt_optimizer.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-prompt-engineer/scripts/rag_evaluator.py | #!/usr/bin/env python3
"""
Rag Evaluator
Production-grade tool for senior prompt engineer
"""
import os
import sys
import json
import logging
import argparse
from pathlib import Path
from typing import Dict, List, Optional
from datetime import datetime
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
class RagEvaluator:
"""Production-grade rag evaluator"""
def __init__(self, config: Dict):
self.config = config
self.results = {
'status': 'initialized',
'start_time': datetime.now().isoformat(),
'processed_items': 0
}
logger.info(f"Initialized {self.__class__.__name__}")
def validate_config(self) -> bool:
"""Validate configuration"""
logger.info("Validating configuration...")
# Add validation logic
logger.info("Configuration validated")
return True
def process(self) -> Dict:
"""Main processing logic"""
logger.info("Starting processing...")
try:
self.validate_config()
# Main processing
result = self._execute()
self.results['status'] = 'completed'
self.results['end_time'] = datetime.now().isoformat()
logger.info("Processing completed successfully")
return self.results
except Exception as e:
self.results['status'] = 'failed'
self.results['error'] = str(e)
logger.error(f"Processing failed: {e}")
raise
def _execute(self) -> Dict:
"""Execute main logic"""
# Implementation here
return {'success': True}
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Rag Evaluator"
)
parser.add_argument('--input', '-i', required=True, help='Input path')
parser.add_argument('--output', '-o', required=True, help='Output path')
parser.add_argument('--config', '-c', help='Configuration file')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose output')
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
try:
config = {
'input': args.input,
'output': args.output
}
processor = RagEvaluator(config)
results = processor.process()
print(json.dumps(results, indent=2))
sys.exit(0)
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-prompt-engineer/scripts/rag_evaluator.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-qa/scripts/coverage_analyzer.py | #!/usr/bin/env python3
"""
Coverage Analyzer
Automated tool for senior qa tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class CoverageAnalyzer:
"""Main class for coverage analyzer functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Coverage Analyzer"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = CoverageAnalyzer(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-qa/scripts/coverage_analyzer.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-qa/scripts/e2e_test_scaffolder.py | #!/usr/bin/env python3
"""
E2E Test Scaffolder
Automated tool for senior qa tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class E2ETestScaffolder:
"""Main class for e2e test scaffolder functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="E2E Test Scaffolder"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = E2ETestScaffolder(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-qa/scripts/e2e_test_scaffolder.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-qa/scripts/test_suite_generator.py | #!/usr/bin/env python3
"""
Test Suite Generator
Automated tool for senior qa tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class TestSuiteGenerator:
"""Main class for test suite generator functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Test Suite Generator"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = TestSuiteGenerator(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-qa/scripts/test_suite_generator.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-secops/scripts/compliance_checker.py | #!/usr/bin/env python3
"""
Compliance Checker
Automated tool for senior secops tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class ComplianceChecker:
"""Main class for compliance checker functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Compliance Checker"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = ComplianceChecker(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-secops/scripts/compliance_checker.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-secops/scripts/security_scanner.py | #!/usr/bin/env python3
"""
Security Scanner
Automated tool for senior secops tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class SecurityScanner:
"""Main class for security scanner functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Security Scanner"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = SecurityScanner(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-secops/scripts/security_scanner.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-secops/scripts/vulnerability_assessor.py | #!/usr/bin/env python3
"""
Vulnerability Assessor
Automated tool for senior secops tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class VulnerabilityAssessor:
"""Main class for vulnerability assessor functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Vulnerability Assessor"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = VulnerabilityAssessor(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-secops/scripts/vulnerability_assessor.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-security/scripts/pentest_automator.py | #!/usr/bin/env python3
"""
Pentest Automator
Automated tool for senior security tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class PentestAutomator:
"""Main class for pentest automator functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Pentest Automator"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = PentestAutomator(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-security/scripts/pentest_automator.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-security/scripts/security_auditor.py | #!/usr/bin/env python3
"""
Security Auditor
Automated tool for senior security tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class SecurityAuditor:
"""Main class for security auditor functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Security Auditor"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = SecurityAuditor(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-security/scripts/security_auditor.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/development/senior-security/scripts/threat_modeler.py | #!/usr/bin/env python3
"""
Threat Modeler
Automated tool for senior security tasks
"""
import os
import sys
import json
import argparse
from pathlib import Path
from typing import Dict, List, Optional
class ThreatModeler:
"""Main class for threat modeler functionality"""
def __init__(self, target_path: str, verbose: bool = False):
self.target_path = Path(target_path)
self.verbose = verbose
self.results = {}
def run(self) -> Dict:
"""Execute the main functionality"""
print(f"π Running {self.__class__.__name__}...")
print(f"π Target: {self.target_path}")
try:
self.validate_target()
self.analyze()
self.generate_report()
print("β
Completed successfully!")
return self.results
except Exception as e:
print(f"β Error: {e}")
sys.exit(1)
def validate_target(self):
"""Validate the target path exists and is accessible"""
if not self.target_path.exists():
raise ValueError(f"Target path does not exist: {self.target_path}")
if self.verbose:
print(f"β Target validated: {self.target_path}")
def analyze(self):
"""Perform the main analysis or operation"""
if self.verbose:
print("π Analyzing...")
# Main logic here
self.results['status'] = 'success'
self.results['target'] = str(self.target_path)
self.results['findings'] = []
# Add analysis results
if self.verbose:
print(f"β Analysis complete: {len(self.results.get('findings', []))} findings")
def generate_report(self):
"""Generate and display the report"""
print("\n" + "="*50)
print("REPORT")
print("="*50)
print(f"Target: {self.results.get('target')}")
print(f"Status: {self.results.get('status')}")
print(f"Findings: {len(self.results.get('findings', []))}")
print("="*50 + "\n")
def main():
"""Main entry point"""
parser = argparse.ArgumentParser(
description="Threat Modeler"
)
parser.add_argument(
'target',
help='Target path to analyze or process'
)
parser.add_argument(
'--verbose', '-v',
action='store_true',
help='Enable verbose output'
)
parser.add_argument(
'--json',
action='store_true',
help='Output results as JSON'
)
parser.add_argument(
'--output', '-o',
help='Output file path'
)
args = parser.parse_args()
tool = ThreatModeler(
args.target,
verbose=args.verbose
)
results = tool.run()
if args.json:
output = json.dumps(results, indent=2)
if args.output:
with open(args.output, 'w') as f:
f.write(output)
print(f"Results written to {args.output}")
else:
print(output)
if __name__ == '__main__':
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/development/senior-security/scripts/threat_modeler.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/skills/enterprise-communication/capa-officer/scripts/example.py | #!/usr/bin/env python3
"""
Example helper script for capa-officer
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
print("This is an example script for capa-officer")
# TODO: Add actual script logic here
# This could be data processing, file conversion, API calls, etc.
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/enterprise-communication/capa-officer/scripts/example.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/enterprise-communication/fda-consultant-specialist/scripts/example.py | #!/usr/bin/env python3
"""
Example helper script for fda-consultant-specialist
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
print("This is an example script for fda-consultant-specialist")
# TODO: Add actual script logic here
# This could be data processing, file conversion, API calls, etc.
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/enterprise-communication/fda-consultant-specialist/scripts/example.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/enterprise-communication/gdpr-dsgvo-expert/scripts/example.py | #!/usr/bin/env python3
"""
Example helper script for gdpr-dsgvo-expert
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
print("This is an example script for gdpr-dsgvo-expert")
# TODO: Add actual script logic here
# This could be data processing, file conversion, API calls, etc.
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/enterprise-communication/gdpr-dsgvo-expert/scripts/example.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/enterprise-communication/information-security-manager-iso27001/scripts/example.py | #!/usr/bin/env python3
"""
Example helper script for information-security-manager-iso27001
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
print("This is an example script for information-security-manager-iso27001")
# TODO: Add actual script logic here
# This could be data processing, file conversion, API calls, etc.
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/enterprise-communication/information-security-manager-iso27001/scripts/example.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/enterprise-communication/isms-audit-expert/scripts/example.py | #!/usr/bin/env python3
"""
Example helper script for isms-audit-expert
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
print("This is an example script for isms-audit-expert")
# TODO: Add actual script logic here
# This could be data processing, file conversion, API calls, etc.
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/enterprise-communication/isms-audit-expert/scripts/example.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/enterprise-communication/mdr-745-specialist/scripts/example.py | #!/usr/bin/env python3
"""
Example helper script for mdr-745-specialist
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
print("This is an example script for mdr-745-specialist")
# TODO: Add actual script logic here
# This could be data processing, file conversion, API calls, etc.
if __name__ == "__main__":
main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/enterprise-communication/mdr-745-specialist/scripts/example.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.