| sample_id (string) | text (string) | metadata (dict) | category (string) |
|---|---|---|---|
davila7/claude-code-templates:cli-tool/components/skills/enterprise-communication/qms-audit-expert/scripts/example.py | #!/usr/bin/env python3
"""
Example helper script for qms-audit-expert
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
    print("This is an example script for qms-audit-expert")
    # TODO: Add actual script logic here
    # This could be data processing, file conversion, API calls, etc.

if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/enterprise-communication/qms-audit-expert/scripts/example.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/enterprise-communication/quality-documentation-manager/scripts/example.py | #!/usr/bin/env python3
"""
Example helper script for quality-documentation-manager
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
    print("This is an example script for quality-documentation-manager")
    # TODO: Add actual script logic here
    # This could be data processing, file conversion, API calls, etc.

if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/enterprise-communication/quality-documentation-manager/scripts/example.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/enterprise-communication/quality-manager-qmr/scripts/example.py | #!/usr/bin/env python3
"""
Example helper script for quality-manager-qmr
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
    print("This is an example script for quality-manager-qmr")
    # TODO: Add actual script logic here
    # This could be data processing, file conversion, API calls, etc.

if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/enterprise-communication/quality-manager-qmr/scripts/example.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/enterprise-communication/quality-manager-qms-iso13485/scripts/example.py | #!/usr/bin/env python3
"""
Example helper script for quality-manager-qms-iso13485
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
    print("This is an example script for quality-manager-qms-iso13485")
    # TODO: Add actual script logic here
    # This could be data processing, file conversion, API calls, etc.

if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/enterprise-communication/quality-manager-qms-iso13485/scripts/example.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/skills/enterprise-communication/regulatory-affairs-head/scripts/regulatory_tracker.py | #!/usr/bin/env python3
"""
Regulatory Submission Tracking System
Automates monitoring and reporting of regulatory submission status
"""
import json
import datetime
from typing import Dict, List, Optional
from dataclasses import dataclass, asdict, field
from enum import Enum

class SubmissionType(Enum):
    FDA_510K = "FDA_510K"
    FDA_PMA = "FDA_PMA"
    FDA_DE_NOVO = "FDA_DE_NOVO"
    EU_MDR_CE = "EU_MDR_CE"
    ISO_CERTIFICATION = "ISO_CERTIFICATION"
    GLOBAL_REGULATORY = "GLOBAL_REGULATORY"

class SubmissionStatus(Enum):
    PLANNING = "PLANNING"
    IN_PREPARATION = "IN_PREPARATION"
    SUBMITTED = "SUBMITTED"
    UNDER_REVIEW = "UNDER_REVIEW"
    ADDITIONAL_INFO_REQUESTED = "ADDITIONAL_INFO_REQUESTED"
    APPROVED = "APPROVED"
    REJECTED = "REJECTED"
    WITHDRAWN = "WITHDRAWN"

@dataclass
class RegulatorySubmission:
    submission_id: str
    product_name: str
    submission_type: SubmissionType
    submission_status: SubmissionStatus
    target_market: str
    submission_date: Optional[datetime.date] = None
    target_approval_date: Optional[datetime.date] = None
    actual_approval_date: Optional[datetime.date] = None
    regulatory_authority: str = ""
    responsible_person: str = ""
    notes: str = ""
    # default_factory so each instance gets today's date at creation time,
    # not the date the class was first defined
    last_updated: datetime.date = field(default_factory=datetime.date.today)

class RegulatoryTracker:
    def __init__(self, data_file: str = "regulatory_submissions.json"):
        self.data_file = data_file
        self.submissions: Dict[str, RegulatorySubmission] = {}
        self.load_data()

    def load_data(self):
        """Load existing submission data from JSON file"""
        try:
            with open(self.data_file, 'r') as f:
                data = json.load(f)
            for sub_id, sub_data in data.items():
                # Convert date strings back to date objects
                for date_field in ['submission_date', 'target_approval_date',
                                   'actual_approval_date', 'last_updated']:
                    if sub_data.get(date_field):
                        sub_data[date_field] = datetime.datetime.strptime(
                            sub_data[date_field], '%Y-%m-%d').date()
                # Convert enums
                sub_data['submission_type'] = SubmissionType(sub_data['submission_type'])
                sub_data['submission_status'] = SubmissionStatus(sub_data['submission_status'])
                self.submissions[sub_id] = RegulatorySubmission(**sub_data)
        except FileNotFoundError:
            print("No existing data file found. Starting fresh.")
        except Exception as e:
            print(f"Error loading data: {e}")

    def save_data(self):
        """Save submission data to JSON file"""
        data = {}
        for sub_id, submission in self.submissions.items():
            sub_dict = asdict(submission)
            # Convert date objects to strings
            for date_field in ['submission_date', 'target_approval_date',
                               'actual_approval_date', 'last_updated']:
                if sub_dict.get(date_field):
                    sub_dict[date_field] = sub_dict[date_field].strftime('%Y-%m-%d')
            # Convert enums to strings
            sub_dict['submission_type'] = sub_dict['submission_type'].value
            sub_dict['submission_status'] = sub_dict['submission_status'].value
            data[sub_id] = sub_dict
        with open(self.data_file, 'w') as f:
            json.dump(data, f, indent=2)

    def add_submission(self, submission: RegulatorySubmission):
        """Add new regulatory submission"""
        self.submissions[submission.submission_id] = submission
        self.save_data()
        print(f"Added submission: {submission.submission_id}")

    def update_submission_status(self, submission_id: str,
                                 new_status: SubmissionStatus,
                                 notes: str = ""):
        """Update submission status"""
        if submission_id in self.submissions:
            self.submissions[submission_id].submission_status = new_status
            self.submissions[submission_id].notes = notes
            self.submissions[submission_id].last_updated = datetime.date.today()
            self.save_data()
            print(f"Updated {submission_id} status to {new_status.value}")
        else:
            print(f"Submission {submission_id} not found")

    def get_submissions_by_status(self, status: SubmissionStatus) -> List[RegulatorySubmission]:
        """Get all submissions with specific status"""
        return [sub for sub in self.submissions.values() if sub.submission_status == status]

    def get_overdue_submissions(self) -> List[RegulatorySubmission]:
        """Get submissions that are overdue"""
        today = datetime.date.today()
        overdue = []
        for submission in self.submissions.values():
            if (submission.target_approval_date and
                    submission.target_approval_date < today and
                    submission.submission_status not in [SubmissionStatus.APPROVED,
                                                         SubmissionStatus.REJECTED,
                                                         SubmissionStatus.WITHDRAWN]):
                overdue.append(submission)
        return overdue

    def generate_status_report(self) -> str:
        """Generate comprehensive status report"""
        report = []
        report.append("REGULATORY SUBMISSION STATUS REPORT")
        report.append("=" * 50)
        report.append(f"Generated: {datetime.date.today()}")
        report.append("")
        # Summary by status
        status_counts = {}
        for status in SubmissionStatus:
            count = len(self.get_submissions_by_status(status))
            if count > 0:
                status_counts[status] = count
        report.append("SUBMISSION STATUS SUMMARY:")
        for status, count in status_counts.items():
            report.append(f"  {status.value}: {count}")
        report.append("")
        # Overdue submissions
        overdue = self.get_overdue_submissions()
        if overdue:
            report.append("OVERDUE SUBMISSIONS:")
            for submission in overdue:
                days_overdue = (datetime.date.today() - submission.target_approval_date).days
                report.append(f"  {submission.submission_id} - {days_overdue} days overdue")
            report.append("")
        # Active submissions requiring attention
        active_statuses = [SubmissionStatus.SUBMITTED, SubmissionStatus.UNDER_REVIEW,
                           SubmissionStatus.ADDITIONAL_INFO_REQUESTED]
        active_submissions = []
        for status in active_statuses:
            active_submissions.extend(self.get_submissions_by_status(status))
        if active_submissions:
            report.append("ACTIVE SUBMISSIONS REQUIRING ATTENTION:")
            for submission in active_submissions:
                report.append(f"  {submission.submission_id} - {submission.product_name}")
                report.append(f"    Status: {submission.submission_status.value}")
                report.append(f"    Target Date: {submission.target_approval_date}")
                report.append(f"    Authority: {submission.regulatory_authority}")
            report.append("")
        return "\n".join(report)

def main():
    """Main function for command-line usage"""
    tracker = RegulatoryTracker()
    # Generate and print status report
    print(tracker.generate_status_report())
    # Example: Add a new submission
    # new_submission = RegulatorySubmission(
    #     submission_id="SUB-2024-001",
    #     product_name="HealthTech Device X",
    #     submission_type=SubmissionType.FDA_510K,
    #     submission_status=SubmissionStatus.PLANNING,
    #     target_market="United States",
    #     target_approval_date=datetime.date(2024, 12, 31),
    #     regulatory_authority="FDA",
    #     responsible_person="John Doe"
    # )
    # tracker.add_submission(new_submission)

if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/enterprise-communication/regulatory-affairs-head/scripts/regulatory_tracker.py",
"license": "MIT License",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
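A minimal usage sketch for the tracker above. It assumes the file is importable as `regulatory_tracker.py`; the submission values mirror the commented-out example in `main()` and are purely illustrative. Running it writes `regulatory_submissions.json` in the working directory:

```python
import datetime

from regulatory_tracker import (
    RegulatoryTracker, RegulatorySubmission, SubmissionType, SubmissionStatus
)

tracker = RegulatoryTracker()  # loads or creates regulatory_submissions.json

tracker.add_submission(RegulatorySubmission(
    submission_id="SUB-2024-001",          # hypothetical values throughout
    product_name="HealthTech Device X",
    submission_type=SubmissionType.FDA_510K,
    submission_status=SubmissionStatus.PLANNING,
    target_market="United States",
    target_approval_date=datetime.date(2024, 12, 31),
    regulatory_authority="FDA",
))

tracker.update_submission_status("SUB-2024-001", SubmissionStatus.SUBMITTED)
print(tracker.generate_status_report())
```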
davila7/claude-code-templates:cli-tool/components/skills/enterprise-communication/risk-management-specialist/scripts/example.py | #!/usr/bin/env python3
"""
Example helper script for risk-management-specialist
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
    print("This is an example script for risk-management-specialist")
    # TODO: Add actual script logic here
    # This could be data processing, file conversion, API calls, etc.

if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/skills/enterprise-communication/risk-management-specialist/scripts/example.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
davila7/claude-code-templates:cli-tool/components/hooks/git/conventional-commits.py | #!/usr/bin/env python3
import json
import sys
import re
try:
    input_data = json.load(sys.stdin)
except json.JSONDecodeError as e:
    print(f"Error: Invalid JSON input: {e}", file=sys.stderr)
    sys.exit(1)

tool_name = input_data.get("tool_name", "")
tool_input = input_data.get("tool_input", {})
command = tool_input.get("command", "")

# Only validate git commit commands
if tool_name != "Bash" or "git commit" not in command:
    sys.exit(0)

# Extract commit message from -m flag
# Handle both -m "message" and -m 'message' formats
match = re.search(r'git commit.*?-m\s+["\']([^"\']+)["\']', command)
if not match:
    # Also try heredoc format: -m "$(cat <<'EOF' ... EOF)"
    heredoc_match = re.search(r'git commit.*?-m\s+"?\$\(cat\s+<<["\']?EOF["\']?\s*\n(.+?)\nEOF', command, re.DOTALL)
    if heredoc_match:
        commit_msg = heredoc_match.group(1).strip()
    else:
        sys.exit(0)  # Can't extract message, allow it
else:
    commit_msg = match.group(1)

# Check if message follows Conventional Commits format
# Format: type(scope)?: description
# Types: feat, fix, docs, style, refactor, perf, test, chore, ci, build, revert
conventional_pattern = r'^(feat|fix|docs|style|refactor|perf|test|chore|ci|build|revert)(\(.+\))?:\s.+'
if not re.match(conventional_pattern, commit_msg):
    reason = f"""❌ Invalid commit message format

Your message: {commit_msg}

Commit messages must follow Conventional Commits:
  type(scope): description

Types:
  feat: New feature
  fix: Bug fix
  docs: Documentation changes
  style: Code style changes (formatting)
  refactor: Code refactoring
  perf: Performance improvements
  test: Adding or updating tests
  chore: Maintenance tasks
  ci: CI/CD changes
  build: Build system changes
  revert: Revert previous commit

Examples:
  ✅ feat: add user authentication
  ✅ feat(auth): implement JWT tokens
  ✅ fix: resolve memory leak in parser
  ✅ fix(api): handle null responses
  ✅ docs: update API documentation

Invalid:
  ❌ Added new feature (no type)
  ❌ feat:add feature (missing space after colon)
  ❌ feature: add login (wrong type, use 'feat')

💡 Tip: Start your message with one of the types above followed by a colon and space."""
    output = {
        "hookSpecificOutput": {
            "hookEventName": "PreToolUse",
            "permissionDecision": "deny",
            "permissionDecisionReason": reason
        }
    }
    print(json.dumps(output))
    sys.exit(0)

# Allow the command
sys.exit(0)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/hooks/git/conventional-commits.py",
"license": "MIT License",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
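The hook above reads a PreToolUse event as JSON on stdin and prints a deny decision (or nothing) on stdout, so it can be exercised without Claude Code. A small test-harness sketch, assuming the script is saved locally as `conventional-commits.py`; the same harness works for the other two git hooks below:

```python
import json
import subprocess

event = {
    "tool_name": "Bash",
    "tool_input": {"command": 'git commit -m "added new feature"'},  # not Conventional Commits
}
proc = subprocess.run(
    ["python3", "conventional-commits.py"],
    input=json.dumps(event),
    capture_output=True,
    text=True,
)
# On a violation the hook prints a JSON deny decision; silence means the command is allowed
print(proc.stdout or "(allowed)")
```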
davila7/claude-code-templates:cli-tool/components/hooks/git/prevent-direct-push.py | #!/usr/bin/env python3
import json
import sys
import subprocess
try:
    input_data = json.load(sys.stdin)
except json.JSONDecodeError as e:
    print(f"Error: Invalid JSON input: {e}", file=sys.stderr)
    sys.exit(1)

tool_name = input_data.get("tool_name", "")
tool_input = input_data.get("tool_input", {})
command = tool_input.get("command", "")

# Only validate git push commands
if tool_name != "Bash" or "git push" not in command:
    sys.exit(0)

# Get current branch
try:
    current_branch = subprocess.check_output(
        ["git", "branch", "--show-current"],
        stderr=subprocess.DEVNULL,
        text=True
    ).strip()
except Exception:
    current_branch = ""

# Check if pushing to main or develop
push_cmd = command
is_force_push = "--force" in push_cmd or "-f" in push_cmd

# Check if command or current branch targets protected branches
targets_protected = (
    "origin main" in push_cmd or
    "origin develop" in push_cmd or
    current_branch in ["main", "develop"]
)

# Block direct push to main/develop (unless force push which is already dangerous)
if targets_protected and not is_force_push:
    if current_branch in ["main", "develop"] or "origin main" in push_cmd or "origin develop" in push_cmd:
        reason = f"""❌ Direct push to main/develop is not allowed!

Protected branches:
  - main (production)
  - develop (integration)

Git Flow workflow:
  1. Create a feature branch:
     /feature <name>
  2. Make your changes and commit
  3. Push feature branch:
     git push origin feature/<name>
  4. Create pull request:
     gh pr create
  5. After approval, merge with:
     /finish

For releases:
  /release <version> → PR → /finish

For hotfixes:
  /hotfix <name> → PR → /finish

Current branch: {current_branch}

💡 Use feature/release/hotfix branches instead of pushing directly to main/develop."""
        output = {
            "hookSpecificOutput": {
                "hookEventName": "PreToolUse",
                "permissionDecision": "deny",
                "permissionDecisionReason": reason
            }
        }
        print(json.dumps(output))
        sys.exit(0)

# Allow the command
sys.exit(0)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/hooks/git/prevent-direct-push.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
davila7/claude-code-templates:cli-tool/components/hooks/git/validate-branch-name.py | #!/usr/bin/env python3
import json
import sys
import re
try:
    input_data = json.load(sys.stdin)
except json.JSONDecodeError as e:
    print(f"Error: Invalid JSON input: {e}", file=sys.stderr)
    sys.exit(1)

tool_name = input_data.get("tool_name", "")
tool_input = input_data.get("tool_input", {})
command = tool_input.get("command", "")

# Only validate git checkout -b commands
if tool_name != "Bash" or "git checkout -b" not in command:
    sys.exit(0)

# Extract branch name
match = re.search(r'git checkout -b\s+([^\s]+)', command)
if not match:
    sys.exit(0)
branch_name = match.group(1)

# Allow main and develop branches
if branch_name in ["main", "develop"]:
    sys.exit(0)

# Validate Git Flow naming convention
if not re.match(r'^(feature|release|hotfix)/', branch_name):
    reason = f"""❌ Invalid Git Flow branch name: {branch_name}

Git Flow branches must follow these patterns:
  • feature/<descriptive-name>
  • release/v<MAJOR>.<MINOR>.<PATCH>
  • hotfix/<descriptive-name>

Examples:
  ✅ feature/user-authentication
  ✅ release/v1.2.0
  ✅ hotfix/critical-security-fix

Invalid:
  ❌ {branch_name} (missing Git Flow prefix)
  ❌ feat/something (use 'feature/' not 'feat/')
  ❌ fix/bug (use 'hotfix/' not 'fix/')

💡 Use Git Flow commands instead:
  /feature <name> - Create feature branch
  /release <version> - Create release branch
  /hotfix <name> - Create hotfix branch"""
    output = {
        "hookSpecificOutput": {
            "hookEventName": "PreToolUse",
            "permissionDecision": "deny",
            "permissionDecisionReason": reason
        }
    }
    print(json.dumps(output))
    sys.exit(0)

# Validate release version format
if branch_name.startswith("release/"):
    if not re.match(r'^release/v\d+\.\d+\.\d+(-[a-zA-Z0-9.]+)?$', branch_name):
        reason = f"""❌ Invalid release version: {branch_name}

Release branches must follow semantic versioning:
  release/vMAJOR.MINOR.PATCH[-prerelease]

Valid examples:
  ✅ release/v1.0.0
  ✅ release/v2.1.3
  ✅ release/v1.0.0-beta.1

Invalid:
  ❌ release/1.0.0 (missing 'v' prefix)
  ❌ release/v1.0 (incomplete version)
  ❌ {branch_name}

💡 Use: /release v1.2.0"""
        output = {
            "hookSpecificOutput": {
                "hookEventName": "PreToolUse",
                "permissionDecision": "deny",
                "permissionDecisionReason": reason
            }
        }
        print(json.dumps(output))
        sys.exit(0)

# Allow the command
sys.exit(0)
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/hooks/git/validate-branch-name.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
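The release-branch rule is just the regex in the hook above; a quick standalone check of how it classifies a few names:

```python
import re

# The same pattern the hook applies to release/* branches
pattern = r'^release/v\d+\.\d+\.\d+(-[a-zA-Z0-9.]+)?$'
for name in ["release/v1.2.0", "release/v1.0.0-beta.1", "release/1.0.0", "release/v1.0"]:
    verdict = "valid" if re.match(pattern, name) else "invalid"
    print(f"{name}: {verdict}")
# Expected: valid, valid, invalid (missing 'v'), invalid (incomplete version)
```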
davila7/claude-code-templates:cli-tool/components/sandbox/e2b/e2b-launcher.py | #!/usr/bin/env python3.11
"""
E2B Claude Code Sandbox Launcher
Executes Claude Code prompts in isolated E2B cloud sandbox
"""
import os
import sys
import json
import datetime
import re
import threading
import time
# Debug: Print Python path information
print(f"Python executable: {sys.executable}")
print(f"Python version: {sys.version}")
print(f"Python path: {sys.path[:3]}...") # Show first 3 paths
try:
    from e2b import Sandbox
    print("✅ E2B imported successfully")
except ImportError as e:
    print(f"❌ E2B import failed: {e}")
    print("Trying to install E2B...")
    import subprocess
    # Try different installation methods for different Python environments
    install_commands = [
        [sys.executable, '-m', 'pip', 'install', '--user', 'e2b'],  # User install first
        [sys.executable, '-m', 'pip', 'install', '--break-system-packages', 'e2b'],  # System packages
        [sys.executable, '-m', 'pip', 'install', 'e2b']  # Default fallback
    ]
    result = None
    for cmd in install_commands:
        print(f"Trying: {' '.join(cmd)}")
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode == 0:
            print("✅ Installation successful")
            break
        else:
            print(f"❌ Failed: {result.stderr.strip()[:100]}...")
    if result is None:
        result = subprocess.run([sys.executable, '-m', 'pip', 'install', 'e2b'],
                                capture_output=True, text=True)
    print(f"Install result: {result.returncode}")
    if result.stdout:
        print(f"Install stdout: {result.stdout}")
    if result.stderr:
        print(f"Install stderr: {result.stderr}")
    # Try importing again
    try:
        from e2b import Sandbox
        print("✅ E2B imported successfully after install")
    except ImportError as e2:
        print(f"❌ E2B still failed after install: {e2}")
        sys.exit(1)

# Try to import and use dotenv if available, but don't fail if it's not
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    # dotenv is optional since we can get keys from command line arguments
    pass

def main():
    # Parse command line arguments
    if len(sys.argv) < 2:
        print("Usage: python e2b-launcher.py <prompt> [components_to_install] [e2b_api_key] [anthropic_api_key]")
        sys.exit(1)
    prompt = sys.argv[1]
    components_to_install = sys.argv[2] if len(sys.argv) > 2 else ""
    # Get API keys from command line arguments or environment variables
    e2b_api_key = sys.argv[3] if len(sys.argv) > 3 else os.getenv('E2B_API_KEY')
    anthropic_api_key = sys.argv[4] if len(sys.argv) > 4 else os.getenv('ANTHROPIC_API_KEY')
    if not e2b_api_key:
        print("Error: E2B API key is required")
        print("Provide via command line argument or E2B_API_KEY environment variable")
        sys.exit(1)
    if not anthropic_api_key:
        print("Error: Anthropic API key is required")
        print("Provide via command line argument or ANTHROPIC_API_KEY environment variable")
        sys.exit(1)
    try:
        # Create E2B sandbox with Claude Code template with retry logic
        print("🚀 Creating E2B sandbox with Claude Code...")
        # Try creating sandbox with retries for WebSocket issues
        max_retries = 3
        retry_count = 0
        sbx = None
        while retry_count < max_retries and sbx is None:
            try:
                if retry_count > 0:
                    print(f"🔄 Retry {retry_count}/{max_retries - 1} - WebSocket connection...")
                sbx = Sandbox.create(
                    template="anthropic-claude-code",
                    api_key=e2b_api_key,
                    envs={
                        'ANTHROPIC_API_KEY': anthropic_api_key,
                    },
                    timeout=600,  # 10 minutes timeout for longer operations
                )
                # Keep sandbox alive during operations
                print("🔄 Extending sandbox timeout to prevent early termination...")
                sbx.set_timeout(900)  # 15 minutes total
                print(f"✅ Sandbox created: {sbx.sandbox_id}")
                break
            except Exception as e:
                error_msg = str(e).lower()
                if "websocket" in error_msg or "connection" in error_msg or "timeout" in error_msg:
                    retry_count += 1
                    if retry_count < max_retries:
                        print(f"⚠️ WebSocket connection failed (attempt {retry_count}), retrying in 3 seconds...")
                        time.sleep(3)
                        continue
                    else:
                        print(f"❌ WebSocket connection failed after {max_retries} attempts")
                        print("💡 This might be due to:")
                        print("   • Network/firewall restrictions blocking WebSocket connections")
                        print("   • Temporary E2B service issues")
                        print("   • Corporate proxy blocking WebSocket traffic")
                        print("💡 Try:")
                        print("   • Running from a different network")
                        print("   • Checking your firewall/proxy settings")
                        print("   • Waiting a few minutes and trying again")
                        raise e
                else:
                    # Non-WebSocket error, don't retry
                    raise e
        if sbx is None:
            raise Exception("Failed to create sandbox after all retry attempts")
        # Install components if specified
        if components_to_install:
            print("📦 Installing specified components...")
            install_result = sbx.commands.run(
                f"npx claude-code-templates@latest {components_to_install}",
                timeout=120,  # 2 minutes for component installation
            )
            if install_result.exit_code != 0:
                print("⚠️ Component installation warnings:")
                print(install_result.stderr)
            else:
                print("✅ Components installed successfully")
        # Build enhanced prompt with instructions
        # Parse components to extract agents
        agents = []
        if components_to_install:
            # Split by '--' to get individual component types
            parts = components_to_install.split('--')
            for part in parts:
                part = part.strip()
                if part.startswith('agent '):
                    # Extract agent names after 'agent ' prefix
                    agent_names = part[6:].strip()  # Remove 'agent ' prefix
                    if agent_names:
                        # Split by comma if multiple agents
                        agents.extend([a.strip() for a in agent_names.split(',')])
        # Create enhanced prompt with proper instructions
        if agents:
            agent_list = ', '.join(agents)
            enhanced_prompt = f"""You are Claude Code, an AI assistant specialized in software development.

IMPORTANT INSTRUCTIONS:
1. Execute the user's request immediately and create the requested code/files
2. You have access to the following specialized agents: {agent_list}
3. Use these agents in the order you deem most appropriate for completing the task
4. Generate all necessary files and code to fulfill the request
5. Be proactive and create a complete, working implementation

USER REQUEST: {prompt}

Now, please execute this request and create all necessary files."""
        else:
            enhanced_prompt = f"""You are Claude Code, an AI assistant specialized in software development.

IMPORTANT INSTRUCTIONS:
1. Execute the user's request immediately and create the requested code/files
2. Generate all necessary files and code to fulfill the request
3. Be proactive and create a complete, working implementation
4. Don't just acknowledge the request - actually create the implementation

USER REQUEST: {prompt}

Now, please execute this request and create all necessary files."""
        # Execute Claude Code with the enhanced prompt
        print(f"🤖 Executing Claude Code with prompt: '{prompt[:50]}{'...' if len(prompt) > 50 else ''}'")
        if agents:
            print(f"🤖 Using agents: {', '.join(agents)}")
        # First, check if Claude Code is installed and available
        print("🔍 Checking Claude Code installation...")
        check_result = sbx.commands.run("which claude", timeout=10)
        if check_result.exit_code == 0:
            print(f"✅ Claude found at: {check_result.stdout.strip()}")
        else:
            print("❌ Claude not found, checking PATH...")
            path_result = sbx.commands.run("echo $PATH", timeout=5)
            print(f"PATH: {path_result.stdout}")
            ls_result = sbx.commands.run("ls -la /usr/local/bin/ | grep claude", timeout=5)
            print(f"Claude binaries: {ls_result.stdout}")
        # Check current directory and permissions
        print("🔍 Checking sandbox environment...")
        pwd_result = sbx.commands.run("pwd", timeout=5)
        print(f"Current directory: {pwd_result.stdout.strip()}")
        whoami_result = sbx.commands.run("whoami", timeout=5)
        print(f"Current user: {whoami_result.stdout.strip()}")
        # Check if we can write to current directory
        test_write = sbx.commands.run("touch test_write.tmp && rm test_write.tmp", timeout=5)
        if test_write.exit_code == 0:
            print("✅ Write permissions OK")
        else:
            print("❌ Write permission issue")
        # Build Claude Code command with enhanced prompt and better error handling
        # Escape single quotes in the enhanced prompt
        escaped_prompt = enhanced_prompt.replace("'", "'\\''")
        claude_command = f"echo '{escaped_prompt}' | claude -p --dangerously-skip-permissions"
        # Show the original user prompt in the command display (not the enhanced version)
        display_prompt = prompt[:100] + '...' if len(prompt) > 100 else prompt
        print(f"📝 Running command: echo '{display_prompt}' | claude -p --dangerously-skip-permissions")
        # Show loading message with visual separation
        print("")
        print("=" * 60)
        print("☁️ EXECUTING CLAUDE CODE IN SECURE CLOUD SANDBOX")
        print("=" * 60)
        print("")
        print("   ⏳ Starting execution...")
        print("   🔒 Isolated E2B environment active")
        print("   📡 Streaming real-time output below:")
        print("")
        print("-" * 60)
        print("📺 LIVE OUTPUT:")
        print("-" * 60)
        # Collect output for later use
        stdout_buffer = []
        stderr_buffer = []
        # Track if we've received any output and last activity time
        has_output = [False]  # Use list to allow modification in nested function
        last_activity = [time.time()]
        execution_complete = [False]

        # Progress indicator thread
        def show_progress():
            """Show periodic progress updates if no output for a while"""
            progress_messages = [
                "⏳ Still processing...",
                "🤔 Claude Code is working on your request...",
                "⚙️ Analyzing requirements...",
                "🛠️ Building solution...",
                "📝 Generating code...",
                "🔍 Reviewing implementation..."
            ]
            message_index = 0
            while not execution_complete[0]:
                time.sleep(5)  # Check every 5 seconds
                # If no activity for 10 seconds and no output yet
                if not has_output[0] and (time.time() - last_activity[0]) > 10:
                    print(f"\n   {progress_messages[message_index % len(progress_messages)]}")
                    message_index += 1
                    last_activity[0] = time.time()

        # Start progress thread
        progress_thread = threading.Thread(target=show_progress, daemon=True)
        progress_thread.start()

        # Define callbacks for streaming output
        def on_stdout(data):
            """Handle stdout output in real-time"""
            if data:
                # Mark that we've received output and update activity time
                if not has_output[0]:
                    has_output[0] = True
                    print("\n🎯 Claude Code started responding:\n")
                last_activity[0] = time.time()
                # Print the data as it comes
                print(data, end='', flush=True)
                stdout_buffer.append(data)

        def on_stderr(data):
            """Handle stderr output in real-time"""
            if data:
                # Mark that we've received output and update activity time
                if not has_output[0]:
                    has_output[0] = True
                    print("\n🎯 Claude Code started responding:\n")
                last_activity[0] = time.time()
                # Print stderr with warning prefix
                if data.strip():
                    print(f"⚠️ {data}", end='', flush=True)
                stderr_buffer.append(data)

        # Execute with streaming output and extended timeout
        try:
            result = sbx.commands.run(
                claude_command,
                timeout=600,  # 10 minutes timeout for complex operations
                on_stdout=on_stdout,
                on_stderr=on_stderr
            )
        finally:
            # Mark execution as complete to stop progress thread
            execution_complete[0] = True
        # Join collected output
        full_stdout = ''.join(stdout_buffer)
        full_stderr = ''.join(stderr_buffer)
        # Print execution summary
        print("")
        print("-" * 60)
        print(f"📊 Command exit code: {result.exit_code}")
        # Since we already streamed the output, just show summary
        if full_stdout:
            print(f"📤 Total stdout: {len(full_stdout)} characters")
        if full_stderr:
            print(f"⚠️ Total stderr: {len(full_stderr)} characters")
        # List generated files
        print("=" * 60)
        print("📁 GENERATED FILES:")
        print("=" * 60)
        # More comprehensive file search - include jsx, tsx, and other common extensions
        files_result = sbx.commands.run("""find . -type f \\( \
            -name '*.html' -o -name '*.js' -o -name '*.jsx' -o \
            -name '*.ts' -o -name '*.tsx' -o \
            -name '*.css' -o -name '*.scss' -o -name '*.sass' -o \
            -name '*.py' -o -name '*.json' -o -name '*.md' -o \
            -name '*.vue' -o -name '*.svelte' -o \
            -name '*.yaml' -o -name '*.yml' -o \
            -name '*.xml' -o -name '*.txt' -o \
            -name '*.env' -o -name '*.env.example' -o \
            -name '*.sh' -o -name '*.bash' -o \
            -name '*.go' -o -name '*.rs' -o -name '*.java' -o \
            -name '*.php' -o -name '*.rb' -o -name '*.swift' \
            \\) ! -path '*/.npm/*' ! -path '*/.claude/*' ! -path '*/node_modules/*' | head -50""")
        if files_result.stdout.strip():
            print(files_result.stdout)
            # Download important files to local machine
            print("\n" + "=" * 60)
            print("💾 DOWNLOADING FILES TO LOCAL MACHINE:")
            print("=" * 60)
            # Create project directory with sandbox ID in current working directory
            project_dir = f"sandbox-{sbx.sandbox_id[:8]}"  # Use first 8 chars of sandbox ID
            local_output_dir = os.path.join(os.getcwd(), project_dir)  # Use current working directory
            # Ensure the project directory exists
            os.makedirs(local_output_dir, exist_ok=True)
            print(f"📁 Downloading files to project directory: {local_output_dir}")
            print(f"📂 Current working directory: {os.getcwd()}")
            files_to_download = files_result.stdout.strip().split('\n')
            for file_path in files_to_download:
                file_path = file_path.strip()
                if file_path and not file_path.startswith('./.npm/'):  # Skip npm cache files
                    try:
                        # Read file content from sandbox
                        file_content = sbx.commands.run(f"cat '{file_path}'", timeout=30)
                        if file_content.exit_code == 0:
                            # Preserve directory structure by removing leading ./
                            relative_path = file_path.lstrip('./')
                            local_file = os.path.join(local_output_dir, relative_path)
                            # Create directory structure if needed
                            os.makedirs(os.path.dirname(local_file), exist_ok=True)
                            # Write file locally
                            with open(local_file, 'w', encoding='utf-8') as f:
                                f.write(file_content.stdout)
                            print(f"✅ Downloaded: {file_path} → {local_file}")
                        else:
                            print(f"❌ Failed to read: {file_path}")
                    except Exception as e:
                        print(f"❌ Error downloading {file_path}: {e}")
            print(f"\n🎉 All files downloaded to: {os.path.abspath(local_output_dir)}")
        else:
            print("No common files generated")
        print("=" * 60)
        print("✅ Execution completed successfully")
        print(f"🏷️ Sandbox ID: {sbx.sandbox_id}")
        print("💡 Note: Sandbox will be automatically destroyed")
    except Exception as e:
        print(f"❌ Error executing Claude Code in sandbox: {str(e)}")
        sys.exit(1)
    finally:
        # Cleanup sandbox
        try:
            if 'sbx' in locals():
                sbx.kill()
                print("🧹 Sandbox cleaned up")
        except Exception as cleanup_error:
            print(f"⚠️ Cleanup warning: {cleanup_error}")

if __name__ == "__main__":
    main() | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/sandbox/e2b/e2b-launcher.py",
"license": "MIT License",
"lines": 368,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
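Stripped of the retry loop, streaming callbacks, and file download, the launcher's core is a create/run/kill cycle. A sketch of that flow, reusing only the SDK calls that appear above (whether their signatures match your installed `e2b` version is an assumption), with both API keys taken from the environment:

```python
import os

from e2b import Sandbox

# Both keys are assumed to be set in the environment
sbx = Sandbox.create(
    template="anthropic-claude-code",
    api_key=os.environ["E2B_API_KEY"],
    envs={"ANTHROPIC_API_KEY": os.environ["ANTHROPIC_API_KEY"]},
    timeout=600,
)
try:
    # Same non-interactive invocation the launcher builds
    result = sbx.commands.run(
        "echo 'write a hello world script' | claude -p --dangerously-skip-permissions",
        timeout=600,
    )
    print(result.exit_code)
    print(result.stdout)
finally:
    sbx.kill()  # the launcher does the same cleanup in its finally block
```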
davila7/claude-code-templates:cli-tool/components/settings/statusline/context-monitor.py | #!/usr/bin/env python3
"""
Claude Code Context Monitor
Real-time context usage monitoring with visual indicators and session analytics
"""
import json
import sys
import os
import re
import subprocess
def get_git_status():
    """Get git branch and change count for statusline."""
    try:
        # Check if inside a git repository
        subprocess.check_output(
            ["git", "rev-parse", "--git-dir"], stderr=subprocess.DEVNULL
        )
        # Get current branch
        branch = (
            subprocess.check_output(
                ["git", "branch", "--show-current"], stderr=subprocess.DEVNULL
            )
            .decode()
            .strip()
        )
        if not branch:
            return ""
        # Count changes
        changes = (
            subprocess.check_output(
                ["git", "status", "--porcelain"], stderr=subprocess.DEVNULL
            )
            .decode()
            .splitlines()
        )
        change_count = len(changes)
        # Color logic
        if change_count > 0:
            color = "\033[31m"  # Red = dirty
            suffix = f" ({change_count})"
        else:
            color = "\033[32m"  # Green = clean
            suffix = ""
        return f" \033[90m|\033[0m {color}🌿 {branch}{suffix}\033[0m"
    except Exception:
        return ""

def parse_context_from_transcript(transcript_path):
    """Parse context usage from transcript file."""
    if not transcript_path or not os.path.exists(transcript_path):
        return None
    try:
        with open(transcript_path, "r", encoding="utf-8", errors="replace") as f:
            lines = f.readlines()
        # Check last 15 lines for context information
        recent_lines = lines[-15:] if len(lines) > 15 else lines
        for line in reversed(recent_lines):
            try:
                data = json.loads(line.strip())
                # Method 1: Parse usage tokens from assistant messages
                if data.get("type") == "assistant":
                    message = data.get("message", {})
                    usage = message.get("usage", {})
                    if usage:
                        input_tokens = usage.get("input_tokens", 0)
                        cache_read = usage.get("cache_read_input_tokens", 0)
                        cache_creation = usage.get("cache_creation_input_tokens", 0)
                        # Estimate context usage (assume 200k context for Claude Sonnet)
                        total_tokens = input_tokens + cache_read + cache_creation
                        if total_tokens > 0:
                            percent_used = min(100, (total_tokens / 200000) * 100)
                            return {
                                "percent": percent_used,
                                "tokens": total_tokens,
                                "method": "usage",
                            }
                # Method 2: Parse system context warnings
                elif data.get("type") == "system_message":
                    content = data.get("content", "")
                    # "Context left until auto-compact: X%"
                    match = re.search(
                        r"Context left until auto-compact: (\d+)%", content
                    )
                    if match:
                        percent_left = int(match.group(1))
                        return {
                            "percent": 100 - percent_left,
                            "warning": "auto-compact",
                            "method": "system",
                        }
                    # "Context low (X% remaining)"
                    match = re.search(r"Context low \((\d+)% remaining\)", content)
                    if match:
                        percent_left = int(match.group(1))
                        return {
                            "percent": 100 - percent_left,
                            "warning": "low",
                            "method": "system",
                        }
            except (json.JSONDecodeError, KeyError, ValueError):
                continue
        return None
    except (FileNotFoundError, PermissionError):
        return None

def get_context_display(context_info):
    """Generate context display with visual indicators."""
    if not context_info:
        return "🔵 ???"
    percent = context_info.get("percent", 0)
    warning = context_info.get("warning")
    # Color and icon based on usage level
    if percent >= 95:
        icon, color = "🚨", "\033[31;1m"  # Blinking red
        alert = "CRIT"
    elif percent >= 90:
        icon, color = "🔴", "\033[31m"  # Red
        alert = "HIGH"
    elif percent >= 75:
        icon, color = "🟠", "\033[91m"  # Light red
        alert = ""
    elif percent >= 50:
        icon, color = "🟡", "\033[33m"  # Yellow
        alert = ""
    else:
        icon, color = "🟢", "\033[32m"  # Green
        alert = ""
    # Create progress bar
    segments = 8
    filled = int((percent / 100) * segments)
    bar = "█" * filled + "░" * (segments - filled)
    # Special warnings
    if warning == "auto-compact":
        alert = "AUTO-COMPACT!"
    elif warning == "low":
        alert = "LOW!"
    reset = "\033[0m"
    alert_str = f" {alert}" if alert else ""
    return f"{icon}{color}{bar}{reset} {percent:.0f}%{alert_str}"

def get_directory_display(workspace_data):
    """Get directory display name."""
    current_dir = workspace_data.get("current_dir", "")
    project_dir = workspace_data.get("project_dir", "")
    if current_dir and project_dir:
        if current_dir.startswith(project_dir):
            rel_path = current_dir[len(project_dir):].lstrip("/")
            return rel_path or os.path.basename(project_dir)
        else:
            return os.path.basename(current_dir)
    elif project_dir:
        return os.path.basename(project_dir)
    elif current_dir:
        return os.path.basename(current_dir)
    else:
        return "unknown"

def get_session_metrics(cost_data):
    """Get session metrics display."""
    if not cost_data:
        return ""
    metrics = []
    # Cost
    cost_usd = cost_data.get("total_cost_usd", 0)
    if cost_usd > 0:
        if cost_usd >= 0.10:
            cost_color = "\033[31m"  # Red for expensive
        elif cost_usd >= 0.05:
            cost_color = "\033[33m"  # Yellow for moderate
        else:
            cost_color = "\033[32m"  # Green for cheap
        cost_str = f"{cost_usd*100:.0f}¢" if cost_usd < 0.01 else f"${cost_usd:.3f}"
        metrics.append(f"{cost_color}💰 {cost_str}\033[0m")
    # Duration
    duration_ms = cost_data.get("total_duration_ms", 0)
    if duration_ms > 0:
        minutes = duration_ms / 60000
        if minutes >= 30:
            duration_color = "\033[33m"  # Yellow for long sessions
        else:
            duration_color = "\033[32m"  # Green
        if minutes < 1:
            duration_str = f"{duration_ms//1000}s"
        else:
            duration_str = f"{minutes:.0f}m"
        metrics.append(f"{duration_color}⏱ {duration_str}\033[0m")
    # Lines changed
    lines_added = cost_data.get("total_lines_added", 0)
    lines_removed = cost_data.get("total_lines_removed", 0)
    if lines_added > 0 or lines_removed > 0:
        net_lines = lines_added - lines_removed
        if net_lines > 0:
            lines_color = "\033[32m"  # Green for additions
        elif net_lines < 0:
            lines_color = "\033[31m"  # Red for deletions
        else:
            lines_color = "\033[33m"  # Yellow for neutral
        sign = "+" if net_lines >= 0 else ""
        metrics.append(f"{lines_color}📝 {sign}{net_lines}\033[0m")
    return f" \033[90m|\033[0m {' '.join(metrics)}" if metrics else ""

def main():
    try:
        # Read JSON input from Claude Code
        data = json.load(sys.stdin)
        # Extract information
        model_name = data.get("model", {}).get("display_name", "Claude")
        workspace = data.get("workspace", {})
        transcript_path = data.get("transcript_path", "")
        cost_data = data.get("cost", {})
        # Parse context usage
        context_info = parse_context_from_transcript(transcript_path)
        # Build status components
        context_display = get_context_display(context_info)
        directory = get_directory_display(workspace)
        session_metrics = get_session_metrics(cost_data)
        git_status = get_git_status()
        # Model display with context-aware coloring
        if context_info:
            percent = context_info.get("percent", 0)
            if percent >= 90:
                model_color = "\033[31m"  # Red
            elif percent >= 75:
                model_color = "\033[33m"  # Yellow
            else:
                model_color = "\033[32m"  # Green
            model_display = f"{model_color}[{model_name}]\033[0m"
        else:
            model_display = f"\033[94m[{model_name}]\033[0m"
        # Combine all components
        status_line = (
            f"{model_display} "
            f"\033[93m📁 {directory}\033[0m"
            f"{git_status} "
            f"🧠 {context_display}"
            f"{session_metrics}"
        )
        print(status_line)
    except Exception as e:
        # Fallback display on any error
        print(
            f"\033[94m[Claude]\033[0m \033[93m📁 {os.path.basename(os.getcwd())}\033[0m 🧠 \033[31m[Error: {str(e)[:20]}]\033[0m"
        )

if __name__ == "__main__":
    main()
| {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/settings/statusline/context-monitor.py",
"license": "MIT License",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
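The statusline receives one JSON payload per render on stdin. A sketch that feeds it a synthetic payload built only from the fields the script reads; the path `context-monitor.py` and all values are hypothetical:

```python
import json
import subprocess

# Only the fields context-monitor.py actually reads; all values are made up
payload = {
    "model": {"display_name": "Claude Sonnet"},
    "workspace": {"current_dir": "/tmp/demo/src", "project_dir": "/tmp/demo"},
    "transcript_path": "",  # no transcript, so context usage renders as unknown
    "cost": {
        "total_cost_usd": 0.042,
        "total_duration_ms": 95000,
        "total_lines_added": 120,
        "total_lines_removed": 30,
    },
}
proc = subprocess.run(
    ["python3", "context-monitor.py"],
    input=json.dumps(payload),
    capture_output=True,
    text=True,
)
print(proc.stdout)  # a single ANSI-colored status line
```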
davila7/claude-code-templates:cli-tool/components/agents/obsidian-ops-team/Scripts/daily_notes_connector.py | #!/usr/bin/env python3
"""
Daily Notes Connectivity Agent
Analyzes daily notes and creates meaningful connections between them and other vault content.
"""
import os
import re
import yaml
from datetime import datetime, timedelta
from pathlib import Path
from collections import defaultdict
import json
class DailyNotesConnector:
    def __init__(self, vault_path):
        self.vault_path = Path(vault_path)
        self.connections_made = 0
        self.notes_processed = 0
        self.patterns = {
            'project': r'(?:project|AI IDEAS|idea|experiment|build|develop)',
            'meeting': r'(?:meeting|call|discussion|client|consultation)',
            'technical': r'(?:MCP|LangChain|GraphRAG|AI|ML|model|agent|tool)',
            'client': r'(?:client|consulting|business|CamRohn)',
            'personal': r'(?:family|personal|reflection|stoic|goal)',
            'research': r'(?:research|paper|study|article|documentation)',
            'community': r'(?:Austin|LangChain|meetup|community|conference)'
        }
        self.connection_map = defaultdict(list)

    def find_daily_notes(self):
        """Find all daily notes across the vault."""
        daily_notes = []
        # Search patterns for daily notes
        patterns = [
            self.vault_path / "Daily Notes" / "*.md",
            self.vault_path / "REMOTE_VAULT01" / "Daily Notes" / "*.md",
            self.vault_path / "Daily Email" / "*.md",
            self.vault_path / "_PERSONAL_" / "JOURNAL" / "**" / "*.md"
        ]
        for pattern in patterns:
            for file_path in self.vault_path.glob(str(pattern).split(str(self.vault_path) + "/")[1]):
                # Check if filename matches date pattern
                if re.match(r'\d{4}-\d{2}-\d{2}', file_path.stem):
                    daily_notes.append(file_path)
        return sorted(daily_notes)

    def extract_frontmatter(self, file_path):
        """Extract frontmatter from a markdown file."""
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()
        if content.startswith('---'):
            try:
                end_index = content.index('---', 3)
                frontmatter_text = content[3:end_index].strip()
                return yaml.safe_load(frontmatter_text), content[end_index+3:]
            except Exception:
                return {}, content
        return {}, content

    def update_frontmatter(self, file_path, frontmatter, body):
        """Update the frontmatter of a file."""
        yaml_content = yaml.dump(frontmatter, default_flow_style=False, allow_unicode=True)
        new_content = f"---\n{yaml_content}---\n{body}"
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(new_content)

    def analyze_content(self, content):
        """Analyze content to identify topics and themes."""
        topics = defaultdict(int)
        for topic, pattern in self.patterns.items():
            # Match case-insensitively: several patterns contain uppercase
            # alternatives (MCP, CamRohn, ...) that would never hit lowercased text
            matches = re.findall(pattern, content, re.IGNORECASE)
            topics[topic] = len(matches)
        # Extract specific mentions
        mentions = {
            'projects': re.findall(r'\[\[([^]]+)\]\]', content),
            'headers': re.findall(r'^#+\s+(.+)$', content, re.MULTILINE),
            'urls': re.findall(r'https?://[^\s\]]+', content),
            'tags': re.findall(r'#(\w+)', content)
        }
        return topics, mentions

    def find_related_content(self, topics, mentions, current_file):
        """Find related content based on topics and mentions."""
        related = []
        # Map topics to vault directories
        topic_dirs = {
            'project': ['AI IDEAS', 'AI Development'],
            'meeting': ['CamRohn LLC/Client Work', 'Austin LangChain'],
            'technical': ['AI Development', 'Model Context Protocol (MCP)'],
            'client': ['CamRohn LLC', 'Second Opinion DDS'],
            'research': ['AI Articles and Research', 'Clippings'],
            'community': ['Austin LangChain', 'AI Conferences and Competitions']
        }
        # Find files based on dominant topics
        for topic, count in sorted(topics.items(), key=lambda x: x[1], reverse=True):
            if count > 0 and topic in topic_dirs:
                for dir_name in topic_dirs[topic]:
                    dir_path = self.vault_path / dir_name
                    if dir_path.exists():
                        # Add MOC if exists
                        moc_path = dir_path / f"MOC - {dir_name.split('/')[-1]}.md"
                        if moc_path.exists():
                            related.append((moc_path, f"{topic} reference"))
                        # Add specific mentioned files
                        for mention in mentions['projects']:
                            if dir_name in mention:
                                file_path = self.vault_path / f"{mention}.md"
                                if file_path.exists() and file_path != current_file:
                                    related.append((file_path, "direct mention"))
        return related[:10]  # Limit to top 10 connections

    def find_temporal_connections(self, file_path, all_notes):
        """Find temporal connections (previous/next days, weekly summaries)."""
        temporal = []
        # Extract date from filename
        date_match = re.match(r'(\d{4})-(\d{2})-(\d{2})', file_path.stem)
        if not date_match:
            return temporal
        current_date = datetime(int(date_match.group(1)),
                                int(date_match.group(2)),
                                int(date_match.group(3)))
        # Find previous and next days
        for days_offset in [-1, 1]:
            target_date = current_date + timedelta(days=days_offset)
            target_str = target_date.strftime('%Y-%m-%d')
            for note in all_notes:
                if target_str in note.stem:
                    temporal.append((note, f"{'Previous' if days_offset < 0 else 'Next'} day"))
                    break
        # Find weekly connections (same week)
        week_start = current_date - timedelta(days=current_date.weekday())
        week_end = week_start + timedelta(days=6)
        for note in all_notes:
            date_match = re.match(r'(\d{4})-(\d{2})-(\d{2})', note.stem)
            if date_match:
                note_date = datetime(int(date_match.group(1)),
                                     int(date_match.group(2)),
                                     int(date_match.group(3)))
                if week_start <= note_date <= week_end and note != file_path:
                    temporal.append((note, "Same week"))
        return temporal

    def process_daily_note(self, file_path, all_notes):
        """Process a single daily note and add connections."""
        print(f"Processing: {file_path.relative_to(self.vault_path)}")
        frontmatter, body = self.extract_frontmatter(file_path)
        topics, mentions = self.analyze_content(body)
        # Find related content
        content_related = self.find_related_content(topics, mentions, file_path)
        temporal_related = self.find_temporal_connections(file_path, all_notes)
        # Build related list
        new_related = []
        # Add temporal connections first
        for related_file, relation_type in temporal_related:
            if "Previous" in relation_type or "Next" in relation_type:
                relative_path = related_file.relative_to(self.vault_path)
                link = f"[[{relative_path.with_suffix('').as_posix()}]]"
                if relation_type == "Previous day":
                    new_related.insert(0, f"{link} # {relation_type}")
                else:
                    new_related.append(f"{link} # {relation_type}")
        # Add content-based connections
        for related_file, relation_type in content_related:
            relative_path = related_file.relative_to(self.vault_path)
            link = f"[[{relative_path.with_suffix('').as_posix()}]]"
            comment = f" # {relation_type.title()}"
            new_related.append(f"{link}{comment}")
        # Update frontmatter if we found new connections
        if new_related:
            existing_related = frontmatter.get('related', [])
            if isinstance(existing_related, list):
                # Merge and deduplicate while preserving order
                combined = existing_related + new_related
                seen = set()
                all_related = []
                for item in combined:
                    if item not in seen:
                        seen.add(item)
                        all_related.append(item)
            else:
                all_related = new_related
            frontmatter['related'] = all_related
            self.update_frontmatter(file_path, frontmatter, body)
            self.connections_made += len(new_related)
        self.notes_processed += 1
        # Track patterns for reporting
        for topic, count in topics.items():
            if count > 0:
                self.connection_map[topic].append(file_path.stem)

    def generate_report(self):
        """Generate a report of connections made."""
        report = f"""# Daily Notes Connectivity Report
Generated: {datetime.now().strftime('%Y-%m-%d %H:%M')}

## Summary
- Total daily notes processed: {self.notes_processed}
- Total connections created: {self.connections_made}
- Average connections per note: {self.connections_made / max(self.notes_processed, 1):.1f}

## Connection Patterns Discovered
"""
        for topic, dates in self.connection_map.items():
            if dates:
                report += f"### {topic.title()} Topics\n"
                report += f"Found in {len(dates)} daily notes:\n"
                # Show recent examples
                for date in sorted(dates)[-5:]:
                    report += f"- [[{date}]]\n"
                report += "\n"
        report += """## Themes Across Time Periods

### Recent Trends (Last 30 days)
"""
        # Analyze recent trends
        recent_date = datetime.now() - timedelta(days=30)
        recent_topics = defaultdict(int)
        for topic, dates in self.connection_map.items():
            for date_str in dates:
                try:
                    date_match = re.match(r'(\d{4})-(\d{2})-(\d{2})', date_str)
                    if date_match:
                        note_date = datetime(int(date_match.group(1)),
                                             int(date_match.group(2)),
                                             int(date_match.group(3)))
                        if note_date >= recent_date:
                            recent_topics[topic] += 1
                except ValueError:
                    pass
        for topic, count in sorted(recent_topics.items(), key=lambda x: x[1], reverse=True):
            report += f"- **{topic.title()}**: {count} occurrences\n"
        report += "\n## Recommendations\n\n"
        report += "1. Consider creating weekly/monthly summary notes to consolidate themes\n"
        report += "2. Review orphaned daily notes that lack connections\n"
        report += "3. Add more content to empty daily notes for better connectivity\n"
        return report

    def run(self):
        """Main execution method."""
        print("Daily Notes Connectivity Agent Starting...")
        print(f"Vault path: {self.vault_path}")
        # Find all daily notes
        daily_notes = self.find_daily_notes()
        print(f"Found {len(daily_notes)} daily notes")
        # Process each note
        for note in daily_notes:
            try:
                self.process_daily_note(note, daily_notes)
            except Exception as e:
                print(f"Error processing {note}: {e}")
        # Generate and save report
        report = self.generate_report()
        report_path = self.vault_path / "System_Files" / "Daily_Notes_Connectivity_Report.md"
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(report)
        print(f"\nComplete! Report saved to: {report_path}")
        print(f"Processed {self.notes_processed} notes, created {self.connections_made} connections")

if __name__ == "__main__":
    vault_path = "/Users/cam/VAULT01"
    connector = DailyNotesConnector(vault_path)
    connector.run() | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/agents/obsidian-ops-team/Scripts/daily_notes_connector.py",
"license": "MIT License",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
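A small sketch of the content analysis in isolation, assuming the class is importable as `daily_notes_connector.py` and pointing it at a placeholder vault path instead of the author's hard-coded `/Users/cam/VAULT01`:

```python
from pathlib import Path

from daily_notes_connector import DailyNotesConnector  # hypothetical import path

# analyze_content only needs a vault root to construct the instance
connector = DailyNotesConnector(Path.home() / "MyVault")
topics, mentions = connector.analyze_content(
    "## Meeting with client\nDiscussed the MCP server [[AI Development/Roadmap]] #project"
)
print(dict(topics))          # counts per theme, e.g. 'meeting', 'technical', 'client'
print(mentions["projects"])  # ['AI Development/Roadmap']
print(mentions["tags"])      # ['project']
```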
davila7/claude-code-templates:cli-tool/components/agents/obsidian-ops-team/Scripts/enhance_tag_standardizer.py | #!/usr/bin/env python3
"""
Enhanced Tag Standardizer for hierarchical tag structure.
Consolidates similar tags and applies hierarchical organization.
"""
import os
import re
import yaml
from pathlib import Path
from collections import defaultdict, Counter
class EnhancedTagStandardizer:
def __init__(self, vault_path):
self.vault_path = Path(vault_path)
self.files_updated = 0
# Enhanced mappings for better consolidation
self.enhanced_mappings = {
# AI hierarchy consolidation
'ai-development': 'ai/development',
'ai-ideas': 'idea', # Ideas are tracked with 'idea' tag
'ai-tools': 'ai/tools',
'ai-consulting': 'consulting',
'ai-courses': 'learning/course',
'ai-conferences-and-competitions': 'learning/conference',
'ai-articles-and-research': 'ai/research',
'ai-agent': 'ai/agents',
'ai-community': 'community',
'ai-integration': 'ai/development',
'ai-services': 'ai/tools',
# Business hierarchy
'business-strategy': 'business/strategy',
'business-development': 'business/development',
'business-intelligence': 'business/analytics',
'business-model': 'business/strategy',
'business-automation': 'automation',
'business-systems': 'business/systems',
'business-case': 'business/strategy',
'business-context': 'business',
'business-research': 'business/research',
'business-mapping': 'business/strategy',
'business-report': 'business/analytics',
'business-operations': 'business/operations',
'business-plan': 'business/strategy',
'business-transformation': 'business/strategy',
'business-solutions': 'business/solutions',
'business-assets': 'business/assets',
# Client work standardization
'client-work': 'client',
'client-materials': 'client',
'client-analytics': 'client',
'client-communication': 'client',
# Learning hierarchy
'tutorial/course': 'learning/course',
'learning-paths': 'learning',
'courses': 'learning/course',
'tutorials': 'tutorial',
'guides': 'tutorial',
'training': 'learning',
'certifications': 'learning/certification',
# Technical tags
'development-tools': 'ai/tools',
'tools': 'ai/tools',
'apis': 'api',
'api-integration': 'api',
'api-testing': 'testing',
# Content organization
'daily-notes': 'daily',
'daily-email': 'email',
'email-summary': 'email',
'email-processing': 'email',
'email-marketing': 'marketing',
# Web development
'web-development': 'development',
'web-presence': 'business/web-presence',
'_web-presence-development': 'business/web-presence',
# Personal tags
'_personal_': 'personal',
'personal-development': 'personal/development',
# Project management
'project-management': 'project',
'project-timeline': 'project',
# Marketing & content
'content-strategy': 'marketing/content',
'content-marketing': 'marketing/content',
'content-calendar': 'marketing/calendar',
'content-distribution': 'marketing/distribution',
'marketing-strategy': 'marketing/strategy',
# Technical infrastructure
'it-infrastructure': 'infrastructure',
'server-management': 'infrastructure',
'server-setup': 'infrastructure',
# Data and analytics
'data-processing': 'data',
'data-sources': 'data',
'data-security': 'security',
# Remove underscores from tags
'_tutorials_': 'tutorial',
'_business-formation_': 'business/formation',
# Standardize compound words
'meeting-notes': 'meeting',
'meetings': 'meeting',
# Standardize plural forms
'agents': 'ai/agents',
'templates': 'template',
'projects': 'project',
'tasks': 'action/todo',
'sources': 'source',
'systems': 'system',
'solutions': 'solution',
'recommendations': 'recommendation',
'transcripts': 'transcript',
'discussions': 'discussion',
'platforms': 'platform',
'frameworks': 'framework',
'pipelines': 'pipeline',
'servers': 'server',
'summaries': 'summary',
'conferences': 'conference',
'opportunities': 'opportunity',
'datasets': 'dataset',
# Consolidate similar concepts
'thought-leadership': 'authority-building',
'technical-authority': 'authority-building',
'brainstorming': 'idea',
'strategic-planning': 'strategy',
'strategic-decision-making': 'strategy',
'strategic-overview': 'strategy',
'strategic-connections': 'strategy',
# Standardize vendor/tool names
'anthropic_blog': 'anthropic',
'anthropic_github': 'anthropic',
'github_topic_-_mcp': 'mcp',
'github_topic_-_mcp_server': 'mcp',
'mcp_reddit': 'mcp',
'mcp_documentation': 'mcp',
'mcp_github_discussions': 'mcp',
'mcp-server': 'mcp',
'npm_-_mcp_packages': 'mcp',
'dev.to_mcp_tag': 'mcp',
'medium_-_mcp_topics': 'mcp',
'pulsemcp_blog': 'pulsemcp',
# Clean up system tags
'system_files': 'system',
'remote_vault01': 'remote-sync',
# Consolidate database tags
'graph-db': 'database',
'graph-databases': 'database',
'vector-databases': 'database',
'database-queries': 'database',
'database-updates': 'database',
# Consolidate AI concepts
'ai/rag': 'ai/embeddings',
'agentic-rag': 'ai/embeddings',
'knowledge-graph': 'graphrag',
'knowledge-network': 'graphrag',
'knowledge-management': 'knowledge-base',
# Family tags
'family-projects': 'family/projects',
'family/index': 'family',
# Visual content
'visual-assets': 'visual-assets',
'visual-organization': 'visual-assets',
'visual-learning': 'visual-assets',
'visual-search': 'visual-assets',
'image-gallery': 'gallery',
'image-generation': 'ai/tools',
'screenshots': 'visual-assets',
'screenshots-and-references': 'visual-assets',
'infographics': 'visual-assets',
'charts': 'visual-assets',
'images': 'visual-assets',
'snagit-captures': 'visual-assets',
# Analytics and metrics
'analytics': 'analytics',
'client-analytics': 'analytics',
'revenue-analytics': 'analytics',
'performance-metrics': 'analytics',
'financial-analysis': 'finance',
'roi-analysis': 'roi',
'roi-calculator': 'roi',
# Simplify compound concepts
'complexity-analysis': 'analysis',
'connection-analysis': 'analysis',
'network-analysis': 'analysis',
'schema-analysis': 'analysis',
'competitive-intelligence': 'analysis',
'competitor-tracking': 'analysis',
# Standardize workflow tags
'workflow': 'workflows',
# Clean up misc tags
'--ollama-deep-research': 'ollama',
'second-opinion-dds': 'dental',
'austin-langchain': 'community',
'the-build': 'build',
'mcpcentral.io': 'mcpcentral',
}
def extract_frontmatter(self, content):
"""Extract YAML frontmatter from content."""
if not content.strip().startswith('---'):
return None, content
lines = content.split('\n')
if len(lines) < 3:
return None, content
end_idx = None
for i in range(1, len(lines)):
if lines[i].strip() == '---':
end_idx = i
break
if end_idx is None:
return None, content
try:
frontmatter_text = '\n'.join(lines[1:end_idx])
frontmatter = yaml.safe_load(frontmatter_text)
remaining_content = '\n'.join(lines[end_idx + 1:])
return frontmatter, remaining_content
except yaml.YAMLError:
return None, content
def enhance_tag(self, tag):
"""Apply enhanced tag standardization."""
# Remove leading/trailing whitespace
tag = tag.strip()
# Remove hash if present
if tag.startswith('#'):
tag = tag[1:]
# Apply enhanced mappings
if tag in self.enhanced_mappings:
return self.enhanced_mappings[tag]
# Default: lowercase and replace spaces with hyphens
return tag.lower().replace(' ', '-')
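    # A minimal walkthrough of enhance_tag (hypothetical inputs, not from the
    # original script). Note that the mapping lookup is case-sensitive and runs
    # *before* the lowercase/hyphen fallback, so only exact keys are remapped:
    #
    #   enhance_tag('meeting-notes')  -> 'meeting'      (exact mapping hit)
    #   enhance_tag('#tasks')         -> 'action/todo'  (hash stripped, then mapped)
    #   enhance_tag('Graph-DB')       -> 'graph-db'     (no exact key; default rule,
    #                                                    result is not re-checked)
    #   enhance_tag('My New Topic')   -> 'my-new-topic' (default rule)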
def process_file(self, file_path):
"""Process a single file with enhanced tag standardization."""
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
frontmatter, remaining_content = self.extract_frontmatter(content)
if frontmatter is None or 'tags' not in frontmatter:
return False
# Get current tags
current_tags = frontmatter.get('tags', [])
if not current_tags:
return False
# Standardize tags
if isinstance(current_tags, list):
new_tags = []
changed = False
for tag in current_tags:
if isinstance(tag, str):
enhanced = self.enhance_tag(tag)
if enhanced != tag:
changed = True
if enhanced and enhanced not in new_tags:
new_tags.append(enhanced)
if changed:
# Update frontmatter
frontmatter['tags'] = new_tags
# Reconstruct content
new_frontmatter = yaml.dump(frontmatter, default_flow_style=False, sort_keys=False)
new_content = f"---\n{new_frontmatter}---\n{remaining_content}"
# Write back to file
with open(file_path, 'w', encoding='utf-8') as f:
f.write(new_content)
print(f"Enhanced: {file_path.relative_to(self.vault_path)}")
return True
return False
except Exception as e:
print(f"Error processing {file_path}: {e}")
return False
def process_vault(self):
"""Process all files in the vault."""
skip_dirs = {'.obsidian', '.trash', '.git'}
for file_path in self.vault_path.rglob('*.md'):
if any(skip_dir in file_path.parts for skip_dir in skip_dirs):
continue
if self.process_file(file_path):
self.files_updated += 1
return self.files_updated
def main():
vault_path = '/Users/cam/VAULT01'
print(f"Enhanced tag standardization for: {vault_path}")
print("This will consolidate similar tags and apply hierarchical organization")
print("-" * 60)
standardizer = EnhancedTagStandardizer(vault_path)
updated = standardizer.process_vault()
print("-" * 60)
print(f"Total files updated: {updated}")
if __name__ == '__main__':
main() | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/agents/obsidian-ops-team/Scripts/enhance_tag_standardizer.py",
"license": "MIT License",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/agents/obsidian-ops-team/Scripts/find_keyword_connections.py | #!/usr/bin/env python3
"""Find and implement keyword-based connections between files."""
import os
import re
from collections import Counter, defaultdict
from pathlib import Path
# Priority keywords to focus on
PRIORITY_KEYWORDS = {
'llm', 'langchain', 'langgraph', 'rag', 'embedding', 'vector',
'agent', 'automation', 'workflow', 'pipeline', 'mcp', 'api',
'anthropic', 'openai', 'google', 'claude', 'gpt', 'model',
'context', 'protocol', 'fastapi', 'docker', 'cloudflare',
'supabase', 'integration', 'framework', 'retrieval', 'augmented',
'generation', 'prompt', 'engineering', 'multimodal', 'function',
'calling', 'tool', 'use', 'chain', 'thought', 'reasoning'
}
def extract_keywords_from_file(file_path):
"""Extract meaningful keywords from a file."""
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read().lower()
# Extract words (alphanumeric plus hyphens)
words = re.findall(r'\b[a-z0-9-]+\b', content)
# Filter for priority keywords and count occurrences
keyword_counts = Counter()
for word in words:
if word in PRIORITY_KEYWORDS:
keyword_counts[word] += 1
return keyword_counts
    except (OSError, UnicodeDecodeError):
        # Unreadable or non-text file: treat it as having no keywords
        return Counter()
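# Illustrative result of the extraction above, assuming a note containing the
# (hypothetical) sentence "Using LangChain with an LLM agent and a vector
# embedding pipeline": after lowercasing, only words found in PRIORITY_KEYWORDS
# are counted, yielding
#   Counter({'langchain': 1, 'llm': 1, 'agent': 1, 'vector': 1,
#            'embedding': 1, 'pipeline': 1})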
def find_keyword_connections(vault_root):
"""Find files that share multiple priority keywords."""
search_dirs = [
vault_root / "AI Development",
vault_root / "AI Articles and Research",
vault_root / "AI IDEAS",
vault_root / "AI Courses",
vault_root / "CamRohn LLC",
vault_root / "Clippings"
]
# Collect keyword data for all files
file_keywords = {}
print("Analyzing files for keywords...")
for search_dir in search_dirs:
if not search_dir.exists():
continue
for root, dirs, files in os.walk(search_dir):
for file in files:
if file.endswith('.md'):
file_path = Path(root) / file
keywords = extract_keywords_from_file(file_path)
if keywords:
file_keywords[file_path] = keywords
print(f"Analyzed {len(file_keywords)} files")
# Find connections between files
connections = []
processed_pairs = set()
files = list(file_keywords.keys())
for i, file1 in enumerate(files):
if i % 100 == 0:
print(f"Processing file {i+1}/{len(files)}...")
        for file2 in files[i+1:]:
# Skip if already processed
pair = tuple(sorted([str(file1), str(file2)]))
if pair in processed_pairs:
continue
processed_pairs.add(pair)
# Find common keywords
common_keywords = set(file_keywords[file1].keys()) & set(file_keywords[file2].keys())
if len(common_keywords) >= 5: # At least 5 common keywords
# Calculate relevance score
score = sum(
min(file_keywords[file1][kw], file_keywords[file2][kw])
for kw in common_keywords
)
connections.append({
'file1': file1,
'file2': file2,
'keywords': list(common_keywords),
'score': score
})
# Sort by score
connections.sort(key=lambda x: x['score'], reverse=True)
return connections
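# Scoring sketch with hypothetical counts: if note A mentions 'rag' 4x and
# 'agent' 2x while note B mentions 'rag' 1x and 'agent' 3x, the shared-keyword
# score for the pair is min(4, 1) + min(2, 3) = 3. A pair is only kept at all
# if the two notes share at least 5 priority keywords, so low-overlap pairs
# never reach the scoring step.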
def add_link_to_file(file_path, link_to_add, link_text=None):
"""Add a link to a file if it doesn't already exist."""
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
# Extract just the filename for the link
link_filename = Path(link_to_add).stem
# Check if link already exists
if f'[[{link_filename}]]' in content:
return False
# Find or create Related section
related_match = re.search(r'^## Related\s*$', content, re.MULTILINE)
if related_match:
# Insert after the Related header
insert_pos = related_match.end()
# Skip to next line
next_line = content.find('\n', insert_pos)
if next_line != -1:
insert_pos = next_line + 1
else:
# Add Related section at end
if not content.endswith('\n'):
content += '\n'
content += '\n## Related\n'
insert_pos = len(content)
# Create the link line
if link_text:
link_line = f"- [[{link_filename}]] - {link_text}\n"
else:
link_line = f"- [[{link_filename}]]\n"
# Insert the link
new_content = content[:insert_pos] + link_line + content[insert_pos:]
with open(file_path, 'w', encoding='utf-8') as f:
f.write(new_content)
return True
except Exception as e:
print(f"Error updating {file_path}: {e}")
return False
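# The link added above renders in the target note roughly like this
# (illustrative note title and keywords):
#
#   ## Related
#   - [[Agent Memory Patterns]] - Shares keywords: agent, embedding, llm, rag, vector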
def implement_keyword_connections(connections, limit=50):
"""Implement the top keyword-based connections."""
connections_made = 0
keyword_clusters = defaultdict(list)
for conn in connections[:limit]:
file1 = conn['file1']
file2 = conn['file2']
keywords = conn['keywords']
# Create descriptive text
keyword_text = f"Shares keywords: {', '.join(sorted(keywords)[:5])}"
# Add bidirectional links
if add_link_to_file(file1, file2, keyword_text):
connections_made += 1
print(f"Connected {file1.name} β {file2.name}")
# Track keyword clusters
for kw in keywords:
keyword_clusters[kw].append((file1.name, file2.name))
if add_link_to_file(file2, file1, keyword_text):
connections_made += 1
return connections_made, keyword_clusters
def main():
vault_root = Path("/Users/cam/VAULT01")
print("Finding keyword-based connections...")
connections = find_keyword_connections(vault_root)
print(f"\nFound {len(connections)} potential keyword connections")
# Show top connections
print("\nTop 10 keyword connections by relevance:")
for i, conn in enumerate(connections[:10]):
print(f"{i+1}. {conn['file1'].name} β {conn['file2'].name}")
print(f" Keywords ({len(conn['keywords'])}): {', '.join(sorted(conn['keywords'])[:8])}")
print(f" Score: {conn['score']}")
# Implement connections
print("\nImplementing connections...")
connections_made, keyword_clusters = implement_keyword_connections(connections)
print(f"\n=== Keyword Connection Report ===")
print(f"Total connections made: {connections_made}")
print("\nMost connected keywords:")
sorted_keywords = sorted(keyword_clusters.items(), key=lambda x: len(x[1]), reverse=True)[:10]
    for keyword, conn_list in sorted_keywords:
        print(f"  {keyword}: {len(conn_list)} connections")
# Find cross-domain connections
print("\nCross-domain connections:")
    for keyword, conn_list in sorted_keywords[:5]:
print(f"\n{keyword}-related cross-domain connections:")
for file1_name, file2_name in conn_list[:3]: # Show first 3 for each keyword
print(f" - {file1_name} β {file2_name}")
if __name__ == "__main__":
main() | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/agents/obsidian-ops-team/Scripts/find_keyword_connections.py",
"license": "MIT License",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/agents/obsidian-ops-team/Scripts/fix_quoted_tags.py | #!/usr/bin/env python3
"""
Fix quoted tags in frontmatter by converting single-quoted tags to unquoted format.
"""
import os
import re
import yaml
from pathlib import Path
def fix_quoted_tags(vault_path):
"""Fix single-quoted tags in all markdown files."""
vault_path = Path(vault_path)
files_updated = 0
for file_path in vault_path.rglob('*.md'):
if any(skip in file_path.parts for skip in ['.obsidian', '.trash', '.git']):
continue
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
# Check if file has frontmatter
if not content.strip().startswith('---'):
continue
# Find frontmatter boundaries
lines = content.split('\n')
if len(lines) < 3:
continue
end_idx = None
for i in range(1, len(lines)):
if lines[i].strip() == '---':
end_idx = i
break
if end_idx is None:
continue
# Extract frontmatter
frontmatter_text = '\n'.join(lines[1:end_idx])
# Check if tags line has quoted values
if re.search(r"tags:\s*\[['\"]", frontmatter_text):
# Parse frontmatter
try:
frontmatter = yaml.safe_load(frontmatter_text)
if frontmatter and 'tags' in frontmatter:
# Update tags (remove quotes)
tags = frontmatter['tags']
if isinstance(tags, list):
                        # Normalize entries to plain strings; the yaml.dump
                        # below re-emits them unquoted in block style
                        frontmatter['tags'] = [str(tag) for tag in tags]
# Reconstruct content
new_frontmatter = yaml.dump(frontmatter, default_flow_style=False, sort_keys=False)
remaining_content = '\n'.join(lines[end_idx + 1:])
new_content = f"---\n{new_frontmatter}---\n{remaining_content}"
# Write back
with open(file_path, 'w', encoding='utf-8') as f:
f.write(new_content)
files_updated += 1
print(f"Fixed: {file_path.relative_to(vault_path)}")
except yaml.YAMLError as e:
print(f"YAML error in {file_path}: {e}")
except Exception as e:
print(f"Error processing {file_path}: {e}")
return files_updated
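# Before/after sketch of the rewrite performed above (illustrative frontmatter):
# yaml.safe_load drops the quotes, and yaml.dump(default_flow_style=False)
# re-emits the list in block style.
#
#   ---                     ---
#   tags: ['ai', 'mcp']     tags:
#   ---                ->   - ai
#                           - mcp
#                           ---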
if __name__ == '__main__':
vault_path = '/Users/cam/VAULT01'
print(f"Fixing quoted tags in: {vault_path}")
updated = fix_quoted_tags(vault_path)
print(f"\nTotal files updated: {updated}") | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/agents/obsidian-ops-team/Scripts/fix_quoted_tags.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/agents/obsidian-ops-team/Scripts/implement_entity_connections.py | #!/usr/bin/env python3
"""Implement entity-based connections from Link Suggestions Report."""
import re
import os
from pathlib import Path
# Priority entities to focus on
PRIORITY_ENTITIES = {
'langchain', 'langgraph', 'llm', 'rag', 'embedding', 'vector',
'mcp', 'model context protocol', 'api integration', 'function calling',
'anthropic', 'openai', 'google', 'claude', 'gpt',
'autonomous agent', 'ai agent', 'chain of thought', 'prompt engineering',
'retrieval augmented', 'graphrag', 'multimodal', 'tool use'
}
def read_file(file_path):
"""Read file content."""
try:
with open(file_path, 'r', encoding='utf-8') as f:
return f.read()
    except (OSError, UnicodeDecodeError):
        return None
def write_file(file_path, content):
"""Write content to file."""
try:
with open(file_path, 'w', encoding='utf-8') as f:
f.write(content)
return True
    except OSError:
        return False
def find_file(filename, search_dirs):
"""Find a file in the vault."""
for dir_path in search_dirs:
for root, dirs, files in os.walk(dir_path):
for file in files:
if file == filename or file == filename + '.md':
return os.path.join(root, file)
return None
def add_link_to_file(file_path, link_to_add, link_text=None):
"""Add a link to a file if it doesn't already exist."""
content = read_file(file_path)
if not content:
return False
# Check if link already exists
if f'[[{link_to_add}]]' in content:
return False
# Find a good place to add the link
# Look for existing "Related:" or "See also:" sections
related_patterns = [
r'^#+\s*Related.*?$',
r'^#+\s*See also.*?$',
r'^#+\s*Links.*?$',
r'^#+\s*References.*?$'
]
insert_pos = None
for pattern in related_patterns:
match = re.search(pattern, content, re.MULTILINE | re.IGNORECASE)
if match:
# Find the end of this section
insert_pos = match.end()
# Skip to next line
next_line = content.find('\n', insert_pos)
if next_line != -1:
insert_pos = next_line + 1
break
# If no related section found, add at the end of frontmatter
if insert_pos is None:
# Find end of frontmatter
frontmatter_end = content.find('---', 3)
if frontmatter_end != -1:
insert_pos = content.find('\n', frontmatter_end + 3) + 1
else:
# No frontmatter, add at beginning
insert_pos = 0
# Create the link line
if link_text:
link_line = f"- [[{link_to_add}]] - {link_text}\n"
else:
link_line = f"- [[{link_to_add}]]\n"
# If we're not in a list section, create one
if insert_pos is None or (insert_pos > 0 and content[insert_pos-2:insert_pos] != '\n\n'):
# Check if we need to add a Related section
needs_section = True
for pattern in related_patterns:
if re.search(pattern, content, re.MULTILINE | re.IGNORECASE):
needs_section = False
break
if needs_section:
# Add at end of file with new Related section
if not content.endswith('\n'):
content += '\n'
content += '\n## Related\n'
content += link_line
else:
# Insert in existing section
new_content = content[:insert_pos] + link_line + content[insert_pos:]
content = new_content
else:
# Insert at found position
new_content = content[:insert_pos] + link_line + content[insert_pos:]
content = new_content
return write_file(file_path, content)
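# Illustrative link line produced by add_link_to_file for an entity match
# (hypothetical note name): it is placed under an existing "Related"-style
# header when one exists, otherwise a new "## Related" section is appended.
#
#   - [[LangGraph Basics]] - Related to langchain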
def implement_connections():
"""Implement priority entity connections."""
vault_root = Path("/Users/cam/VAULT01")
search_dirs = [
vault_root / "AI Development",
vault_root / "AI Articles and Research",
vault_root / "AI IDEAS",
vault_root / "AI Courses",
vault_root / "CamRohn LLC",
vault_root / "Clippings"
]
# Read the report
report_path = vault_root / "System_Files" / "Link_Suggestions_Report.md"
report_content = read_file(report_path)
if not report_content:
print("Could not read report")
return
# Parse entity connections
connections_made = 0
connections_by_entity = {}
# Find entity sections
entity_pattern = r'^### (.+)$'
    connection_pattern = r'- \[\[([^\]]+)\]\] ↔ \[\[([^\]]+)\]\]'
current_entity = None
for line in report_content.split('\n'):
# Check for entity header
entity_match = re.match(entity_pattern, line)
if entity_match:
current_entity = entity_match.group(1).strip().lower()
continue
# Check for connection
if current_entity and current_entity in [e.lower() for e in PRIORITY_ENTITIES]:
conn_match = re.match(connection_pattern, line)
if conn_match:
file1 = conn_match.group(1).strip()
file2 = conn_match.group(2).strip()
# Skip self-connections
if file1 == file2:
continue
# Find actual file paths
file1_path = find_file(file1 + '.md', search_dirs)
file2_path = find_file(file2 + '.md', search_dirs)
if file1_path and file2_path:
# Add bidirectional links
if add_link_to_file(file1_path, file2, f"Related to {current_entity}"):
connections_made += 1
if current_entity not in connections_by_entity:
connections_by_entity[current_entity] = 0
connections_by_entity[current_entity] += 1
print(f"Added link from {file1} to {file2}")
if add_link_to_file(file2_path, file1, f"Related to {current_entity}"):
connections_made += 1
if current_entity not in connections_by_entity:
connections_by_entity[current_entity] = 0
connections_by_entity[current_entity] += 1
print(f"Added link from {file2} to {file1}")
# Generate report
print("\n=== Connection Implementation Report ===")
print(f"Total connections made: {connections_made}")
print("\nConnections by entity:")
for entity, count in sorted(connections_by_entity.items(), key=lambda x: x[1], reverse=True):
print(f" {entity}: {count} connections")
# Find cross-domain connections
print("\nCross-domain connections created:")
# This would require more complex analysis of file paths
if __name__ == "__main__":
implement_connections() | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/agents/obsidian-ops-team/Scripts/implement_entity_connections.py",
"license": "MIT License",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/agents/obsidian-ops-team/Scripts/link_suggester.py | #!/usr/bin/env python3
"""
Link Suggester for Obsidian Vault
Identifies potential connections between notes based on content analysis.
"""
import os
import re
from pathlib import Path
from collections import defaultdict, Counter
import argparse
import json
class LinkSuggester:
def __init__(self, vault_path):
self.vault_path = Path(vault_path)
self.notes = {}
self.entity_mentions = defaultdict(set)
self.potential_links = []
# Common entities to look for
self.entities = {
'technologies': [
'langchain', 'langgraph', 'mcp', 'model context protocol',
'graphrag', 'openai', 'anthropic', 'claude', 'gpt', 'llm',
'ollama', 'huggingface', 'github', 'python', 'javascript',
'cloudflare', 'supabase', 'vector database', 'embedding',
'ai agent', 'autonomous agent', 'rag', 'retrieval augmented'
],
'concepts': [
'machine learning', 'deep learning', 'neural network',
'transformer', 'attention mechanism', 'fine-tuning',
'prompt engineering', 'chain of thought', 'reasoning',
'multimodal', 'text generation', 'code generation',
'tool use', 'function calling', 'api integration'
],
'companies': [
'google', 'microsoft', 'amazon', 'meta', 'apple',
'nvidia', 'intel', 'amd', 'tesla', 'stripe',
'y combinator', 'techstars', 'propel', 'dental'
],
'people': [
'andrew ng', 'geoffrey hinton', 'yann lecun', 'ilya sutskever',
'sam altman', 'dario amodei', 'demis hassabis', 'jensen huang'
]
}
# Flatten entities for easier searching
self.all_entities = []
for category, entities in self.entities.items():
self.all_entities.extend(entities)
def load_notes(self):
"""Load all markdown files and their content."""
skip_dirs = {'.obsidian', '.trash', 'System_Files', '.git'}
for file_path in self.vault_path.rglob('*.md'):
if any(skip_dir in file_path.parts for skip_dir in skip_dirs):
continue
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
# Extract title
title_match = re.search(r'^#\s+(.+)$', content, re.MULTILINE)
title = title_match.group(1) if title_match else file_path.stem
# Extract existing links
existing_links = set(re.findall(r'\[\[([^\]]+)\]\]', content))
self.notes[file_path] = {
'title': title,
'content': content.lower(),
'existing_links': existing_links,
'word_count': len(content.split())
}
except Exception as e:
print(f"Error reading {file_path}: {e}")
def find_entity_mentions(self):
"""Find mentions of entities across all notes."""
for file_path, note_data in self.notes.items():
content = note_data['content']
for entity in self.all_entities:
if entity in content:
self.entity_mentions[entity].add(file_path)
def suggest_links_by_entities(self):
"""Suggest links based on common entity mentions."""
suggestions = []
for entity, files in self.entity_mentions.items():
if len(files) >= 2: # Entity mentioned in at least 2 files
file_list = list(files)
for i, file1 in enumerate(file_list):
for file2 in file_list[i+1:]:
# Check if files don't already link to each other
note1 = self.notes[file1]
note2 = self.notes[file2]
if (note2['title'] not in note1['existing_links'] and
note1['title'] not in note2['existing_links']):
suggestions.append({
'file1': file1,
'file2': file2,
'title1': note1['title'],
'title2': note2['title'],
'common_entity': entity,
'type': 'entity_mention',
'confidence': len(files) / 10 # Simple confidence score
})
return suggestions
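    # Illustrative suggestion produced above (hypothetical titles): two notes
    # that both mention 'langchain' but do not yet link to each other yield
    #   {'title1': 'LangGraph Basics', 'title2': 'Agent Memory Patterns',
    #    'common_entity': 'langchain', 'type': 'entity_mention',
    #    'confidence': 0.3}
    # where confidence = len(files) / 10, i.e. it grows with how many notes
    # mention the entity (0.3 assumes three mentioning files).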
def suggest_links_by_keywords(self):
"""Suggest links based on keyword overlap."""
suggestions = []
# Extract keywords from titles and content
for file_path, note_data in self.notes.items():
if note_data['word_count'] < 100: # Skip very short notes
continue
# Get keywords from title
title_words = set(re.findall(r'\b\w{4,}\b', note_data['title'].lower()))
# Find other notes with similar keywords
for other_path, other_data in self.notes.items():
if file_path == other_path:
continue
other_title_words = set(re.findall(r'\b\w{4,}\b', other_data['title'].lower()))
# Check for keyword overlap
common_words = title_words.intersection(other_title_words)
if len(common_words) >= 2: # At least 2 common significant words
# Check if files don't already link to each other
if (other_data['title'] not in note_data['existing_links'] and
note_data['title'] not in other_data['existing_links']):
suggestions.append({
'file1': file_path,
'file2': other_path,
'title1': note_data['title'],
'title2': other_data['title'],
'common_words': list(common_words),
'type': 'keyword_overlap',
'confidence': len(common_words) / 5
})
return suggestions
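    # Example with hypothetical titles: "Vector Database Comparison" and
    # "Vector Database Migration" share the 4+ letter title words
    # {'vector', 'database'}, meeting the 2-word threshold, so the pair is
    # suggested with confidence 2 / 5 = 0.4. Shorter words ('the', 'for', ...)
    # never enter the comparison because of the \w{4,} filter.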
def find_orphaned_notes(self):
"""Find notes with no incoming or outgoing links."""
orphaned = []
for file_path, note_data in self.notes.items():
if len(note_data['existing_links']) == 0:
# Check if any other notes link to this one
mentioned_in = []
for other_path, other_data in self.notes.items():
if note_data['title'] in other_data['existing_links']:
mentioned_in.append(other_path)
if not mentioned_in:
orphaned.append({
'file': file_path,
'title': note_data['title'],
'word_count': note_data['word_count']
})
return orphaned
def analyze_vault(self):
"""Perform complete analysis of the vault."""
print("Loading notes...")
self.load_notes()
print(f"Loaded {len(self.notes)} notes")
print("Finding entity mentions...")
self.find_entity_mentions()
print("Generating link suggestions...")
entity_suggestions = self.suggest_links_by_entities()
keyword_suggestions = self.suggest_links_by_keywords()
orphaned_notes = self.find_orphaned_notes()
return {
'entity_suggestions': entity_suggestions,
'keyword_suggestions': keyword_suggestions,
'orphaned_notes': orphaned_notes,
'stats': {
'total_notes': len(self.notes),
'entity_suggestions': len(entity_suggestions),
'keyword_suggestions': len(keyword_suggestions),
'orphaned_notes': len(orphaned_notes)
}
}
def generate_report(self, results, output_file=None):
"""Generate a human-readable report."""
report = []
report.append("# Link Suggestions Report")
report.append(f"Generated for vault: {self.vault_path}")
report.append(f"Total notes analyzed: {results['stats']['total_notes']}")
report.append("")
# Entity-based suggestions
report.append("## Entity-Based Link Suggestions")
report.append(f"Found {len(results['entity_suggestions'])} potential connections")
report.append("")
# Group by entity
entity_groups = defaultdict(list)
for suggestion in results['entity_suggestions']:
entity_groups[suggestion['common_entity']].append(suggestion)
for entity, suggestions in sorted(entity_groups.items()):
report.append(f"### {entity.title()}")
for suggestion in suggestions[:5]: # Top 5 per entity
report.append(f"- [[{suggestion['title1']}]] β [[{suggestion['title2']}]]")
report.append("")
# Keyword-based suggestions
report.append("## Keyword-Based Link Suggestions")
report.append(f"Found {len(results['keyword_suggestions'])} potential connections")
report.append("")
# Sort by confidence
sorted_keywords = sorted(results['keyword_suggestions'],
key=lambda x: x['confidence'], reverse=True)
for suggestion in sorted_keywords[:20]: # Top 20
common_words = ', '.join(suggestion['common_words'])
report.append(f"- [[{suggestion['title1']}]] β [[{suggestion['title2']}]]")
report.append(f" Common words: {common_words}")
report.append("")
# Orphaned notes
report.append("## Orphaned Notes (No Links)")
report.append(f"Found {len(results['orphaned_notes'])} notes with no connections")
report.append("")
# Sort by word count (longer notes first)
sorted_orphaned = sorted(results['orphaned_notes'],
key=lambda x: x['word_count'], reverse=True)
for note in sorted_orphaned[:30]: # Top 30
report.append(f"- [[{note['title']}]] ({note['word_count']} words)")
report_text = '\n'.join(report)
if output_file:
with open(output_file, 'w', encoding='utf-8') as f:
f.write(report_text)
print(f"Report saved to: {output_file}")
return report_text
def main():
parser = argparse.ArgumentParser(description='Suggest links for Obsidian vault')
parser.add_argument('--vault', default='/Users/cam/VAULT01',
help='Path to Obsidian vault')
parser.add_argument('--output',
default='/Users/cam/VAULT01/System_Files/Link_Suggestions_Report.md',
help='Output file for report')
parser.add_argument('--json',
help='Output JSON file for programmatic use')
args = parser.parse_args()
suggester = LinkSuggester(args.vault)
results = suggester.analyze_vault()
# Generate report
report = suggester.generate_report(results, args.output)
# Save JSON if requested
if args.json:
with open(args.json, 'w') as f:
json.dump(results, f, indent=2, default=str)
print(f"JSON data saved to: {args.json}")
# Print summary
print("\n" + "="*50)
print("LINK SUGGESTIONS SUMMARY")
print("="*50)
print(f"Total notes: {results['stats']['total_notes']}")
print(f"Entity-based suggestions: {results['stats']['entity_suggestions']}")
print(f"Keyword-based suggestions: {results['stats']['keyword_suggestions']}")
print(f"Orphaned notes: {results['stats']['orphaned_notes']}")
if __name__ == '__main__':
main() | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/agents/obsidian-ops-team/Scripts/link_suggester.py",
"license": "MIT License",
"lines": 244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/agents/obsidian-ops-team/Scripts/metadata_adder.py | #!/usr/bin/env python3
"""
Metadata Adder for Obsidian Vault
Adds standardized frontmatter to markdown files that lack it.
"""
import os
import re
from datetime import datetime
from pathlib import Path
import argparse
class MetadataAdder:
def __init__(self, vault_path):
self.vault_path = Path(vault_path)
self.stats = {
'processed': 0,
'updated': 0,
'skipped': 0,
'errors': 0
}
def get_file_creation_date(self, file_path):
"""Get file creation date from filesystem."""
try:
stat = os.stat(file_path)
# Use birthtime on macOS, ctime on others
timestamp = stat.st_birthtime if hasattr(stat, 'st_birthtime') else stat.st_ctime
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d')
        except OSError:
            # Fall back to today's date if the file cannot be stat'd
            return datetime.now().strftime('%Y-%m-%d')
def determine_file_type(self, file_path):
"""Determine the type of note based on path and content."""
path_str = str(file_path).lower()
if 'moc' in path_str or 'map of content' in path_str:
return 'map-of-content'
elif 'daily notes' in path_str or 'daily note' in path_str:
return 'daily'
elif 'research' in path_str or 'articles' in path_str:
return 'research'
elif 'client' in path_str or 'camrohn llc' in path_str:
return 'client-work'
elif 'tutorial' in path_str or 'course' in path_str:
return 'tutorial'
elif 'idea' in path_str:
return 'idea'
elif 'meeting' in path_str:
return 'meeting'
elif 'email' in path_str:
return 'email'
else:
return 'note'
def generate_tags_from_path(self, file_path):
"""Generate tags based on file path."""
tags = []
path_parts = file_path.relative_to(self.vault_path).parts[:-1] # Exclude filename
# Map directory names to tags
tag_mapping = {
'ai development': 'ai/development',
'ai articles': 'ai/research',
'ai courses': 'tutorial/course',
'ai ideas': 'idea',
'camrohn llc': 'client',
'daily notes': 'daily',
'clippings': 'clippings',
'mcp': 'mcp',
'langchain': 'langchain',
'graphrag': 'graphrag'
}
for part in path_parts:
part_lower = part.lower()
for key, tag in tag_mapping.items():
if key in part_lower:
tags.append(tag)
break
# Add date tags for daily notes
if 'daily' in tags:
created_date = self.get_file_creation_date(file_path)
year_month = datetime.strptime(created_date, '%Y-%m-%d').strftime('%Y/%m')
tags.append(f'daily/{year_month}')
return list(set(tags)) # Remove duplicates
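    # Path-to-tag sketch for a hypothetical vault layout: a file at
    #   AI Development/LangChain/agents.md
    # picks up 'ai/development' (from "AI Development") and 'langchain' (from
    # "LangChain"); a note under "Daily Notes" gets 'daily' plus a
    # 'daily/YYYY/MM' tag derived from the file's creation date.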
def has_frontmatter(self, content):
"""Check if content already has frontmatter."""
return content.strip().startswith('---')
def create_frontmatter(self, file_path, existing_content):
"""Create appropriate frontmatter for the file."""
created_date = self.get_file_creation_date(file_path)
file_type = self.determine_file_type(file_path)
tags = self.generate_tags_from_path(file_path)
# Extract title from first heading or filename
title_match = re.search(r'^#\s+(.+)$', existing_content, re.MULTILINE)
if title_match:
title = title_match.group(1)
else:
title = file_path.stem.replace('_', ' ').replace('-', ' ')
frontmatter = f"""---
tags: {tags}
type: {file_type}
created: {created_date}
modified: {datetime.now().strftime('%Y-%m-%d')}
status: active
related: []
aliases: []
---
"""
return frontmatter
def process_file(self, file_path):
"""Process a single markdown file."""
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
if self.has_frontmatter(content):
self.stats['skipped'] += 1
return False
# Create and prepend frontmatter
frontmatter = self.create_frontmatter(file_path, content)
new_content = frontmatter + content
# Write back to file
with open(file_path, 'w', encoding='utf-8') as f:
f.write(new_content)
self.stats['updated'] += 1
return True
except Exception as e:
print(f"Error processing {file_path}: {e}")
self.stats['errors'] += 1
return False
def process_vault(self, dry_run=False):
"""Process all markdown files in the vault."""
# Directories to skip
skip_dirs = {'.obsidian', '.trash', 'System_Files', '.git'}
for file_path in self.vault_path.rglob('*.md'):
# Skip files in excluded directories
if any(skip_dir in file_path.parts for skip_dir in skip_dirs):
continue
self.stats['processed'] += 1
if dry_run:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
if not self.has_frontmatter(content):
print(f"Would update: {file_path.relative_to(self.vault_path)}")
self.stats['updated'] += 1
else:
self.stats['skipped'] += 1
else:
if self.process_file(file_path):
print(f"Updated: {file_path.relative_to(self.vault_path)}")
return self.stats
def main():
parser = argparse.ArgumentParser(description='Add metadata to Obsidian vault files')
parser.add_argument('--vault', default='/Users/cam/VAULT01',
help='Path to Obsidian vault')
parser.add_argument('--dry-run', action='store_true',
help='Show what would be updated without making changes')
args = parser.parse_args()
adder = MetadataAdder(args.vault)
print(f"Processing vault at: {args.vault}")
print("Dry run mode" if args.dry_run else "Making changes")
print("-" * 50)
stats = adder.process_vault(dry_run=args.dry_run)
print("-" * 50)
print(f"Files processed: {stats['processed']}")
print(f"Files updated: {stats['updated']}")
print(f"Files skipped (already have metadata): {stats['skipped']}")
print(f"Errors: {stats['errors']}")
if __name__ == '__main__':
main() | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/agents/obsidian-ops-team/Scripts/metadata_adder.py",
"license": "MIT License",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/agents/obsidian-ops-team/Scripts/moc_generator.py | #!/usr/bin/env python3
"""
MOC (Map of Content) Generator for Obsidian Vault
Automatically generates MOCs for directories and topics.
"""
import os
import re
from pathlib import Path
from datetime import datetime
from collections import defaultdict
import argparse
class MOCGenerator:
def __init__(self, vault_path):
self.vault_path = Path(vault_path)
self.directory_stats = {}
self.topic_clusters = defaultdict(list)
def analyze_directory(self, directory_path):
"""Analyze a directory and its contents."""
stats = {
'total_files': 0,
'md_files': 0,
'subdirectories': [],
'file_types': defaultdict(int),
'common_topics': defaultdict(int)
}
try:
for item in directory_path.iterdir():
if item.is_file():
stats['total_files'] += 1
if item.suffix == '.md':
stats['md_files'] += 1
# Extract topics from filename
topics = self.extract_topics_from_filename(item.name)
for topic in topics:
stats['common_topics'][topic] += 1
stats['file_types'][item.suffix] += 1
elif item.is_dir() and not item.name.startswith('.'):
stats['subdirectories'].append(item.name)
except PermissionError:
print(f"Permission denied: {directory_path}")
return stats
def extract_topics_from_filename(self, filename):
"""Extract potential topics from filename."""
# Remove extension and common prefixes
name = filename.replace('.md', '')
name = re.sub(r'^\d{4}-\d{2}-\d{2}[_-]', '', name) # Remove date prefix
name = re.sub(r'^MOC[_-]', '', name) # Remove MOC prefix
# Split on common separators and filter
words = re.split(r'[_\-\s]+', name)
topics = []
# Filter out common words and keep meaningful terms
stop_words = {'and', 'the', 'for', 'with', 'to', 'of', 'in', 'on', 'at', 'by'}
for word in words:
if len(word) > 2 and word.lower() not in stop_words:
topics.append(word.lower())
return topics
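    # Illustration with a hypothetical filename: '2024-01-15_RAG-Pipeline-Notes.md'
    # first loses its date prefix, then splits on separators into
    # ['RAG', 'Pipeline', 'Notes'], and finally lowercases to the topics
    # ['rag', 'pipeline', 'notes']; stop words and words of 2 letters or fewer
    # are discarded along the way.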
def generate_moc_content(self, directory_path, title, description=""):
"""Generate MOC content for a directory."""
stats = self.analyze_directory(directory_path)
# Create frontmatter
frontmatter = f"""---
tags: [MOC, {directory_path.name.lower().replace(' ', '-')}]
type: map-of-content
created: {datetime.now().strftime('%Y-%m-%d')}
modified: {datetime.now().strftime('%Y-%m-%d')}
status: active
cssclass: moc
aliases: [{title} Hub, {title} Overview]
hub_for: [{directory_path.name}]
related_mocs: []
---
"""
# Create content
content = f"""# {title} Map of Content
## Overview
{description if description else f"This MOC organizes all content related to {title.lower()}."}
**Directory**: `{directory_path.name}/`
**Total Files**: {stats['md_files']} markdown files
**Last Updated**: {datetime.now().strftime('%Y-%m-%d')}
"""
# Add subdirectories if any
if stats['subdirectories']:
content += "## Subdirectories\n\n"
for subdir in sorted(stats['subdirectories']):
content += f"### {subdir}\n"
content += f"- [[MOC - {subdir}|{subdir} Overview]]\n\n"
# Organize files by topic
content += self.organize_files_by_topic(directory_path, stats)
# Add common topics section
if stats['common_topics']:
content += "## Key Topics\n\n"
sorted_topics = sorted(stats['common_topics'].items(), key=lambda x: x[1], reverse=True)
for topic, count in sorted_topics[:10]: # Top 10 topics
content += f"- **{topic.title()}** ({count} files)\n"
content += "\n"
# Add templates and resources
content += """## Related MOCs
- [[MOC - AI Development|AI Development]]
- [[MOC - Learning Resources|Learning Resources]]
- [[Master Index|ποΈ Master Index]]
## Status & Progress
- [ ] Organize files by topic
- [ ] Add cross-references
- [ ] Review and update links
- [ ] Add examples and tutorials
## Next Steps
- Review all files in this directory
- Create sub-MOCs for large topic clusters
- Add relevant tags to all files
- Connect to related MOCs
---
*This MOC was auto-generated. Please review and customize as needed.*
"""
return frontmatter + content
def organize_files_by_topic(self, directory_path, stats):
"""Organize files by topic clusters."""
content = "## Content Organization\n\n"
# Group files by topic
topic_files = defaultdict(list)
ungrouped_files = []
try:
for file_path in directory_path.glob('*.md'):
if file_path.name.startswith('MOC'):
continue # Skip existing MOCs
topics = self.extract_topics_from_filename(file_path.name)
if topics:
# Use first topic as primary grouping
primary_topic = topics[0]
topic_files[primary_topic].append(file_path)
else:
ungrouped_files.append(file_path)
except Exception as e:
print(f"Error organizing files: {e}")
return content
# Sort topics by number of files
sorted_topics = sorted(topic_files.items(), key=lambda x: len(x[1]), reverse=True)
for topic, files in sorted_topics:
if len(files) > 1: # Only create sections for topics with multiple files
content += f"### {topic.title()}\n"
for file_path in sorted(files):
title = file_path.stem.replace('_', ' ').replace('-', ' ')
content += f"- [[{title}]]\n"
content += "\n"
# Add ungrouped files
if ungrouped_files:
content += "### Other Files\n"
for file_path in sorted(ungrouped_files):
title = file_path.stem.replace('_', ' ').replace('-', ' ')
content += f"- [[{title}]]\n"
content += "\n"
return content
def create_moc_file(self, directory_path, title, description="", output_path=None):
"""Create a MOC file for a directory."""
content = self.generate_moc_content(directory_path, title, description)
if output_path is None:
# Create MOCs in the centralized map-of-content directory
moc_dir = self.vault_path / "map-of-content"
moc_dir.mkdir(exist_ok=True)
output_path = moc_dir / f"MOC - {title}.md"
# Check if file already exists
if output_path.exists():
print(f"MOC already exists: {output_path}")
return False
try:
with open(output_path, 'w', encoding='utf-8') as f:
f.write(content)
print(f"Created MOC: {output_path}")
return True
except Exception as e:
print(f"Error creating MOC: {e}")
return False
def suggest_mocs(self):
"""Suggest MOCs for directories that don't have them."""
suggestions = []
# Skip these directories
skip_dirs = {'.obsidian', '.trash', 'System_Files', '.git'}
for directory in self.vault_path.iterdir():
if not directory.is_dir() or directory.name in skip_dirs:
continue
stats = self.analyze_directory(directory)
# Check if directory has enough content to warrant a MOC
if stats['md_files'] >= 3:
# Check if MOC already exists
existing_mocs = list(directory.glob('MOC*.md'))
if not existing_mocs:
suggestions.append({
'directory': directory,
'title': directory.name,
'file_count': stats['md_files'],
'subdirs': len(stats['subdirectories']),
'top_topics': sorted(stats['common_topics'].items(),
key=lambda x: x[1], reverse=True)[:5]
})
return suggestions
def main():
parser = argparse.ArgumentParser(description='Generate MOCs for Obsidian vault')
parser.add_argument('--vault', default='/Users/cam/VAULT01',
help='Path to Obsidian vault')
parser.add_argument('--directory',
help='Specific directory to create MOC for')
parser.add_argument('--title',
help='Title for the MOC')
parser.add_argument('--description',
help='Description for the MOC')
parser.add_argument('--suggest', action='store_true',
help='Suggest directories that need MOCs')
parser.add_argument('--create-all', action='store_true',
help='Create MOCs for all suggested directories')
args = parser.parse_args()
generator = MOCGenerator(args.vault)
if args.suggest or args.create_all:
suggestions = generator.suggest_mocs()
if args.suggest:
print("MOC Suggestions:")
print("="*50)
for suggestion in suggestions:
print(f"Directory: {suggestion['directory'].name}")
print(f" Files: {suggestion['file_count']}")
print(f" Subdirs: {suggestion['subdirs']}")
if suggestion['top_topics']:
topics = [f"{topic} ({count})" for topic, count in suggestion['top_topics']]
print(f" Topics: {', '.join(topics)}")
print()
if args.create_all:
for suggestion in suggestions:
generator.create_moc_file(
suggestion['directory'],
suggestion['title']
)
elif args.directory:
directory_path = Path(args.vault) / args.directory
if not directory_path.exists():
print(f"Directory not found: {directory_path}")
return
title = args.title or directory_path.name
description = args.description or ""
generator.create_moc_file(directory_path, title, description)
else:
parser.print_help()
if __name__ == '__main__':
main() | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/agents/obsidian-ops-team/Scripts/moc_generator.py",
"license": "MIT License",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/agents/obsidian-ops-team/Scripts/parse_keyword_connections.py | #!/usr/bin/env python3
"""Parse keyword connections from Link Suggestions Report."""
import re
import sys
from pathlib import Path
def parse_keyword_connections(report_path):
"""Parse keyword connections from the report and find meaningful ones."""
with open(report_path, 'r', encoding='utf-8') as f:
content = f.read()
# Find the keyword-based section
keyword_section_start = content.find("## Keyword-Based Link Suggestions")
if keyword_section_start == -1:
print("Could not find keyword-based section")
return []
# Find the next section (orphaned notes)
orphaned_section_start = content.find("## Orphaned Notes", keyword_section_start)
if orphaned_section_start == -1:
keyword_section = content[keyword_section_start:]
else:
keyword_section = content[keyword_section_start:orphaned_section_start]
# Pattern to match connections
    pattern = r'- \[\[([^\]]+)\]\] ↔ \[\[([^\]]+)\]\]\s*\n\s*Common words: ([^\n]+)'
connections = []
for match in re.finditer(pattern, keyword_section):
file1 = match.group(1).strip()
file2 = match.group(2).strip()
keywords = match.group(3).strip()
# Skip self-connections
if file1 == file2:
continue
# Count keywords
keyword_list = [k.strip() for k in keywords.split(',')]
keyword_count = len(keyword_list)
# Only include connections with 5+ keywords
if keyword_count >= 5:
connections.append({
'file1': file1,
'file2': file2,
'keywords': keyword_list,
'count': keyword_count
})
# Sort by keyword count descending
connections.sort(key=lambda x: x['count'], reverse=True)
return connections
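# Sketch of a report entry this parser accepts, matching the format emitted by
# link_suggester.py (note titles are hypothetical):
#
#   - [[Note One]] ↔ [[Note Two]]
#     Common words: agent, embedding, llm, pipeline, vector
#
# which parses to {'file1': 'Note One', 'file2': 'Note Two',
# 'keywords': ['agent', 'embedding', 'llm', 'pipeline', 'vector'], 'count': 5}
# and just clears the 5-keyword threshold.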
def main():
report_path = Path("/Users/cam/VAULT01/System_Files/Link_Suggestions_Report.md")
connections = parse_keyword_connections(report_path)
print(f"Found {len(connections)} meaningful keyword connections (5+ keywords)\n")
# Group by keyword themes
tech_keywords = {'llm', 'langchain', 'langgraph', 'rag', 'embedding', 'vector', 'agent', 'model', 'api', 'mcp'}
framework_keywords = {'langchain', 'langgraph', 'fastapi', 'docker', 'cloudflare', 'supabase'}
company_keywords = {'openai', 'anthropic', 'google', 'microsoft', 'meta'}
concept_keywords = {'automation', 'workflow', 'pipeline', 'integration', 'generation', 'retrieval'}
tech_connections = []
framework_connections = []
company_connections = []
concept_connections = []
for conn in connections:
keywords_lower = [k.lower() for k in conn['keywords']]
tech_score = len([k for k in keywords_lower if any(tech in k for tech in tech_keywords)])
framework_score = len([k for k in keywords_lower if any(fw in k for fw in framework_keywords)])
company_score = len([k for k in keywords_lower if any(comp in k for comp in company_keywords)])
concept_score = len([k for k in keywords_lower if any(conc in k for conc in concept_keywords)])
if tech_score >= 2:
tech_connections.append(conn)
if framework_score >= 2:
framework_connections.append(conn)
if company_score >= 1:
company_connections.append(conn)
if concept_score >= 2:
concept_connections.append(conn)
print("## High-Priority Technical Connections")
print(f"Found {len(tech_connections)} connections with technical keywords\n")
for i, conn in enumerate(tech_connections[:20]): # Top 20
print(f"{i+1}. [[{conn['file1']}]] β [[{conn['file2']}]]")
print(f" Keywords ({conn['count']}): {', '.join(conn['keywords'][:10])}")
print()
print("\n## Framework-Related Connections")
print(f"Found {len(framework_connections)} connections with framework keywords\n")
for i, conn in enumerate(framework_connections[:15]): # Top 15
print(f"{i+1}. [[{conn['file1']}]] β [[{conn['file2']}]]")
print(f" Keywords ({conn['count']}): {', '.join(conn['keywords'][:10])}")
print()
print("\n## Company/Provider Connections")
print(f"Found {len(company_connections)} connections with company keywords\n")
for i, conn in enumerate(company_connections[:10]): # Top 10
print(f"{i+1}. [[{conn['file1']}]] β [[{conn['file2']}]]")
print(f" Keywords ({conn['count']}): {', '.join(conn['keywords'][:10])}")
print()
print("\n## Concept/Workflow Connections")
print(f"Found {len(concept_connections)} connections with concept keywords\n")
for i, conn in enumerate(concept_connections[:10]): # Top 10
print(f"{i+1}. [[{conn['file1']}]] β [[{conn['file2']}]]")
print(f" Keywords ({conn['count']}): {', '.join(conn['keywords'][:10])}")
print()
if __name__ == "__main__":
main() | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/agents/obsidian-ops-team/Scripts/parse_keyword_connections.py",
"license": "MIT License",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
davila7/claude-code-templates:cli-tool/components/agents/obsidian-ops-team/Scripts/tag_standardizer.py | #!/usr/bin/env python3
"""
Tag Standardizer for Obsidian Vault
Normalizes and standardizes tags across all notes.
"""
import os
import re
import yaml
from pathlib import Path
from collections import defaultdict, Counter
import argparse
class TagStandardizer:
def __init__(self, vault_path):
self.vault_path = Path(vault_path)
self.tag_mappings = {}
self.tag_stats = Counter()
self.files_processed = 0
self.files_updated = 0
# Define standard tag mappings
self.standard_mappings = {
# Color codes to semantic tags
'#F0EEE6': 'clippings',
'#e0e0e0': 'reference',
'#f0f0f0': 'note',
# Technology standardization
'langchain': 'langchain',
'lang-chain': 'langchain',
'LangChain': 'langchain',
'langgraph': 'langgraph',
'lang-graph': 'langgraph',
'LangGraph': 'langgraph',
'mcp': 'mcp',
'MCP': 'mcp',
'model-context-protocol': 'mcp',
'Model Context Protocol': 'mcp',
'graphrag': 'graphrag',
'GraphRAG': 'graphrag',
'graph-rag': 'graphrag',
'openai': 'openai',
'OpenAI': 'openai',
'anthropic': 'anthropic',
'Anthropic': 'anthropic',
'claude': 'anthropic',
'Claude': 'anthropic',
'llm': 'ai/llm',
'LLM': 'ai/llm',
'ai-agents': 'ai/agents',
'AI Agents': 'ai/agents',
'embeddings': 'ai/embeddings',
'vector-db': 'ai/embeddings',
'rag': 'ai/embeddings',
'RAG': 'ai/embeddings',
# Common case standardizations
'MOC': 'moc',
'API': 'api',
'RSS': 'rss',
'UI': 'ui',
'AI': 'ai',
# Content type standardization
'research': 'research',
'Research': 'research',
'tutorial': 'tutorial',
'Tutorial': 'tutorial',
'how-to': 'tutorial',
'guide': 'tutorial',
'reference': 'reference',
'Reference': 'reference',
'docs': 'reference',
'documentation': 'reference',
'idea': 'idea',
'Idea': 'idea',
'ideas': 'idea',
'brainstorm': 'idea',
'concept': 'idea',
'meeting': 'meeting',
'Meeting': 'meeting',
'notes': 'meeting',
'email': 'email',
'Email': 'email',
'correspondence': 'email',
'daily': 'daily',
'Daily': 'daily',
'journal': 'daily',
'Journal': 'daily',
'log': 'daily',
# Business tags
'client': 'client',
'Client': 'client',
'business': 'business',
'Business': 'business',
'startup': 'startup',
'Startup': 'startup',
'freelance': 'freelance',
'Freelance': 'freelance',
'project': 'project',
'Project': 'project',
# Status tags
'active': 'status/active',
'Active': 'status/active',
'draft': 'status/draft',
'Draft': 'status/draft',
'completed': 'status/completed',
'Completed': 'status/completed',
'archived': 'status/archived',
'Archived': 'status/archived',
'todo': 'action/todo',
'TODO': 'action/todo',
'follow-up': 'action/follow-up',
'Follow-up': 'action/follow-up',
# Learning tags
'course': 'learning/course',
'Course': 'learning/course',
'certification': 'learning/certification',
'Certification': 'learning/certification',
'book': 'learning/book',
'Book': 'learning/book',
'video': 'learning/video',
'Video': 'learning/video',
'podcast': 'learning/podcast',
'Podcast': 'learning/podcast',
'conference': 'learning/conference',
'Conference': 'learning/conference',
'webinar': 'learning/webinar',
'Webinar': 'learning/webinar'
}
def extract_frontmatter(self, content):
"""Extract YAML frontmatter from content."""
if not content.strip().startswith('---'):
return None, content
# Find the end of frontmatter
lines = content.split('\n')
if len(lines) < 3:
return None, content
end_idx = None
for i in range(1, len(lines)):
if lines[i].strip() == '---':
end_idx = i
break
if end_idx is None:
return None, content
try:
frontmatter_text = '\n'.join(lines[1:end_idx])
frontmatter = yaml.safe_load(frontmatter_text)
remaining_content = '\n'.join(lines[end_idx + 1:])
return frontmatter, remaining_content
except yaml.YAMLError:
return None, content
def normalize_tag(self, tag):
"""Normalize a single tag."""
# Remove leading/trailing whitespace
tag = tag.strip()
# Remove hash if present
if tag.startswith('#'):
tag = tag[1:]
# Apply standard mappings
if tag in self.standard_mappings:
return self.standard_mappings[tag]
# Handle date-based tags
if re.match(r'\d{4}/\d{2}', tag):
return f'daily/{tag}'
# Handle file paths as tags
if '/' in tag and not tag.startswith(('ai/', 'client/', 'learning/', 'status/', 'action/')):
# Convert path-like tags to hierarchical tags
parts = tag.split('/')
if len(parts) >= 2:
category = parts[0].lower()
if category in ['ai', 'client', 'learning', 'business', 'project']:
return f"{category}/{'/'.join(parts[1:]).lower()}"
# Default normalization
return tag.lower().replace(' ', '-')
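    # Normalization walkthrough (illustrative inputs):
    #   'LangChain'    -> 'langchain'      (exact mapping)
    #   '2025/03'      -> 'daily/2025/03'  (date-based rule)
    #   'Client/Acme'  -> 'client/acme'    (path-like tag with a known category)
    #   'New Topic'    -> 'new-topic'      (default: lowercase, spaces to hyphens)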
def standardize_tags(self, tags):
"""Standardize a list of tags."""
if not tags:
return []
# Handle different tag formats
if isinstance(tags, str):
# Single tag as string
return [self.normalize_tag(tags)]
if isinstance(tags, list):
standardized = []
for tag in tags:
if isinstance(tag, str):
normalized = self.normalize_tag(tag)
if normalized and normalized not in standardized:
standardized.append(normalized)
self.tag_stats[normalized] += 1
return standardized
return []
def process_file(self, file_path, dry_run=False):
"""Process a single file and standardize its tags."""
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
frontmatter, remaining_content = self.extract_frontmatter(content)
if frontmatter is None:
return False # No frontmatter to process
# Get current tags
current_tags = frontmatter.get('tags', [])
# Standardize tags
standardized_tags = self.standardize_tags(current_tags)
# Check if tags changed
if standardized_tags != current_tags:
if not dry_run:
# Update frontmatter
frontmatter['tags'] = standardized_tags
# Reconstruct content
new_frontmatter = yaml.dump(frontmatter, default_flow_style=False, sort_keys=False)
new_content = f"---\n{new_frontmatter}---\n{remaining_content}"
# Write back to file
with open(file_path, 'w', encoding='utf-8') as f:
f.write(new_content)
return True # File was updated
return False # No changes needed
except Exception as e:
print(f"Error processing {file_path}: {e}")
return False
def analyze_existing_tags(self):
"""Analyze existing tags in the vault."""
tag_files = defaultdict(list)
for file_path in self.vault_path.rglob('*.md'):
if any(skip_dir in file_path.parts for skip_dir in {'.obsidian', '.trash', 'System_Files', '.git'}):
continue
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
frontmatter, _ = self.extract_frontmatter(content)
if frontmatter and 'tags' in frontmatter:
tags = frontmatter['tags']
if isinstance(tags, list):
for tag in tags:
if isinstance(tag, str):
tag_files[tag].append(file_path)
elif isinstance(tags, str):
tag_files[tags].append(file_path)
except Exception as e:
print(f"Error analyzing {file_path}: {e}")
return tag_files
def generate_tag_report(self):
"""Generate a report of current tags and suggested changes."""
print("Analyzing existing tags...")
tag_files = self.analyze_existing_tags()
report = []
report.append("# Tag Standardization Report")
report.append(f"Generated for vault: {self.vault_path}")
report.append(f"Total unique tags: {len(tag_files)}")
report.append("")
# Group tags by frequency
tag_frequency = [(tag, len(files)) for tag, files in tag_files.items()]
tag_frequency.sort(key=lambda x: x[1], reverse=True)
report.append("## Current Tags by Frequency")
for tag, count in tag_frequency:
normalized = self.normalize_tag(tag)
if normalized != tag:
report.append(f"- `{tag}` ({count} files) β `{normalized}`")
else:
report.append(f"- `{tag}` ({count} files)")
report.append("")
# Suggest consolidations
report.append("## Suggested Consolidations")
consolidations = defaultdict(list)
for tag, files in tag_files.items():
normalized = self.normalize_tag(tag)
if normalized != tag:
consolidations[normalized].append((tag, len(files)))
for normalized_tag, original_tags in consolidations.items():
if len(original_tags) > 1:
report.append(f"### {normalized_tag}")
total_files = sum(count for _, count in original_tags)
report.append(f"Total files: {total_files}")
for original, count in original_tags:
report.append(f"- `{original}` ({count} files)")
report.append("")
return '\n'.join(report)
def process_vault(self, dry_run=False):
"""Process all files in the vault."""
skip_dirs = {'.obsidian', '.trash', 'System_Files', '.git'}
for file_path in self.vault_path.rglob('*.md'):
if any(skip_dir in file_path.parts for skip_dir in skip_dirs):
continue
self.files_processed += 1
if self.process_file(file_path, dry_run):
self.files_updated += 1
if not dry_run:
print(f"Updated: {file_path.relative_to(self.vault_path)}")
return {
'processed': self.files_processed,
'updated': self.files_updated,
'tag_stats': dict(self.tag_stats)
}
def main():
parser = argparse.ArgumentParser(description='Standardize tags in Obsidian vault')
parser.add_argument('--vault', default='/Users/cam/VAULT01',
help='Path to Obsidian vault')
parser.add_argument('--dry-run', action='store_true',
help='Show what would be changed without making changes')
parser.add_argument('--report', action='store_true',
help='Generate analysis report of current tags')
parser.add_argument('--output',
default='/Users/cam/VAULT01/System_Files/Tag_Analysis_Report.md',
help='Output file for report')
args = parser.parse_args()
standardizer = TagStandardizer(args.vault)
if args.report:
report = standardizer.generate_tag_report()
with open(args.output, 'w', encoding='utf-8') as f:
f.write(report)
print(f"Tag analysis report saved to: {args.output}")
else:
print(f"Processing vault at: {args.vault}")
print("Dry run mode" if args.dry_run else "Making changes")
print("-" * 50)
results = standardizer.process_vault(dry_run=args.dry_run)
print("-" * 50)
print(f"Files processed: {results['processed']}")
print(f"Files updated: {results['updated']}")
if results['tag_stats']:
print("\nTop standardized tags:")
for tag, count in Counter(results['tag_stats']).most_common(10):
print(f" {tag}: {count}")
if __name__ == '__main__':
main() | {
"repo_id": "davila7/claude-code-templates",
"file_path": "cli-tool/components/agents/obsidian-ops-team/Scripts/tag_standardizer.py",
"license": "MIT License",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepfakes/faceswap:lib/config/config.py | #!/usr/bin/env python3
""" Default configurations for faceswap. Handles parsing and validating of Faceswap Configs and
interfacing with :class:`configparser.ConfigParser` """
from __future__ import annotations
import inspect
import logging
import os
import sys
from importlib import import_module
from lib.utils import full_path_split, get_module_objects, PROJECT_ROOT
from .ini import ConfigFile
from .objects import ConfigItem, ConfigSection, GlobalSection
logger = logging.getLogger(__name__)
_CONFIGS: dict[str, FaceswapConfig] = {}
""" dict[str, FaceswapConfig] : plugin group to FaceswapConfig mapping for all loaded configs """
class FaceswapConfig():
""" Config Items """
def __init__(self, configfile: str | None = None) -> None:
""" Init Configuration
Parameters
----------
configfile : str, optional
Optional path to a config file. ``None`` for default location. Default: ``None``
"""
logger.debug("Initializing: %s", self.__class__.__name__)
self._plugin_group = self._get_plugin_group()
self._ini = ConfigFile(self._plugin_group, ini_path=configfile)
self.sections: dict[str, ConfigSection] = {}
""" dict[str, :class:`ConfigSection`] : The Faceswap config sections and options """
self._set_defaults()
self._ini.on_load(self.sections)
_CONFIGS[self._plugin_group] = self
logger.debug("Initialized: %s", self.__class__.__name__)
def _get_plugin_group(self) -> str:
""" Obtain the name of the plugin group based on the child module's folder path
Returns
-------
str
The plugin group for this Config object
"""
mod_split = self.__module__.split(".")
mod_name = mod_split[-1]
retval = mod_name.rsplit("_", maxsplit=1)[0]
logger.debug("Got plugin group '%s' from module '%s'",
retval, self.__module__)
# Sanity check in case of defaults config file name/location changes
parent = mod_split[-2]
assert mod_name == f"{parent}_config"
return retval
def add_section(self, title: str, info: str) -> None:
""" Add a default section to config file
Parameters
----------
title : str
The title for the section
info : str
The helptext for the section
"""
logger.debug("Add section: (title: '%s', info: '%s')", title, info)
self.sections[title] = ConfigSection(helptext=info, options={})
def add_item(self, section: str, title: str, config_item: ConfigItem) -> None:
""" Add a default item to a config section
Parameters
----------
section : str
The section of the config to add the item to
title : str
The name of the config item
config_item : :class:`~lib.config.objects.ConfigItem`
The default config item object to add to the config
"""
logger.debug("Add item: (section: '%s', item: %s", section, config_item)
self.sections[section].options[title] = config_item
def _import_defaults_from_module(self,
filename: str,
module_path: str,
plugin_type: str) -> None:
""" Load the plugin's defaults module, extract defaults and add to default configuration.
Parameters
----------
filename : str
The filename to load the defaults from
module_path : str
The path to load the module from
plugin_type : str
The type of plugin that the defaults are being loaded for
"""
logger.debug("Adding defaults: (filename: %s, module_path: %s, plugin_type: %s",
filename, module_path, plugin_type)
module = os.path.splitext(filename)[0]
section = ".".join((plugin_type, module.replace("_defaults", "")))
logger.debug("Importing defaults module: %s.%s", module_path, module)
mod = import_module(f"{module_path}.{module}")
self.add_section(section, mod.HELPTEXT) # type:ignore[attr-defined]
for key, val in vars(mod).items():
if isinstance(val, ConfigItem):
self.add_item(section=section, title=key, config_item=val)
logger.debug("Added defaults: %s", section)
def _defaults_from_plugin(self, plugin_folder: str) -> None:
""" Scan the given plugins folder for config defaults.py files and update the
default configuration.
Parameters
----------
plugin_folder : str
The folder to scan for plugins
"""
for dirpath, _, filenames in os.walk(plugin_folder):
default_files = [fname for fname in filenames if fname.endswith("_defaults.py")]
if not default_files:
continue
base_path = os.path.dirname(os.path.realpath(sys.argv[0]))
            # Can't use replace as there is a bug on some Windows installs that lowercases some paths
import_path = ".".join(full_path_split(dirpath[len(base_path):])[1:])
plugin_type = import_path.rsplit(".", maxsplit=1)[-1]
for filename in default_files:
self._import_defaults_from_module(filename, import_path, plugin_type)
def set_defaults(self, helptext: str = "") -> None:
""" Override for plugin specific config defaults.
        This method should always be overridden to add the help text for the global plugin group.
If `helptext` is not provided, then it is assumed that there is no global section for this
plugin group.
The default action will parse the child class' module for
:class:`~lib.config.objects.ConfigItem` objects and add them to this plugin group's
"global" section of :attr:`sections`.
The name of each config option will be the variable name found in the module.
It will then parse the child class' module for subclasses of
:class:`~lib.config.objects.GlobalSection` objects and add each of these sections to this
plugin group's :attr:`sections`, adding any :class:`~lib.config.objects.ConfigItem` within
the GlobalSection to that sub-section.
The section name will be the name of the GlobalSection subclass, lowercased
Parameters
----------
helptext : str
The help text to display for the plugin group
Raises
------
ValueError
If the plugin group's help text has not been provided
"""
section = "global"
logger.debug("[%s:%s] Adding defaults", self._plugin_group, section)
if not helptext:
logger.debug("No help text provided for '%s'. Not creating global section",
self.__module__)
return
self.add_section(section, helptext)
for key, val in vars(sys.modules[self.__module__]).items():
if isinstance(val, ConfigItem):
self.add_item(section=section, title=key, config_item=val)
logger.debug("[%s:%s] Added defaults", self._plugin_group, section)
# Add global sub-sections
for key, val in vars(sys.modules[self.__module__]).items():
if inspect.isclass(val) and issubclass(val, GlobalSection) and val != GlobalSection:
section_name = f"{section}.{key.lower()}"
self.add_section(section_name, val.helptext)
for opt_name, opt in val.__dict__.items():
if isinstance(opt, ConfigItem):
self.add_item(section=section_name, title=opt_name, config_item=opt)
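    # A minimal sketch of a plugin config module that set_defaults() would parse;
    # the option names and values below are hypothetical:
    #
    #   from lib.config.objects import ConfigItem, GlobalSection
    #
    #   batch_size = ConfigItem(datatype=int, default=16, group="settings",
    #                           info="Batch size.", min_max=(1, 64), rounding=1)
    #
    #   class Loss(GlobalSection):
    #       helptext = "Loss settings"
    #       loss_function = ConfigItem(datatype=str, default="ssim", group="loss",
    #                                  info="Loss function.", choices=["ssim", "mae"])
    #
    # This yields a "global" section containing batch_size and a "global.loss"
    # sub-section containing loss_function.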
def _set_defaults(self) -> None:
"""Load the plugin's default values, set the object names and order the sections, global
first then alphabetically."""
self.set_defaults()
for section_name, section in self.sections.items():
for opt_name, opt in section.options.items():
opt.set_name(f"{self._plugin_group}.{section_name}.{opt_name}")
global_keys = sorted(s for s in self.sections if s.startswith("global"))
remaining_keys = sorted(s for s in self.sections if not s.startswith("global"))
ordered = {k: self.sections[k] for k in global_keys + remaining_keys}
self.sections = ordered
def save_config(self) -> None:
"""Update the ini file with the currently stored app values and save the config file."""
self._ini.update_from_app(self.sections)
def get_configs() -> dict[str, FaceswapConfig]:
""" Get all of the FaceswapConfig options. Loads any configs that have not been loaded and
    returns a dictionary of all configs.
Returns
-------
dict[str, :class:`FaceswapConfig`]
All of the loaded faceswap config objects
"""
generate_configs(force=True)
return _CONFIGS
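# Usage sketch; the "train" plugin group name is illustrative:
#   configs = get_configs()
#   train_config = configs["train"]  # a FaceswapConfig instance keyed by plugin group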
def generate_configs(force: bool = False) -> None:
""" Generate config files if they don't exist.
This script is run prior to anything being set up, so don't use logging
Generates the default config files for plugins in the faceswap config folder
Logic:
    - Scan the plugins path for files named <parent_folder>_config.py
- Import the discovered module and look for instances of FaceswapConfig
- If exists initialize the class
Parameters
----------
force : bool
Force the loading of all plugin configs even if their .ini files pre-exist
"""
configs_path = os.path.join(PROJECT_ROOT, "config")
plugins_path = os.path.join(PROJECT_ROOT, "plugins")
for dirpath, _, filenames in os.walk(plugins_path):
relative_path = dirpath.replace(PROJECT_ROOT, "")[1:]
if len(full_path_split(relative_path)) > 2: # don't dig further than 1 folder deep
continue
plugin_group = os.path.basename(dirpath)
filename = f"{plugin_group}_config.py"
if filename not in filenames:
continue
if plugin_group in _CONFIGS:
continue
config_file = os.path.join(configs_path, f"{plugin_group}.ini")
if not os.path.exists(config_file) or force:
modname = os.path.splitext(filename)[0]
modpath = os.path.join(dirpath.replace(PROJECT_ROOT, ""),
modname)[1:].replace(os.sep, ".")
mod = import_module(modpath)
for obj in vars(mod).values():
if (inspect.isclass(obj)
and issubclass(obj, FaceswapConfig)
and obj != FaceswapConfig):
obj()
__all__ = get_module_objects(__name__)
| {
"repo_id": "deepfakes/faceswap",
"file_path": "lib/config/config.py",
"license": "GNU General Public License v3.0",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
deepfakes/faceswap:lib/config/ini.py | #!/usr/bin/env python3
""" Handles interfacing between Faceswap Configs and ConfigParser .ini files """
from __future__ import annotations
import logging
import os
import textwrap
import typing as T
from configparser import ConfigParser
from lib.logger import parse_class_init
from lib.utils import get_module_objects, PROJECT_ROOT
if T.TYPE_CHECKING:
from .objects import ConfigSection, ConfigValueType
logger = logging.getLogger(__name__)
class ConfigFile():
""" Handles the interfacing between saved faceswap .ini configs and internal Config objects
Parameters
----------
plugin_group : str
The plugin group that is requesting a config file
ini_path : str | None, optional
Optional path to a .ini config file. ``None`` for default location. Default: ``None``
"""
def __init__(self, plugin_group: str, ini_path: str | None = None) -> None:
parse_class_init(locals())
self._plugin_group = plugin_group
self._file_path = self._get_config_path(ini_path)
self._parser = self._get_new_configparser()
if self._exists: # Load or create new
self.load()
@property
def _exists(self) -> bool:
""" bool : ``True`` if the config.ini file exists """
return os.path.isfile(self._file_path)
def _get_config_path(self, ini_path: str | None) -> str:
""" Return the path to the config file from the calling folder or the provided file
Parameters
----------
ini_path : str | None
Path to a config ini file. ``None`` for default location.
Returns
-------
str
The full path to the configuration file
"""
if ini_path is not None:
if not os.path.isfile(ini_path):
err = f"Config file does not exist at: {ini_path}"
logger.error(err)
raise ValueError(err)
return ini_path
retval = os.path.join(PROJECT_ROOT, "config", f"{self._plugin_group}.ini")
logger.debug("[%s] Config File location: '%s'", os.path.basename(retval), retval)
return retval
def _get_new_configparser(self) -> ConfigParser:
""" Obtain a fresh ConfigParser object and set it to case-sensitive
Returns
-------
:class:`configparser.ConfigParser`
A new ConfigParser object set to case-sensitive
"""
retval = ConfigParser(allow_no_value=True)
retval.optionxform = str # type:ignore[assignment,method-assign]
return retval
# I/O
def load(self) -> None:
""" Load values from the saved config ini file into our Config object """
logger.verbose("[%s] Loading config: '%s'", # type:ignore[attr-defined]
self._plugin_group, self._file_path)
self._parser.read(self._file_path, encoding="utf-8")
def save(self) -> None:
""" Save a config file """
logger.debug("[%s] %s config: '%s'",
self._plugin_group, "Updating" if self._exists else "Saving", self._file_path)
# TODO in python >= 3.14 this will error when there are delimiters in the comments
with open(self._file_path, "w", encoding="utf-8", errors="replace") as f_cfgfile:
self._parser.write(f_cfgfile)
logger.info("[%s] Saved config: '%s'", self._plugin_group, self._file_path)
# .ini vs Faceswap Config checking
def _sections_synced(self, app_config: dict[str, ConfigSection]) -> bool:
""" Validate that all of the sections within the application config match with all of the
sections in the ini file
Parameters
----------
app_config : dict[str, :class:`ConfigSection`]
The latest configuration settings from the application. Section name is key
Returns
-------
bool
``True`` if application sections and saved ini sections match
"""
given_sections = set(app_config)
loaded_sections = set(self._parser.sections())
retval = given_sections == loaded_sections
if not retval:
logger.debug("[%s] Config sections are not synced: (app: %s, ini: %s)",
self._plugin_group, sorted(given_sections), sorted(loaded_sections))
return retval
def _options_synced(self, app_config: dict[str, ConfigSection]) -> bool:
""" Validate that all of the option names within the application config match with all of
the option names in the ini file
Note
----
As we need to write a new config anyway, we return on the first change found
Parameters
----------
app_config : dict[str, :class:`ConfigSection`]
The latest configuration settings from the application. Section name is key
Returns
-------
bool
``True`` if application option names match with saved ini option names
"""
for name, section in app_config.items():
given_opts = set(opt for opt in section.options)
loaded_opts = set(self._parser[name].keys())
if given_opts != loaded_opts:
logger.debug("[%s:%s] Config options are not synced: (app: %s, ini: %s)",
self._plugin_group, name, sorted(given_opts), sorted(loaded_opts))
return False
return True
def _values_synced(self, app_section: ConfigSection, section: str) -> bool:
""" Validate that all of the option values within the application config match with all of
the option values in the ini file
Parameters
----------
app_section : :class:`ConfigSection`
The latest configuration settings from the application for the given section
section : str
The section name to check the option values for
Returns
-------
bool
``True`` if application option values match with saved ini option values
"""
        # Keys need to be included alongside values: a set built from values alone collapses
        # duplicates (e.g. multiple False values), which can cause edge-case false negatives
given_vals = set((k, v.ini_value) for k, v in app_section.options.items())
loaded_vals = set((k, v) for k, v in self._parser[section].items())
retval = given_vals == loaded_vals
if not retval:
logger.debug("[%s:%s] Config values are not synced: (app: %s, ini: %s)",
self._plugin_group, section, sorted(given_vals), sorted(loaded_vals))
return retval
def _is_synced_structure(self, app_config: dict[str, ConfigSection]) -> bool:
""" Validate that all the given sections and option names within the application config
match with their corresponding items in the save .ini file
Parameters
----------
app_config: dict[str, :class:`ConfigSection`]
The latest configuration settings from the application. Section name is key
Returns
-------
bool
``True`` if the app config and saved ini config structure match
"""
if not self._sections_synced(app_config):
return False
if not self._options_synced(app_config):
return False
logger.debug("[%s] Configs are synced", self._plugin_group)
return True
# .ini file insertion
def format_help(self, helptext: str, is_section: bool = False) -> str:
""" Format comments for insertion into a config ini file
Parameters
----------
helptext : str
The help text to be formatted
is_section : bool, optional
``True`` if the help text pertains to a section. ``False`` if it pertains to an option.
            Default: ``False``
Returns
-------
str
The formatted help text
"""
logger.debug("[%s] Formatting help: (helptext: '%s', is_section: '%s')",
self._plugin_group, helptext, is_section)
formatted = ""
for hlp in helptext.split("\n"):
subsequent_indent = "\t\t" if hlp.startswith("\t") else ""
hlp = f"\t- {hlp[1:].strip()}" if hlp.startswith("\t") else hlp
formatted += textwrap.fill(hlp,
100,
tabsize=4,
subsequent_indent=subsequent_indent) + "\n"
helptext = '# {}'.format(formatted[:-1].replace("\n", "\n# ")) # Strip last newline
helptext = helptext.upper() if is_section else f"\n{helptext}"
return helptext
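    # e.g. format_help("Output settings", is_section=True) returns "# OUTPUT SETTINGS",
    # while is_section=False returns "\n# Output settings" (the text is illustrative)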
def _insert_section(self, section: str, helptext: str, config: ConfigParser) -> None:
""" Insert a section into the config
Parameters
----------
section : str
The section title to insert
helptext : str
The help text for the config section
config : :class:`configparser.ConfigParser`
The config parser object to insert the section into.
"""
logger.debug("[%s:%s] Inserting section: (helptext: '%s', config: '%s')",
self._plugin_group, section, helptext, config)
helptext = self.format_help(helptext, is_section=True)
config.add_section(section)
config.set(section, helptext)
def _insert_option(self,
section: str,
name: str,
helptext: str,
value: str,
config: ConfigParser) -> None:
""" Insert an option into a config section
Parameters
----------
section : str
The section to insert the option into
name : str
The name of the option to insert
helptext : str
The help text for the option
value : str
The value for the option
config : :class:`configparser.ConfigParser`
The config parser object to insert the option into
"""
logger.debug(
"[%s:%s] Inserting option: (name: '%s', helptext: %s, value: '%s', config: '%s')",
self._plugin_group, section, name, helptext, value, config)
helptext = self.format_help(helptext, is_section=False)
config.set(section, helptext)
config.set(section, name, value)
def _sync_from_app(self, app_config: dict[str, ConfigSection]) -> None:
""" Update the saved config.ini file from the values stored in the application config
Existing options keep their saved values as per the .ini files. New options are added with
their application defined default value. Options in the .ini file not in application
provided config are removed.
Note
----
A new configuration object is created as comments are stripped from the loaded ini files.
Parameters
----------
app_config: dict[str, :class:`ConfigSection`]
The latest configuration settings from the application. Section name is key
"""
logger.debug("[%s] Syncing from app", self._plugin_group)
parser = self._get_new_configparser() if self._exists else self._parser
for section_name, section in app_config.items():
self._insert_section(section_name, section.helptext, parser)
for name, opt in section.options.items():
value = self._parser.get(section_name, name, fallback=None)
if value is None:
value = opt.ini_value
logger.debug(
"[%s:%s] Setting default value for non-existent config option '%s': '%s'",
self._plugin_group, section_name, name, value)
self._insert_option(section_name, name, opt.helptext, value, parser)
if parser != self._parser:
self._parser = parser
self.save()
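    # Merge semantics sketch (option names are illustrative): given app options
    # {size: 4, mode: fast} and a saved .ini holding {size: 8, legacy: 1}, the
    # rewritten .ini keeps size=8 (saved value wins), adds mode=fast (app default
    # for the new option) and drops legacy (no longer defined by the app)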
# .ini extraction
def _get_converted_value(self, section: str, option: str, datatype: type) -> ConfigValueType:
""" Return a config item from the .ini file in it's correct type.
Parameters
----------
section : str
The configuration section to obtain the config option for
option : str
The configuration option to obtain the converted value for
datatype : type
The type to return the value as
Returns
-------
bool | int | float | list[str] | str
The selected configuration option in the correct data format
"""
logger.debug("[%s:%s] Getting config item: (option: '%s', datatype: %s)",
self._plugin_group, section, option, datatype)
assert datatype in (bool, int, float, str, list), (
f"Expected (bool, int, float, str, list). Got {datatype}")
retval: ConfigValueType
if datatype == bool:
retval = self._parser.getboolean(section, option)
elif datatype == int:
retval = self._parser.getint(section, option)
elif datatype == float:
retval = self._parser.getfloat(section, option)
else:
retval = self._parser.get(section, option)
logger.debug("[%s:%s] Got config item: (value: %s, type: %s)",
self._plugin_group, section, retval, type(retval))
return retval
def _sync_to_app(self, app_config: dict[str, ConfigSection]) -> None:
""" Update the values in the application config to those loaded from the saved config.ini.
Parameters
----------
app_config: dict[str, :class:`ConfigSection`]
The latest configuration settings from the application. Section name is key
"""
logger.debug("[%s] Syncing to app", self._plugin_group)
for section_name, section in app_config.items():
if self._values_synced(section, section_name):
continue
for opt_name, opt in section.options.items():
if section_name not in self._parser or opt_name not in self._parser[section_name]:
logger.debug("[%s:%s] Skipping new option: '%s'",
self._plugin_group, section_name, opt_name)
continue
ini_opt = self._parser[section_name][opt_name]
if opt.ini_value != ini_opt:
logger.debug("[%s:%s] Updating '%s' from '%s' to '%s'",
self._plugin_group, section_name,
opt_name, ini_opt, opt.ini_value)
opt.set(self._get_converted_value(section_name, opt_name, opt.datatype))
# .ini insertion and extraction
def on_load(self, app_config: dict[str, ConfigSection]) -> None:
""" Check whether there has been any change between the current application config and
the loaded ini config. If so, update the relevant object(s) appropriately. This check will
also create new config.ini files if they do not pre-exist
Parameters
----------
app_config : dict[str, :class:`ConfigSection`]
The latest configuration settings from the application. Section name is key
"""
if not self._exists:
logger.debug("[%s] Creating new ini file", self._plugin_group)
self._sync_from_app(app_config)
if not self._is_synced_structure(app_config):
self._sync_from_app(app_config)
self._sync_to_app(app_config)
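        # Flow sketch: a missing .ini is written from app defaults, a structural
        # mismatch (sections/options) rewrites the .ini preserving saved values, and
        # finally any saved values are pushed back into the app config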
def update_from_app(self, app_config: dict[str, ConfigSection]) -> None:
""" Update the config.ini file to those values that are currently in Faceswap's app
config
Parameters
----------
app_config : dict[str, :class:`ConfigSection`]
The latest configuration settings from the application. Section name is key
"""
logger.debug("[%s] Updating saved config", self._plugin_group)
parser = self._get_new_configparser() if self._exists else self._parser
for section_name, section in app_config.items():
self._insert_section(section_name, section.helptext, parser)
for name, opt in section.options.items():
self._insert_option(section_name, name, opt.helptext, opt.ini_value, parser)
if parser != self._parser:
self._parser = parser
self.save()
__all__ = get_module_objects(__name__)
| {
"repo_id": "deepfakes/faceswap",
"file_path": "lib/config/ini.py",
"license": "GNU General Public License v3.0",
"lines": 347,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
deepfakes/faceswap:lib/config/objects.py | #!/usr/bin/env python3
""" Dataclass objects for holding and validating Faceswap Config items """
from __future__ import annotations
import gettext
import logging
from typing import (Any, cast, Generic, get_args, get_origin, get_type_hints,
Literal, TypeVar, Union)
import types
from dataclasses import dataclass, field
from lib.utils import get_module_objects
# LOCALES
_LANG = gettext.translation("lib.config", localedir="locales", fallback=True)
_ = _LANG.gettext
logger = logging.getLogger(__name__)
ConfigValueType = bool | int | float | list[str] | str
T = TypeVar("T")
# TODO allow list items other than strings
@dataclass
class ConfigItem(Generic[T]): # pylint:disable=too-many-instance-attributes
""" A dataclass for storing config items loaded from config.ini files and dynamically assigning
and validating that the correct datatype is used.
The value loaded from the .ini config file can be accessed with either:
>>> conf.value
>>> conf()
>>> conf.get()
Parameters
----------
datatype : type
A python type class. This limits the type of data that can be provided in the .ini file
and ensures that the value is returned to faceswap is correct. Valid datatypes are:
`int`, `float`, `str`, `bool` or `list`. Note that `list` items must all be strings.
default : Any
The default value for this option. It must be of the same type as :attr:`datatype`.
group : str
The group that this config item exists within in the config section
info : str
A description of what this option does.
choices : list[str] | Literal["colorchooser"], optional
If this option's datatype is a `str` then valid selections can be defined here, empty list
for any value. If the option's datatype is a `list`, then this option must be populated
with the valid selections. This validates the option and also enables a combobox / radio
option in the GUI. If the default value is a hex color value, then this should be the
literal "colorchooser" to present a color choosing interface in the GUI. Ignored for all
other datatypes
Default: [] (empty list: no options)
gui_radio : bool, optional
If :attr:`choices` are defined, this indicates that the GUI should use radio buttons rather
than a combobox to display this option. Default: ``False``
min_max : tuple[int | float, int | float] | None, optional
For `int` and `float` :attr:`datatype` this is required otherwise it is ignored. Should be
a tuple of min and max accepted values of the same datatype as the option value. This is
used for controlling the GUI slider range. Values are not enforced. Default: ``None``
    rounding : int, optional
        For `int` and `float` :attr:`datatype` this is required to be > 0, otherwise it is ignored.
Used for the GUI slider. For `float`, this is the number of decimal places to display. For
`int` this is the step size. Default: `-1` (ignored)
fixed : bool, optional
[train only]. Training configurations are fixed when the model is created, and then
reloaded from the state file. Marking an item as fixed=``False`` indicates that this value
can be changed for existing models, and will override the value saved in the state file
with the updated value in config. Default: ``True``
"""
datatype: type[T]
""" type : A python type class. The datatype of the config value. One of `int`, `float`, `str`,
`bool` or `list`. `list` will only contain `str` items """
default: T
""" Any : The default value for this option. It is of the same type as :attr:`datatype` """
group: str
""" str : The group that this config option belongs to """
info: str
""" str : A description of what this option does """
choices: list[str] | Literal["colorchooser"] = field(default_factory=list)
""" list[str] | Literal["colorchooser"]: If this option's datatype is a `str` then valid
    selections may be defined here; an empty list means any value is valid. If the datatype is a `list`
then valid choices will be populated here. If the default value is a hex color code, then the
literal "colorchooser" will display a color choosing interface in the GUI. """
gui_radio: bool = False
""" bool : indicates that the GUI should use radio buttons rather than a combobox to display
this option if :attr:`choices` is populated """
min_max: tuple[T, T] | None = None
""" tuple[int | float, int | float] | None : For `int` and `float` :attr:`datatype` this will
be populated otherwise it will be ``None``. Used for controlling the GUI slider range. Values
are not enforced. """
rounding: int = -1
""" int : For `int` and `float` :attr:`datatypes` this will be > 0 otherwise it will be `-1`.
Used for the GUI slider. For `float`, this is the number of decimal places to display. For
`int` this is the step size. """
fixed: bool = True
""" bool : Only used for train.model configurations. Options marked as fixed=``False``
indicates that this value can be changed for existing models, otherwise the option set when the
model commenced training is fixed and cannot be changed. Default: ``True`` """
_value: T = field(init=False)
""" Any : The value of the config item of type :attr:`datatype`"""
_name: str = field(init=False)
""" str: The option name for this object. Set when the config is first loaded """
@property
def helptext(self) -> str:
""" str | Description of the config option with additional formating and helptext added
from the item parameters """
retval = f"{self.info}\n"
if not self.fixed:
retval += _("\nThis option can be updated for existing models.\n")
if self.datatype == list:
retval += _("\nIf selecting multiple options then each option should be separated "
"by a space or a comma (e.g. item1, item2, item3)\n")
if self.choices and self.choices != "colorchooser":
retval += _("\nChoose from: {}").format(self.choices)
elif self.datatype == bool:
retval += _("\nChoose from: True, False")
elif self.datatype == int:
assert self.min_max is not None
cmin, cmax = self.min_max
retval += _("\nSelect an integer between {} and {}").format(cmin, cmax)
elif self.datatype == float:
assert self.min_max is not None
cmin, cmax = self.min_max
retval += _("\nSelect a decimal number between {} and {}").format(cmin, cmax)
default = ", ".join(self.default) if isinstance(self.default, list) else self.default
retval += _("\n[Default: {}]").format(default)
return retval
@property
def value(self) -> T:
""" Any : The config value for this item loaded from the config .ini file. String values
will always be lowercase, regardless of what is loaded from Config """
retval = self._value
if isinstance(self._value, str):
retval = cast(T, self._value.lower())
if isinstance(self._value, list):
retval = cast(T, [x.lower() for x in self._value])
return retval
@property
def ini_value(self) -> str:
""" str : The current value of the ConfigItem as a string for writing to a .ini file """
if isinstance(self._value, list):
return ", ".join(str(x) for x in self._value)
return str(self._value)
@property
def name(self) -> str:
"""str: The name associated with this option """
return self._name
def _validate_type(self, # pylint:disable=too-many-return-statements
expected_type: Any,
attr: Any,
depth=1) -> bool:
""" Validate that provided types are correct when this Dataclass is initialized
Parameters
----------
expected_type : Any
The expected data type for the given attribute
attr : Any
The attribute to test for correctness
depth : int, optional
The current recursion depth
Returns
-------
bool
``True`` if the given attribute is a valid datatype
Raises
------
AssertionError
On explicit data type failure
ValueError
On unhandled data type failure
"""
value = getattr(self, attr)
attr_type = type(value)
expected_type = self.datatype if expected_type == T else expected_type # type:ignore[misc]
if attr_type is expected_type:
return True
if attr == "datatype":
assert value in (str, bool, float, int, list), (
"'datatype' must be one of str, bool, float, int or list. Got {value}")
return True
if expected_type == T: # type:ignore[misc]
assert attr_type == self.datatype, (
f"'{attr}' expected: {self.datatype}. Got: {attr_type}")
return True
if get_origin(expected_type) is Literal:
return value in get_args(expected_type)
if get_origin(expected_type) in (Union, types.UnionType):
for subtype in get_args(expected_type):
if self._validate_type(subtype, attr, depth=depth + 1):
return True
if get_origin(expected_type) in (list, tuple) and attr_type in (list, tuple):
sub_expected = [self.datatype if v == T # type:ignore[misc]
else v for v in get_args(expected_type)]
return set(type(v) for v in value).issubset(sub_expected)
if depth == 1:
raise ValueError(f"'{attr}' expected: {expected_type}. Got: {attr_type}")
return False
def _validate_required(self) -> None:
""" Validate that required parameters are populated
Raises
------
ValueError
If any required parameters are empty
"""
if not self.group:
raise ValueError("A group must be provided")
if not self.info:
raise ValueError("Option info must me provided")
def _validate_choices(self) -> None:
""" Validate that choices have been used correctly
Raises
------
ValueError
If any choices options have not been populated correctly
"""
if self.choices == "colorchooser":
if not isinstance(self.default, str):
raise ValueError(f"Config Item default must be a string when selecting "
f"choice='colorchooser'. Got {type(self.default)}")
if not self.default.startswith("#") or len(self.default) != 7:
raise ValueError(f"Hex color codes should start with a '#' and be 6 "
f"characters long. Got: '{self.default}'")
elif self.choices and isinstance(self.default, str) and self.default not in self.choices:
raise ValueError(f"Config item default value '{self.default}' must exist in "
f"in choices {self.choices}")
if isinstance(self.choices, list) and self.choices:
unique_choices = set(x.lower() for x in self.choices)
if len(unique_choices) != len(self.choices):
raise ValueError("Config item choices must be a unique list")
if isinstance(self.default, list):
defaults = set(x.lower() for x in self.default)
else:
assert isinstance(self.default, str), type(self.default)
defaults = {self.default.lower()}
if not defaults.issubset(unique_choices):
raise ValueError(f"Config item default {self.default} must exist in choices "
f"{self.choices}")
if not self.choices and isinstance(self.default, list):
raise ValueError("Config item of type list must have choices defined")
def _validate_numeric(self) -> None:
""" Validate that float and int values have been set correctly
Raises
------
ValueError
If any float or int options have not been configured correctly
"""
# NOTE: Have to include datatype filter in next check to exclude bools
if self.datatype in (float, int) and isinstance(self.default, (float, int)):
if self.rounding <= 0:
raise ValueError(f"Config Item rounding must be a positive number for "
f"datatypes float and int. Got {self.rounding}")
if self.min_max is None or len(self.min_max) != 2:
raise ValueError(f"Config Item min_max must be a tuple of (<minimum>, "
f"<maximum>) values. Got {self.min_max}")
def __post_init__(self) -> None:
""" Validate and type check that the given parameters are valid and set the default value.
Raises
------
ValueError
If the Dataclass fails validation checks
"""
self._name = ""
self._value = self.default
try:
for attr, dtype in get_type_hints(self.__class__).items():
self._validate_type(dtype, attr)
except (AssertionError, ValueError) as err:
raise ValueError(f"Config item failed type checking: {str(err)}") from err
self._validate_required()
self._validate_choices()
self._validate_numeric()
def get(self) -> T:
""" Obtain the currently stored configuration value
Returns
-------
Any
The config value for this item loaded from the config .ini file. String values will
            always be lowercase, regardless of what is loaded from Config """
return self.value
def _parse_list(self, value: str | list[str]) -> list[str]:
""" Parse inbound list values. These can be space/comma-separated strings or a list.
Parameters
----------
value : str | list[str]
The inbound value to be converted to a list
Returns
-------
list[str]
List of strings representing the inbound values.
"""
if not value:
return []
if isinstance(value, list):
return [str(x) for x in value]
delimiter = "," if "," in value else None
retval = list(set(x.strip() for x in value.split(delimiter)))
logger.debug("[%s] Processed str value '%s' to unique list %s", self._name, value, retval)
return retval
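    # e.g. _parse_list("a, b, a") and _parse_list("a b a") both yield the unique
    # list ["a", "b"] (order is not guaranteed as a set is used to de-duplicate)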
def _validate_selection(self, value: str | list[str]) -> str | list[str]:
""" Validate that the given value is valid within the stored choices
Parameters
----------
        value : str | list[str]
The inbound config value to validate
Returns
-------
        str | list[str]
            The validated value; invalid selections are replaced with valid entries or the default
"""
assert isinstance(self.choices, list)
choices = [x.lower() for x in self.choices]
logger.debug("[%s] Checking config choices", self._name)
if isinstance(value, str):
if value.lower() not in choices:
logger.warning("[%s] '%s' is not a valid config choice. Defaulting to '%s'",
self._name, value, self.default)
return cast(str, self.default)
return value
if all(x.lower() in choices for x in value):
return value
valid = [x for x in value if x.lower() in choices]
valid = valid if valid else cast(list[str], self.default)
invalid = [x for x in value if x.lower() not in choices]
logger.warning("[%s] The option(s) %s are not valid selections. Setting to: %s",
self._name, invalid, valid)
return valid
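    # Sketch with illustrative values: given choices=["nearest", "bilinear"],
    #   "bicubic"            -> warns and returns the default
    #   ["nearest", "fake"]  -> warns and returns ["nearest"]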
def set(self, value: T) -> None:
""" Set the item's option value
Parameters
----------
value : Any
The value to set this item to. Must be of type :attr:`datatype`
Raises
------
ValueError
If the given value does not pass type and content validation checks
"""
if not self._name:
raise ValueError("The name of this object should have been set before any value is"
"added")
if self.datatype is list:
if not isinstance(value, (str, list)):
raise ValueError(f"[{self._name}] List values should be set as a Str or List. Got "
f"{type(value)} ({value})")
value = cast(T, self._parse_list(value))
if not isinstance(value, self.datatype):
raise ValueError(
f"[{self._name}] Expected {self.datatype} got {type(value)} ({value})")
if isinstance(self.choices, list) and self.choices:
assert isinstance(value, (list, str))
value = cast(T, self._validate_selection(value))
if self.choices == "colorchooser":
assert isinstance(value, str)
if not value.startswith("#") or len(value) != 7:
raise ValueError(f"Hex color codes should start with a '#' and be 6 "
f"characters long. Got: '{value}'")
self._value = value
def set_name(self, name: str) -> None:
""" Set the logging name for this object for display purposes
Parameters
----------
name : str
The name to assign to this option
"""
logger.debug("Setting name to '%s'", name)
assert isinstance(name, str) and name
self._name = name
def __call__(self) -> T:
""" Obtain the currently stored configuration value
Returns
-------
Any
The config value for this item loaded from the config .ini file. String values will
            always be lowercase, regardless of what is loaded from Config """
return self.value
@dataclass
class ConfigSection:
""" Dataclass for holding information about configuration sections and the contained
configuration items
Parameters
----------
helptext : str
The helptext to be displayed for the configuration section
options : dict[str, :class:`ConfigItem`]
Dictionary of configuration option name to the options for the section
"""
helptext: str
options: dict[str, ConfigItem]
@dataclass
class GlobalSection:
""" A dataclass for holding and identifying global sub-sections for plugin groups. Any global
subsections must inherit from this.
Parameters
----------
helptext : str
The helptext to be displayed for the global configuration section
"""
helptext: str
__all__ = get_module_objects(__name__)
| {
"repo_id": "deepfakes/faceswap",
"file_path": "lib/config/objects.py",
"license": "GNU General Public License v3.0",
"lines": 392,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
deepfakes/faceswap:lib/gui/analysis/moving_average.py | #!/usr/bin/env python3
""" Calculate Exponential Moving Average for faceswap GUI Stats. """
import logging
import numpy as np
from lib.logger import parse_class_init
from lib.utils import get_module_objects
logger = logging.getLogger(__name__)
class ExponentialMovingAverage:
""" Reshapes data before calculating exponential moving average, then iterates once over the
rows to calculate the offset without precision issues.
Parameters
----------
data : :class:`numpy.ndarray`
A 1 dimensional numpy array to obtain smoothed data for
amount : float
        The alpha parameter (smoothing amount) for the moving average, in the range (0.0, 1.0).
Notes
-----
Adapted from: https://stackoverflow.com/questions/42869495
"""
def __init__(self, data: np.ndarray, amount: float) -> None:
logger.debug(parse_class_init(locals()))
assert data.ndim == 1
amount = min(max(amount, 0.001), 0.999)
self._data = np.nan_to_num(data)
self._alpha = 1. - amount
self._dtype = "float32" if data.dtype == np.float32 else "float64"
self._row_size = self._get_max_row_size()
self._out = np.empty_like(data, dtype=self._dtype)
logger.debug("Initialized %s", self.__class__.__name__)
def __call__(self) -> np.ndarray:
""" Perform the exponential moving average calculation.
Returns
-------
:class:`numpy.ndarray`
The smoothed data
"""
if self._data.size <= self._row_size:
self._ewma_vectorized(self._data, self._out) # Normal function can handle this input
else:
self._ewma_vectorized_safe() # Use the safe version
return self._out
def _get_max_row_size(self) -> int:
""" Calculate the maximum row size for the running platform for the given dtype.
Returns
-------
int
The maximum row size possible on the running platform for the given :attr:`_dtype`
Notes
-----
Might not be the optimal value for speed, which is hard to predict due to numpy
optimizations.
"""
# Use :func:`np.finfo(dtype).eps` if you are worried about accuracy and want to be safe.
epsilon = np.finfo(self._dtype).tiny
# If this produces an OverflowError, make epsilon larger:
retval = int(np.log(epsilon) / np.log(1 - self._alpha)) + 1
logger.debug("row_size: %s", retval)
return retval
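    # Rough illustration: for float32 (tiny ~= 1.18e-38) and amount=0.9 (alpha=0.1),
    # log(1.18e-38) / log(0.9) + 1 gives a row size of roughly 829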
def _ewma_vectorized_safe(self) -> None:
""" Perform the vectorized exponential moving average in a safe way. """
num_rows = int(self._data.size // self._row_size) # the number of rows to use
leftover = int(self._data.size % self._row_size) # the amount of data leftover
first_offset = self._data[0]
if leftover > 0:
# set temporary results to slice view of out parameter
out_main_view = np.reshape(self._out[:-leftover], (num_rows, self._row_size))
data_main_view = np.reshape(self._data[:-leftover], (num_rows, self._row_size))
else:
out_main_view = self._out.reshape(-1, self._row_size)
data_main_view = self._data.reshape(-1, self._row_size)
self._ewma_vectorized_2d(data_main_view, out_main_view) # get the scaled cumulative sums
scaling_factors = (1 - self._alpha) ** np.arange(1, self._row_size + 1)
last_scaling_factor = scaling_factors[-1]
# create offset array
offsets = np.empty(out_main_view.shape[0], dtype=self._dtype)
offsets[0] = first_offset
# iteratively calculate offset for each row
for i in range(1, out_main_view.shape[0]):
offsets[i] = offsets[i - 1] * last_scaling_factor + out_main_view[i - 1, -1]
# add the offsets to the result
out_main_view += offsets[:, np.newaxis] * scaling_factors[np.newaxis, :]
if leftover > 0:
# process trailing data in the 2nd slice of the out parameter
self._ewma_vectorized(self._data[-leftover:],
self._out[-leftover:],
offset=out_main_view[-1, -1])
def _ewma_vectorized(self,
data: np.ndarray,
out: np.ndarray,
offset: float | None = None) -> None:
""" Calculates the exponential moving average over a vector. Will fail for large inputs.
The result is processed in place into the array passed to the `out` parameter
Parameters
----------
data : :class:`numpy.ndarray`
A 1 dimensional numpy array to obtain smoothed data for
out : :class:`numpy.ndarray`
A location into which the result is stored. It must have the same shape and dtype as
the input data
offset : float, optional
The offset for the moving average, scalar. Default: the value held in data[0].
"""
if data.size < 1: # empty input, return empty array
return
offset = data[0] if offset is None else offset
# scaling_factors -> 0 as len(data) gets large. This leads to divide-by-zeros below
scaling_factors = np.power(1. - self._alpha, np.arange(data.size + 1, dtype=self._dtype),
dtype=self._dtype)
# create cumulative sum array
np.multiply(data, (self._alpha * scaling_factors[-2]) / scaling_factors[:-1],
dtype=self._dtype, out=out)
np.cumsum(out, dtype=self._dtype, out=out)
out /= scaling_factors[-2::-1] # cumulative sums / scaling
if offset != 0:
noffset = np.asarray(offset).astype(self._dtype, copy=False)
out += noffset * scaling_factors[1:]
def _ewma_vectorized_2d(self, data: np.ndarray, out: np.ndarray) -> None:
""" Calculates the exponential moving average over the last axis.
The result is processed in place into the array passed to the `out` parameter
Parameters
----------
data : :class:`numpy.ndarray`
A 1 or 2 dimensional numpy array to obtain smoothed data for.
out : :class:`numpy.ndarray`
A location into which the result is stored. It must have the same shape and dtype as
the input data
"""
if data.size < 1: # empty input, return empty array
return
# calculate the moving average
scaling_factors = np.power(1. - self._alpha, np.arange(data.shape[1] + 1,
dtype=self._dtype),
dtype=self._dtype)
# create a scaled cumulative sum array
np.multiply(data,
np.multiply(self._alpha * scaling_factors[-2],
np.ones((data.shape[0], 1), dtype=self._dtype),
dtype=self._dtype) / scaling_factors[np.newaxis, :-1],
dtype=self._dtype, out=out)
np.cumsum(out, axis=1, dtype=self._dtype, out=out)
out /= scaling_factors[np.newaxis, -2::-1]
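# Usage sketch; loss_values is an illustrative 1D numpy array:
#   smoothed = ExponentialMovingAverage(loss_values, amount=0.9)()
#   # returns an array of the same shape with exponentially smoothed values
#   # (float32 for float32 input, float64 otherwise)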
__all__ = get_module_objects(__name__)
| {
"repo_id": "deepfakes/faceswap",
"file_path": "lib/gui/analysis/moving_average.py",
"license": "GNU General Public License v3.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepfakes/faceswap:lib/gui/gui_config.py | #!/usr/bin/env python3
""" Default configurations for the GUI """
import logging
import os
from tkinter import font as tk_font
from matplotlib import font_manager
from lib.config import FaceswapConfig
from lib.config import ConfigItem
from lib.utils import get_module_objects, PROJECT_ROOT
logger = logging.getLogger(__name__)
class _Config(FaceswapConfig):
""" Config File for GUI """
def set_defaults(self, helptext="") -> None:
""" Set the default values for config """
logger.debug("Setting defaults")
super().set_defaults(
helptext="Faceswap GUI Options.\nConfigure the appearance and behaviour of the GUI")
# Font choices cannot be added until tkinter has been launched
logger.debug("Adding font list from tkinter")
self.sections["global"].options["font"].choices = get_clean_fonts()
def get_commands() -> list[str]:
""" Return commands formatted for GUI
Returns
-------
list[str]
A list of faceswap and tools commands that can be displayed in Faceswap's GUI
"""
command_path = os.path.join(PROJECT_ROOT, "scripts")
tools_path = os.path.join(PROJECT_ROOT, "tools")
commands = [os.path.splitext(item)[0] for item in os.listdir(command_path)
if os.path.splitext(item)[1] == ".py"
and os.path.splitext(item)[0] not in ("gui", "fsmedia")
and not os.path.splitext(item)[0].startswith("_")]
tools = [os.path.splitext(item)[0] for item in os.listdir(tools_path)
if os.path.splitext(item)[1] == ".py"
and os.path.splitext(item)[0] not in ("gui", "cli")
and not os.path.splitext(item)[0].startswith("_")]
return commands + tools
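# Typically yields entries such as ["convert", "extract", "train", ...] plus tool
# names; the exact list depends on the .py files present in scripts/ and tools/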
def get_clean_fonts() -> list[str]:
""" Return a sane list of fonts for the system that has both regular and bold variants.
Pre-pend "default" to the beginning of the list.
Returns
-------
list[str]:
A list of valid fonts for the system
"""
fmanager = font_manager.FontManager()
fonts: dict[str, dict[str, bool]] = {}
for fnt in fmanager.ttflist:
if str(fnt.weight) in ("400", "normal", "regular"):
fonts.setdefault(fnt.name, {})["regular"] = True
if str(fnt.weight) in ("700", "bold"):
fonts.setdefault(fnt.name, {})["bold"] = True
valid_fonts = {key for key, val in fonts.items() if len(val) == 2}
retval = sorted(list(valid_fonts.intersection(tk_font.families())))
if not retval:
# Return the font list with any @prefixed or non-Unicode characters stripped and default
# prefixed
logger.debug("No bold/regular fonts found. Running simple filter")
retval = sorted([fnt for fnt in tk_font.families()
if not fnt.startswith("@") and not any(ord(c) > 127 for c in fnt)])
return ["default"] + retval
fullscreen = ConfigItem(
datatype=bool,
default=False,
group="startup",
info="Start Faceswap maximized.")
tab = ConfigItem(
datatype=str,
default="extract",
group="startup",
info="Start Faceswap in this tab.",
choices=get_commands())
options_panel_width = ConfigItem(
datatype=int,
default=30,
group="layout",
info="How wide the lefthand option panel is as a percentage of GUI width at "
"startup.",
min_max=(10, 90),
rounding=1)
console_panel_height = ConfigItem(
datatype=int,
default=20,
group="layout",
info="How tall the bottom console panel is as a percentage of GUI height at "
"startup.",
min_max=(10, 90),
rounding=1)
icon_size = ConfigItem(
datatype=int,
default=14,
group="layout",
info="Pixel size for icons. NB: Size is scaled by DPI.",
min_max=(10, 20),
rounding=1)
font = ConfigItem(
datatype=str,
default="default",
group="font",
info="Global font",
choices=["default"]) # Cannot get tk fonts until tk is loaded, so real value populated later
font_size = ConfigItem(
datatype=int,
default=9,
group="font",
info="Global font size.",
min_max=(6, 12),
rounding=1)
autosave_last_session = ConfigItem(
datatype=str,
default="prompt",
group="startup",
info="Automatically save the current settings on close and reload on startup"
"\n\tnever - Don't autosave session"
"\n\tprompt - Prompt to reload last session on launch"
"\n\talways - Always load last session on launch",
choices=["never", "prompt", "always"],
gui_radio=True)
timeout = ConfigItem(
datatype=int,
default=120,
group="behaviour",
info="Training can take some time to save and shutdown. Set the timeout "
"in seconds before giving up and force quitting.",
min_max=(10, 600),
rounding=10)
auto_load_model_stats = ConfigItem(
datatype=bool,
default=True,
group="behaviour",
info="Auto load model statistics into the Analysis tab when selecting a model "
"in Train or Convert tabs.")
def load_config(config_file: str | None = None) -> None:
""" Load the GUI configuration .ini file
Parameters
----------
config_file : str | None, optional
Path to a custom .ini configuration file to load. Default: ``None`` (use default
configuration file)
"""
_Config(configfile=config_file)
__all__ = get_module_objects(__name__)
| {
"repo_id": "deepfakes/faceswap",
"file_path": "lib/gui/gui_config.py",
"license": "GNU General Public License v3.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepfakes/faceswap:lib/system/ml_libs.py | #!/usr/bin/env python3
"""
Queries information about system installed Machine Learning Libraries.
NOTE: Only packages from Python's Standard Library should be imported in this module
"""
from __future__ import annotations
import json
import logging
import os
import platform
import re
import typing as T
from abc import ABC, abstractmethod
from shutil import which
from lib.utils import get_module_objects
from .system import _lines_from_command
if platform.system() == "Windows":
import winreg # pylint:disable=import-error
else:
winreg = None # type:ignore[assignment] # pylint:disable=invalid-name
if T.TYPE_CHECKING:
from winreg import HKEYType # type:ignore[attr-defined]
logger = logging.getLogger(__name__)
_TORCH_ROCM_REQUIREMENTS = {">=2.2.1,<2.4.0": ((6, 0), (6, 0))}
"""dict[str, tuple[tuple[int, int], tuple[int, int]]]: Minumum and maximum ROCm versions """
def _check_dynamic_linker(lib: str) -> list[str]:
""" Locate the folders that contain a given library in ldconfig and $LD_LIBRARY_PATH
Parameters
----------
    lib : str
        The library to locate
Returns
-------
list[str]
All real existing folders from ldconfig or $LD_LIBRARY_PATH that contain the given lib
"""
paths: set[str] = set()
ldconfig = which("ldconfig")
if ldconfig:
paths.update({os.path.realpath(os.path.dirname(line.split("=>")[-1].strip()))
for line in _lines_from_command([ldconfig, "-p"])
if lib in line and "=>" in line})
if not os.environ.get("LD_LIBRARY_PATH"):
return list(paths)
paths.update({os.path.realpath(path)
for path in os.environ["LD_LIBRARY_PATH"].split(":")
if path and os.path.exists(path)
for fname in os.listdir(path)
if lib in fname})
return list(paths)
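# For reference, an `ldconfig -p` entry looks like (path illustrative):
#   libcudart.so.12 (libc6,x86-64) => /usr/local/cuda/targets/x86_64-linux/lib/libcudart.so.12
# so the code splits on "=>" and resolves the real directory of the target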
def _files_from_folder(folder: str, prefix: str) -> list[str]:
""" Obtain all filenames from the given folder that start with the given prefix
Parameters
----------
folder : str
The folder to search for files in
prefix : str
The filename prefix to search for
Returns
-------
list[str]
        All filenames that exist in the given folder with the given prefix
"""
if not os.path.exists(folder):
return []
return [f for f in os.listdir(folder) if f.startswith(prefix)]
class _Alternatives:
""" Holds output from the update-alternatives command for the given package
Parameters
----------
package : str
The package to query update-alternatives for information
"""
def __init__(self, package: str) -> None:
self._package = package
self._bin = which("update-alternatives")
self._default_marker = "link currently points to"
self._alternatives_marker = "priority"
self._output: list[str] | None = None
@property
def alternatives(self) -> list[str]:
""" list[str] : Full path to alternatives listed for the given package """
if self._output is None:
self._query()
if not self._output:
return []
retval = [line.rsplit(" - ", maxsplit=1)[0] for line in self._output
if self._alternatives_marker in line.lower()]
logger.debug("Versions from 'update-alternatives' for '%s': %s", self._package, retval)
return retval
@property
def default(self) -> str:
""" str : Full path to the default package """
if self._output is None:
self._query()
if not self._output:
return ""
retval = next((x for x in self._output
if x.startswith(self._default_marker)), "").replace(self._default_marker,
"").strip()
logger.debug("Default from update-alternatives for '%s': %s", self._package, retval)
return retval
def _query(self) -> None:
""" Query update-alternatives for the given package and place stripped output into
:attr:`_output` """
if not self._bin:
self._output = []
return
cmd = [self._bin, "--display", self._package]
retval = [line.strip() for line in _lines_from_command(cmd)]
logger.debug("update-alternatives output for command %s: %s",
cmd, retval)
self._output = retval
class _Cuda(ABC):
""" Find the location of system installed Cuda and cuDNN on Windows and Linux. """
def __init__(self) -> None:
self.versions: list[tuple[int, int]] = []
""" list[tuple[int, int]] : All detected globally installed Cuda versions """
self.version: tuple[int, int] = (0, 0)
""" tuple[int, int] : Default installed Cuda version. (0, 0) if not detected """
self.cudnn_versions: dict[tuple[int, int], tuple[int, int, int]] = {}
""" dict[tuple[int, int], tuple[int, int, int]] : Detected cuDNN version for each installed
Cuda. key (0, 0) denotes globally installed cudnn """
self._paths: list[str] = []
""" list[str] : list of path to Cuda install folders relating to :attr:`versions` """
self._version_file = "version.json"
self._lib = "libcudart.so"
self._cudnn_header = "cudnn_version.h"
self._alternatives = _Alternatives("cuda")
self._re_cudnn = re.compile(r"#define CUDNN_(MAJOR|MINOR|PATCHLEVEL)\s+(\d+)")
if platform.system() in ("Windows", "Linux"):
self._get_versions()
self._get_version()
self._get_cudnn_versions()
def __repr__(self) -> str:
""" Pretty representation of this class """
attrs = ", ".join(f"{k}={repr(v)}" for k, v in self.__dict__.items()
if not k.startswith("_"))
return f"{self.__class__.__name__}({attrs})"
@classmethod
def _tuple_from_string(cls, version: str) -> tuple[int, int] | None:
""" Convert a Cuda version string to a version tuple
Parameters
----------
version : str
The Cuda version string to convert
Returns
-------
tuple[int, int] | None
The converted Cuda version string. ``None`` if not a valid version string
"""
if version.startswith("."):
version = version[1:]
split = version.split(".")
if len(split) not in (2, 3):
return None
split = split[:2]
if not all(x.isdigit() for x in split):
return None
return (int(split[0]), int(split[1]))
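    # e.g. "12.4" -> (12, 4); "12.4.1" -> (12, 4); ".12.4" -> (12, 4); "12" and
    # "12.x" -> None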
@abstractmethod
def get_versions(self) -> dict[tuple[int, int], str]:
""" Overide to Attempt to detect all installed Cuda versions on Linux or Windows systems
Returns
-------
dict[tuple[int, int], str]
The Cuda versions to the folder path on the system
"""
@abstractmethod
def get_version(self) -> tuple[int, int] | None:
""" Override to attempt to locate the default Cuda version on Linux or Windows
Returns
-------
tuple[int, int] | None
The Default global Cuda version or ``None`` if not found
"""
@abstractmethod
def get_cudnn_versions(self) -> dict[tuple[int, int], tuple[int, int, int]]:
""" Override to attempt to locate any installed cuDNN versions
Returns
-------
dict[tuple[int, int], tuple[int, int, int]]
Detected cuDNN version for each installed Cuda. key (0, 0) denotes globally installed
cudnn
"""
def version_from_version_file(self, folder: str) -> tuple[int, int] | None:
""" Attempt to get an installed Cuda version from its version.json file
Parameters
----------
folder : str
Full path to the folder to check for a version file
Returns
-------
tuple[int, int] | None
The detected Cuda version or ``None`` if not detected
"""
vers_file = os.path.join(folder, self._version_file)
if not os.path.exists(vers_file):
return None
with open(vers_file, "r", encoding="utf-8", errors="replace") as f:
vers = json.load(f)
retval = self._tuple_from_string(vers.get("cuda_cudart", {}).get("version"))
logger.debug("Version from '%s': %s", vers_file, retval)
return retval
def _version_from_nvcc(self) -> tuple[int, int] | None:
""" Obtain the version from NVCC output if it is on PATH
Returns
-------
tuple[int, int] | None
            The detected default Cuda version. ``None`` if no version is detected
"""
retval = None
nvcc = which("nvcc")
if not nvcc:
return retval
for line in _lines_from_command([nvcc, "-V"]):
vers = re.match(r".*release (\d+\.\d+)", line)
if vers is not None:
retval = self._tuple_from_string(vers.group(1))
break
logger.debug("Version from NVCC '%s': %s", nvcc, retval)
return retval
def _get_versions(self) -> None:
""" Attempt to detect all installed Cuda versions and populate to :attr:`versions` """
versions = self.get_versions()
if versions:
logger.debug("Cuda Versions: %s", versions)
self.versions = list(versions)
self._paths = list(versions.values())
return
logger.debug("Could not locate any Cuda versions")
def _get_version(self) -> None:
""" Attempt to detect the default Cuda version and populate to :attr:`version` """
version: tuple[int, int] | None = None
if len(self.versions) == 1:
version = self.versions[0]
logger.debug("Only 1 installed Cuda version: %s", version)
if not version:
version = self._version_from_nvcc()
if not version:
version = self.get_version()
if version:
self.version = version
logger.debug("Cuda version: %s", self.version if version else "not detected")
def _get_cudnn_versions(self) -> None:
""" Attempt to locate any installed cuDNN versions and add to :attr`cudnn_versions` """
versions = self.get_cudnn_versions()
if versions:
logger.debug("cudnn versions: %s", versions)
self.cudnn_versions = versions
return
logger.debug("No cudnn versions found")
def cudnn_version_from_header(self, folder: str) -> tuple[int, int, int] | None:
""" Attempt to detect the cuDNN version from the version header file
Parameters
----------
folder : str
The folder to check for the cuDNN header file
Returns
-------
tuple[int, int, int] | None
The cuDNN version found from the given folder or ``None`` if not detected
"""
path = os.path.join(folder, self._cudnn_header)
if not os.path.exists(path):
logger.debug("cudnn file '%s' does not exist", path)
return None
with open(path, "r", encoding="utf-8", errors="ignore") as f:
file = f.read()
version = {v[0]: int(v[1]) if v[1].isdigit() else 0
for v in self._re_cudnn.findall(file)}
if not version:
logger.debug("cudnn version could not be found in '%s'", path)
return None
logger.debug("cudnn version from '%s': %s", path, version)
retval = (version.get("MAJOR", 0), version.get("MINOR", 0), version.get("PATCHLEVEL", 0))
logger.debug("cudnn versions: %s", retval)
return retval
class CudaLinux(_Cuda):
""" Find the location of system installed Cuda and cuDNN on Linux. """
def __init__(self) -> None:
self._folder_prefix = "cuda-"
super().__init__()
def _version_from_lib(self, folder: str) -> tuple[int, int] | None:
""" Attempt to locate the version from the existence of libcudart.so within a Cuda
targets/x86_64-linux/lib folder
Parameters
----------
folder : str
Full file path to the Cuda folder
Returns
-------
tuple[int, int] | None
The Cuda version identified by the existence of the libcudart.so file. ``None`` if
not detected
"""
lib_folder = os.path.join(folder, "targets", "x86_64-linux", "lib")
lib_versions = [f.replace(self._lib, "")
for f in _files_from_folder(lib_folder, self._lib)]
if not lib_versions:
return None
versions = [self._tuple_from_string(f[1:])
for f in lib_versions if f and f.startswith(".")]
valid = [v for v in versions if v is not None]
        if not valid or len(set(valid)) != 1:
return None
retval = valid[0]
logger.debug("Version from '%s': %s", os.path.join(lib_folder, self._lib), retval)
return retval
def _versions_from_usr(self) -> dict[tuple[int, int], str]:
""" Attempt to detect all installed Cuda versions from the /usr/local folder
Scan /usr/local for cuda-x.x folders containing either a version.json file or
        targets/x86_64-linux/lib/libcudart.so.x.
Returns
-------
dict[tuple[int, int], str]
A dictionary of detected Cuda versions to their install paths
"""
retval: dict[tuple[int, int], str] = {}
usr = os.path.join(os.sep, "usr", "local")
for folder in _files_from_folder(usr, self._folder_prefix):
path = os.path.join(usr, folder)
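            # Skip symlinks (e.g. a 'cuda-12' link pointing at 'cuda-12.1') so the same install
            # is not collected twice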
if os.path.islink(path):
continue
version = self.version_from_version_file(path) or self._version_from_lib(path)
if version is not None:
retval[version] = path
return retval
def _versions_from_alternatives(self) -> dict[tuple[int, int], str]:
""" Attempt to detect all installed Cuda versions from update-alternatives
Returns
-------
        dict[tuple[int, int], str]
A dictionary of detected Cuda versions to their install paths found in
update-alternatives
"""
retval: dict[tuple[int, int], str] = {}
alts = self._alternatives.alternatives
for path in alts:
vers = self.version_from_version_file(path) or self._version_from_lib(path)
if vers is not None:
retval[vers] = path
logger.debug("Versions from 'update-alternatives': %s", retval)
return retval
def _parent_from_targets(self, folder: str) -> str:
""" Obtain the Cuda parent folder from a path obtained from child targets folder
Parameters
----------
folder : str
Full path to a folder that has a 'targets' folder in its path
Returns
-------
str
The potential parent Cuda folder, or an empty string if not detected
"""
split = folder.split(os.sep)
return os.sep.join(split[:split.index("targets")]) if "targets" in split else ""
def _versions_from_dynamic_linker(self) -> dict[tuple[int, int], str]:
""" Attempt to detect all installed Cuda versions from ldconfig
Returns
-------
dict[tuple[int, int], str]
The Cuda version to the folder path found from ldconfig
"""
retval: dict[tuple[int, int], str] = {}
folders = _check_dynamic_linker(self._lib)
cuda_roots = [self._parent_from_targets(f) for f in folders]
for path in cuda_roots:
if not path:
continue
version = self.version_from_version_file(path) or self._version_from_lib(path)
if version is not None:
retval[version] = path
logger.debug("Versions from 'ld_config': %s", retval)
return retval
def get_versions(self) -> dict[tuple[int, int], str]:
""" Attempt to detect all installed Cuda versions on Linux systems
Returns
-------
dict[tuple[int, int], str]
The Cuda version to the folder path on Linux
"""
versions = (self._versions_from_usr() |
self._versions_from_alternatives() |
self._versions_from_dynamic_linker())
return {k: versions[k] for k in sorted(versions)}
def _version_from_alternatives(self) -> tuple[int, int] | None:
""" Attempt to get the default Cuda version from update-alternatives
Returns
-------
tuple[int, int] | None
            The detected default Cuda version. ``None`` if no version is detected
"""
default = self._alternatives.default
if not default:
return None
retval = self.version_from_version_file(default) or self._version_from_lib(default)
logger.debug("Version from update-alternatives: %s", retval)
return retval
def _version_from_link(self) -> tuple[int, int] | None:
""" Attempt to get the default Cuda version from the /usr/local/cuda file
Returns
-------
tuple[int, int] | None
            The detected default Cuda version. ``None`` if no version is detected
"""
path = os.path.join(os.sep, "usr", "local", "cuda")
if not os.path.exists(path):
return None
real_path = os.path.abspath(os.path.realpath(path)) if os.path.islink(path) else path
retval = self.version_from_version_file(real_path) or self._version_from_lib(real_path)
logger.debug("Version from symlink: %s", retval)
return retval
def _version_from_dynamic_linker(self) -> tuple[int, int] | None:
""" Attempt to get the default version from ldconfig or $LD_LIBRARY_PATH
Returns
-------
        tuple[int, int] | None
            The detected default Cuda version. ``None`` if no version is detected
"""
paths = _check_dynamic_linker(self._lib)
if len(paths) != 1: # Multiple or None
return None
root = self._parent_from_targets(paths[0])
retval = self.version_from_version_file(root) or self._version_from_lib(root)
logger.debug("Version from ld_config: %s", retval)
return retval
def get_version(self) -> tuple[int, int] | None:
""" Attempt to locate the default Cuda version on Linux
        Checks, in order: update-alternatives, the /usr/local/cuda symlink, then ldconfig
Returns
-------
tuple[int, int] | None
The Default global Cuda version or ``None`` if not found
"""
return (self._version_from_alternatives() or
self._version_from_link() or
self._version_from_dynamic_linker())
def get_cudnn_versions(self) -> dict[tuple[int, int], tuple[int, int, int]]:
""" Attempt to locate any installed cuDNN versions on Linux
Returns
-------
dict[tuple[int, int], tuple[int, int, int]]
Detected cuDNN version for each installed Cuda. key (0, 0) denotes globally installed
cudnn
"""
retval: dict[tuple[int, int], tuple[int, int, int]] = {}
gbl = ["/usr/include", "/usr/local/include"]
lcl = [os.path.join(f, "include") for f in self._paths]
for root in gbl + lcl:
for folder, _, filenames in os.walk(root):
if self._cudnn_header not in filenames:
continue
version = self.cudnn_version_from_header(folder)
if not version:
continue
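                # Global roots map to the (0, 0) key; local roots are '<cuda_root>/include', so
                # the parent folder indexes back into self._paths / self.versions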
cuda_vers = ((0, 0) if root in gbl
else self.versions[self._paths.index(os.path.dirname(root))])
retval[cuda_vers] = version
return retval
class CudaWindows(_Cuda):
""" Find the location of system installed Cuda and cuDNN on Windows. """
@classmethod
def _enum_subkeys(cls, key: HKEYType) -> T.Generator[str, None, None]:
""" Iterate through a Registry key's sub-keys
Parameters
----------
key : :class:`winreg.HKEYType`
The Registry key to iterate
Yields
------
str
A sub-key name from the given registry key
"""
assert winreg is not None
i = 0
while True:
try:
yield winreg.EnumKey(key, i) # type:ignore[attr-defined]
except OSError:
break
i += 1
def get_versions(self) -> dict[tuple[int, int], str]:
""" Attempt to detect all installed Cuda versions on Windows systems from the registry
Returns
-------
dict[tuple[int, int], str]
The Cuda version to the folder path on Windows
"""
retval: dict[tuple[int, int], str] = {}
assert winreg is not None
reg_key = r"SOFTWARE\NVIDIA Corporation\GPU Computing Toolkit\CUDA"
paths = {k.lower().replace("cuda_path_", "").replace("_", "."): v
for k, v in os.environ.items()
if "cuda_path_v" in k.lower()}
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, # type:ignore[attr-defined]
reg_key) as key:
for version in self._enum_subkeys(key):
vers_tuple = self._tuple_from_string(version[1:])
if vers_tuple is not None:
retval[vers_tuple] = paths.get(version, "")
except FileNotFoundError:
logger.debug("Could not find Windows Registry key '%s'", reg_key)
return {k: retval[k] for k in sorted(retval)}
def get_version(self) -> tuple[int, int] | None:
""" Attempt to get the default Cuda version from the Environment Variable
Returns
-------
tuple[int, int] | None
The Default global Cuda version or ``None`` if not found
"""
path = os.environ.get("CUDA_PATH")
if not path or path not in self._paths:
return None
retval = self.versions[self._paths.index(path)]
logger.debug("Version from CUDA_PATH Environment Variable: %s", path)
return retval
    def _get_cudnn_paths(self) -> list[str]:  # noqa: C901
""" Attempt to locate the locations of cuDNN installs for Windows
Returns
-------
list[str]
Full path to existing cuDNN installs under Windows
"""
assert winreg is not None
paths: set[str] = set()
cudnn_key = "cudnn_cuda"
reg_key = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall"
lookups = (winreg.HKEY_LOCAL_MACHINE, # type:ignore[attr-defined]
winreg.HKEY_CURRENT_USER) # type:ignore[attr-defined]
for lookup in lookups:
try:
key = winreg.OpenKey(lookup, reg_key) # type:ignore[attr-defined]
except FileNotFoundError:
continue
for name in self._enum_subkeys(key):
if cudnn_key not in name.lower():
logger.debug("Skipping subkey '%s'", name)
continue
            try:
                subkey = winreg.OpenKey(key, name)  # type:ignore[attr-defined]
            except FileNotFoundError:
                logger.debug("Skipping subkey not found '%s'", name)
                continue
            logger.debug("Parsing cudnn key '%s'", name)
try:
path, _ = winreg.QueryValueEx(subkey, # type:ignore[attr-defined]
"InstallLocation")
except (FileNotFoundError, OSError):
logger.debug("Skipping missing InstallLocation for sub-key '%s'", subkey)
continue
if not os.path.isdir(path):
logger.debug("Skipping non-existant path '%s'", path)
continue
paths.add(path)
retval = list(paths)
logger.debug("cudnn install paths: %s", retval)
return retval
def get_cudnn_versions(self) -> dict[tuple[int, int], tuple[int, int, int]]:
""" Attempt to locate any installed cuDNN versions on Windows
Returns
-------
dict[tuple[int, int], tuple[int, int, int]]
Detected cuDNN version for each installed Cuda. key (0, 0) denotes globally installed
cudnn
"""
retval: dict[tuple[int, int], tuple[int, int, int]] = {}
gbl = self._get_cudnn_paths()
lcl = [os.path.join(f, "include") for f in self._paths]
for root in gbl + lcl:
for folder, _, filenames in os.walk(root):
if self._cudnn_header not in filenames:
continue
version = self.cudnn_version_from_header(folder)
if not version:
continue
cuda_vers = ((0, 0) if root in gbl
else self.versions[self._paths.index(os.path.dirname(root))])
retval[cuda_vers] = version
return retval
def get_cuda_finder() -> type[_Cuda]:
"""Create a platform-specific CUDA object.
Returns
-------
type[_Cuda]
The OS specific finder for system-wide Cuda
"""
if platform.system().lower() == "windows":
return CudaWindows
return CudaLinux
Cuda = get_cuda_finder()
class ROCm():
""" Find the location of system installed ROCm on Linux """
def __init__(self) -> None:
self.version_min = min(v[0] for v in _TORCH_ROCM_REQUIREMENTS.values())
self.version_max = max(v[1] for v in _TORCH_ROCM_REQUIREMENTS.values())
self.versions: list[tuple[int, int, int]] = []
""" list[tuple[int, int, int]] : All detected ROCm installed versions """
self.version: tuple[int, int, int] = (0, 0, 0)
""" tuple[int, int, int] : Default ROCm installed version. (0, 0, 0) if not detected """
self._folder_prefix = "rocm-"
self._version_files = ["version-rocm", "version"]
self._lib = "librocm-core.so"
self._alternatives = _Alternatives("rocm")
self._re_version = re.compile(r"(\d+\.\d+\.\d+)(?=$|[-.])")
self._re_config = re.compile(r"\sroc-(\d+\.\d+\.\d+)(?=\s|[-.])")
if platform.system() == "Linux":
self._rocm_check()
def __repr__(self) -> str:
""" Pretty representation of this class """
attrs = ", ".join(f"{k}={repr(v)}" for k, v in self.__dict__.items()
if not k.startswith("_"))
return f"{self.__class__.__name__}({attrs})"
@property
def valid_versions(self) -> list[tuple[int, int, int]]:
""" list[tuple[int, int, int]] """
return [v for v in self.versions if self.version_min <= v[:2] <= self.version_max]
@property
def valid_installed(self) -> bool:
""" bool : ``True`` if a valid version of ROCm is installed """
        return bool(self.valid_versions)
@property
    def is_valid(self) -> bool:
""" bool : ``True`` if the default ROCm version is valid """
return self.version_min <= self.version[:2] <= self.version_max
@classmethod
def _tuple_from_string(cls, version: str) -> tuple[int, int, int] | None:
""" Convert a ROCm version string to a version tuple
Parameters
----------
version : str
The ROCm version string to convert
Returns
-------
tuple[int, int, int] | None
The converted ROCm version string. ``None`` if not a valid version string
"""
split = version.split(".")
if len(split) != 3:
return None
if not all(x.isdigit() for x in split):
return None
return (int(split[0]), int(split[1]), int(split[2]))
def _version_from_string(self, string: str) -> tuple[int, int, int] | None:
""" Obtain the ROCm version from the end of a string
Parameters
----------
string : str
The string to test for a valid ROCm version
Returns
-------
tuple[int, int, int] | None
            The ROCm version found within the string or ``None`` if not detected
"""
re_vers = self._re_version.search(string)
if re_vers is None:
return None
return self._tuple_from_string(re_vers.group(1))
def _version_from_info(self, folder: str) -> tuple[int, int, int] | None:
""" Attempt to locate the version from a version file within a ROCm .info folder
Parameters
----------
        folder : str
Full path to the ROCm .info folder
Returns
-------
tuple[int, int, int] | None
The ROCm version extracted from a version file within the .info folder. ``None`` if
not detected
"""
info_loc = [os.path.join(folder, ".info", v) for v in self._version_files]
for info_file in info_loc:
if not os.path.exists(info_file):
continue
with open(info_file, "r", encoding="utf-8") as f:
vers_string = f.read().strip()
if not vers_string:
continue
retval = self._tuple_from_string(vers_string.split("-", maxsplit=1)[0])
if retval is None:
continue
logger.debug("Version from '%s': %s", info_file, retval)
return retval
return None
def _version_from_lib(self, folder: str) -> tuple[int, int, int] | None:
""" Attempt to locate the version from the existence of librocm-core.so within a ROCm
lib folder
Parameters
----------
folder : str
Full file path to the ROCm folder
Returns
-------
tuple[int, int, int] | None
The ROCm version identified by the existence of the librocm-core.so file. ``None`` if
not detected
"""
lib_folder = os.path.join(folder, "lib")
lib_files = _files_from_folder(lib_folder, self._lib)
if not lib_files:
return None
# librocm-core naming is librocm-core.so.1.0.##### which is ambiguous. Get from folder
rocm_folder = os.path.basename(folder)
if not rocm_folder.startswith(self._folder_prefix):
return None
retval = self._version_from_string(rocm_folder)
logger.debug("Version from '%s': %s", os.path.join(lib_folder, self._lib), retval)
return retval
def _versions_from_opt(self) -> list[tuple[int, int, int]]:
""" Attempt to detect all installed ROCm versions from the /opt folder
        Scan /opt for rocm-x.x.x folders containing either .info or lib/librocm-core.so.x
Returns
-------
list[tuple[int, int, int]]
Any ROCm versions found in the /opt folder
"""
retval: list[tuple[int, int, int]] = []
opt = os.path.join(os.sep, "opt")
for folder in _files_from_folder(opt, self._folder_prefix):
path = os.path.join(opt, folder)
version = self._version_from_info(path) or self._version_from_lib(path)
if version is not None:
retval.append(version)
return retval
def _versions_from_alternatives(self) -> list[tuple[int, int, int]]:
""" Attempt to detect all installed ROCm versions from update-alternatives
Returns
-------
list[tuple[int, int, int]]
Any ROCm versions found in update-alternatives
"""
alts = self._alternatives.alternatives
if not alts:
return []
versions = [self._version_from_string(c) for c in alts]
retval = list(set(v for v in versions if v is not None))
logger.debug("Versions from 'update-alternatives': %s", retval)
return retval
def _versions_from_dynamic_linker(self) -> list[tuple[int, int, int]]:
""" Attempt to detect all installed ROCm versions from ldconfig
Returns
-------
        list[tuple[int, int, int]]
The ROCm versions found from ldconfig
"""
retval: list[tuple[int, int, int]] = []
folders = _check_dynamic_linker(self._lib)
for folder in folders:
path = os.path.dirname(folder)
version = self._version_from_info(path) or self._version_from_lib(path)
if version is not None:
retval.append(version)
logger.debug("Versions from 'ld_config': %s", retval)
return retval
def _get_versions(self) -> None:
""" Attempt to detect all installed ROCm versions and populate to :attr:`rocm_versions` """
versions = list(sorted(set(self._versions_from_opt()) |
set(self._versions_from_alternatives()) |
set(self._versions_from_dynamic_linker())))
if versions:
logger.debug("ROCm Versions: %s", versions)
self.versions = versions
return
logger.debug("Could not locate any ROCm versions")
def _version_from_hipconfig(self) -> tuple[int, int, int] | None:
""" Attempt to get the default version from hipconfig
Returns
-------
tuple[int, int, int] | None
            The detected default ROCm version. ``None`` if no version is detected
"""
retval: tuple[int, int, int] | None = None
exe = which("hipconfig")
if not exe:
return retval
lines = _lines_from_command([exe, "--full"])
if not lines:
return retval
for line in lines:
line = line.strip()
if line.startswith("ROCM_PATH"):
path = line.split(":", maxsplit=1)[-1]
retval = self._version_from_info(path) or self._version_from_lib(path)
match = self._re_config.search(line)
if match is not None:
retval = self._tuple_from_string(match.group(1))
logger.debug("Version from hipconfig: %s", retval)
return retval
def _version_from_alternatives(self) -> tuple[int, int, int] | None:
""" Attempt to get the default version from update-alternatives
Returns
-------
tuple[int, int, int] | None
            The detected default ROCm version. ``None`` if no version is detected
"""
default = self._alternatives.default
if not default:
return None
retval = self._version_from_string(default.rsplit(os.sep, maxsplit=1)[-1])
logger.debug("Version from update-alternatives: %s", retval)
return retval
def _version_from_link(self) -> tuple[int, int, int] | None:
""" Attempt to get the default version from the /opt/rocm file
Returns
-------
tuple[int, int, int] | None
            The detected default ROCm version. ``None`` if no version is detected
"""
path = os.path.join(os.sep, "opt", "rocm")
if not os.path.exists(path):
return None
real_path = os.path.abspath(os.path.realpath(path)) if os.path.islink(path) else path
retval = self._version_from_info(real_path) or self._version_from_lib(real_path)
logger.debug("Version from symlink: %s", retval)
return retval
def _version_from_dynamic_linker(self) -> tuple[int, int, int] | None:
""" Attempt to get the default version from ldconfig or $LD_LIBRARY_PATH
Returns
-------
tuple[int, int, int] | None
            The detected default ROCm version. ``None`` if no version is detected
"""
paths = _check_dynamic_linker("librocm-core.so.")
if len(paths) != 1: # Multiple or None
return None
path = os.path.dirname(paths[0])
retval = self._version_from_info(path) or self._version_from_lib(path)
logger.debug("Version from ld_config: %s", retval)
return retval
def _get_version(self) -> None:
""" Attempt to detect the default ROCm version """
version = (self._version_from_hipconfig() or
self._version_from_alternatives() or
self._version_from_link() or
self._version_from_dynamic_linker())
if version is not None:
logger.debug("ROCm default version: %s", version)
self.version = version
return
logger.debug("Could not locate default ROCm version")
def _rocm_check(self) -> None:
""" Attempt to locate the installed ROCm versions and the default ROCm version """
self._get_versions()
self._get_version()
logger.debug("ROCm Versions: %s, Version: %s", self.versions, self.version)
__all__ = get_module_objects(__name__)
if __name__ == "__main__":
print(Cuda())
print(ROCm())
| {
"repo_id": "deepfakes/faceswap",
"file_path": "lib/system/ml_libs.py",
"license": "GNU General Public License v3.0",
"lines": 849,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepfakes/faceswap:lib/system/system.py | #!/usr/bin/env python3
"""
Holds information about the running system. Used in setup.py and lib.sysinfo
NOTE: Only packages from Python's Standard Library should be imported in this module
"""
from __future__ import annotations
import ctypes
import locale
import logging
import os
import platform
import re
import sys
import typing as T
from shutil import which
from subprocess import CalledProcessError, run
from lib.utils import get_module_objects
logger = logging.getLogger(__name__)
VALID_PYTHON = ((3, 11), (3, 13))
""" tuple[tuple[int, int], tuple[int, int]] : The minimum and maximum versions of Python that can
run Faceswap """
VALID_TORCH = ((2, 3), (2, 9))
""" tuple[tuple[int, int], tuple[int, int]] : The minimum and maximum versions of Torch that can
run Faceswap """
VALID_KERAS = ((3, 12), (3, 12))
""" tuple[tuple[int, int], tuple[int, int]] : The minimum and maximum versions of Keras that can
run Faceswap """
def _lines_from_command(command: list[str]) -> list[str]:
""" Output stdout lines from an executed command.
Parameters
----------
command : list[str]
The command to run
Returns
-------
list[str]
The output lines from the given command
"""
logger.debug("Running command %s", command)
try:
proc = run(command,
capture_output=True,
check=True,
encoding=locale.getpreferredencoding(),
errors="replace")
except (FileNotFoundError, CalledProcessError) as err:
logger.debug("Error from command: %s", str(err))
return []
return proc.stdout.splitlines()
class System: # pylint:disable=too-many-instance-attributes
""" Holds information about the currently running system and environment """
def __init__(self) -> None:
self.platform = platform.platform()
""" str : Human readable platform identifier """
self.system: T.Literal["darwin", "linux", "windows"] = T.cast(
T.Literal["darwin", "linux", "windows"], platform.system().lower())
""" str : The system (OS type) that this code is running on. Always lowercase """
self.machine = platform.machine()
""" str : The machine type (eg: "x86_64") """
self.release = platform.release()
""" str : The OS Release that this code is running on """
self.processor = platform.processor()
""" str : The processor in use, if detected """
self.cpu_count = os.cpu_count()
""" int : The number of CPU cores on the system """
self.python_implementation = platform.python_implementation()
""" str : The python implementation in use"""
self.python_version = platform.python_version()
""" str : The <major>.<minor>.<release> version of Python that is running """
self.python_architecture = platform.architecture()[0]
""" str : The Python architecture that is running (eg: 64bit/32bit)"""
self.encoding = locale.getpreferredencoding()
""" str : The system encoding """
self.is_conda = ("conda" in sys.version.lower() or
os.path.exists(os.path.join(sys.prefix, 'conda-meta')))
""" bool : ``True`` if running under Conda otherwise ``False`` """
self.is_admin = self._get_permissions()
""" bool : ``True`` if we are running with Admin privileges """
self.is_virtual_env = self._check_virtual_env()
""" bool : ``True`` if Python is being run inside a virtual environment """
@property
def is_linux(self) -> bool:
""" bool : `True` if running on a Linux system otherwise ``False``. """
return self.system == "linux"
@property
def is_macos(self) -> bool:
""" bool : `True` if running on a macOS system otherwise ``False``. """
return self.system == "darwin"
@property
def is_windows(self) -> bool:
""" bool : `True` if running on a Windows system otherwise ``False``. """
return self.system == "windows"
def __repr__(self) -> str:
""" Pretty print the system information for logging """
attrs = ", ".join(f"{k}={repr(v)}" for k, v in self.__dict__.items()
if not k.startswith("_"))
return f"{self.__class__.__name__}({attrs})"
def _get_permissions(self) -> bool:
""" Check whether user is admin
Returns
-------
bool
``True`` if we are running with Admin privileges
"""
if self.is_windows:
retval = ctypes.windll.shell32.IsUserAnAdmin() != 0 # type:ignore[attr-defined]
else:
retval = os.getuid() == 0 # type:ignore[attr-defined] # pylint:disable=no-member
return retval
def _check_virtual_env(self) -> bool:
""" Check whether we are in a virtual environment
Returns
-------
bool
``True`` if Python is being run inside a virtual environment
"""
if not self.is_conda:
retval = (hasattr(sys, "real_prefix") or
(hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix))
else:
prefix = os.path.dirname(sys.prefix)
retval = os.path.basename(prefix) == "envs"
return retval
def validate_python(self, max_version: tuple[int, int] | None = None) -> bool:
""" Check that the running Python version is valid
Parameters
----------
max_version: tuple[int, int] | None, Optional
The max version to validate Python against. ``None`` for the project Maximum.
Default: ``None`` (project maximum)
Returns
-------
bool
``True`` if the running Python version is valid, otherwise logs an error and exits
"""
max_python = VALID_PYTHON[1] if max_version is None else max_version
retval = (VALID_PYTHON[0] <= sys.version_info[:2] <= max_python
and self.python_architecture == "64bit")
logger.debug("Python version %s(%s) within %s - %s(64bit): %s",
self.python_version,
self.python_architecture,
VALID_PYTHON[0],
max_python,
retval)
if not retval:
print()
logger.error("Your Python version %s(%s) is unsupported. Please run with Python "
"version %s to %s 64bit.",
self.python_version,
self.python_architecture,
".".join(str(x) for x in VALID_PYTHON[0]),
".".join(str(x) for x in max_python))
print()
logger.error("If you have recently upgraded faceswap, then you will need to create a "
"new virtual environment.")
logger.error("The easiest way to do this is to run the latest version of the Faceswap "
"installer from:")
logger.error("https://github.com/deepfakes/faceswap/releases")
print()
input("Press <Enter> to close")
sys.exit(1)
return retval
def validate(self) -> None:
""" Perform validation that the running system can be used for faceswap. Log an error and
exit if it cannot """
if not any((self.is_linux, self.is_macos, self.is_windows)):
logger.error("Your system %s is not supported!", self.system.title())
sys.exit(1)
if self.is_macos and self.machine == "arm64" and not self.is_conda:
logger.error("Setting up Faceswap for Apple Silicon outside of a Conda "
"environment is unsupported")
sys.exit(1)
self.validate_python()
class Packages():
""" Holds information about installed python and conda packages.
Note: Packaging library is lazy loaded as it may not be available during setup.py
"""
def __init__(self) -> None:
self._conda_exe = which("conda")
self._installed_python = self._get_installed_python()
self._installed_conda: list[str] | None = None
self._get_installed_conda()
@property
def installed_python(self) -> dict[str, str]:
""" dict[str, str] : Installed Python package names to Python package versions """
return self._installed_python
@property
def installed_python_pretty(self) -> str:
""" str: A pretty printed representation of installed Python packages """
pkgs = self._installed_python
align = max(len(x) for x in pkgs) + 1
return "\n".join(f"{k.ljust(align)} {v}" for k, v in pkgs.items())
@property
def installed_conda(self) -> dict[str, tuple[str, str, str]]:
""" dict[str, tuple[str, str]] : Installed Conda package names to the version and
channel """
if not self._installed_conda:
return {}
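        # 'conda list --show-channel-urls' emits '#' comment lines followed by rows of
        # "name version build channel" separated by variable-width whitespace, hence the
        # four-column assertion below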
installed = [re.sub(" +", " ", line.strip())
for line in self._installed_conda if not line.startswith("#")]
retval = {}
for pkg in installed:
item = pkg.split(" ")
assert len(item) == 4
retval[item[0]] = T.cast(tuple[str, str, str], tuple(item[1:]))
return retval
@property
def installed_conda_pretty(self) -> str:
""" str: A pretty printed representation of installed conda packages """
if not self._installed_conda:
return "Could not get Conda package list"
return "\n".join(self._installed_conda)
def __repr__(self) -> str:
""" Pretty print the installed packages for logging """
props = ", ".join(
f"{k}={repr(getattr(self, k))}"
for k, v in self.__class__.__dict__.items()
if isinstance(v, property) and not k.startswith("_") and "pretty" not in k)
return f"{self.__class__.__name__}({props})"
def _get_installed_python(self) -> dict[str, str]:
""" Parse the installed python modules
Returns
-------
dict[str, str]
Installed Python package names to Python package versions
"""
installed = _lines_from_command([sys.executable, "-m", "pip", "freeze", "--local"])
retval = {}
for pkg in installed:
if "==" not in pkg:
continue
item = pkg.split("==")
retval[item[0].lower()] = item[1]
logger.debug("Installed Python packages: %s", retval)
return retval
def _get_installed_conda(self) -> None:
""" Collect the output from 'conda list' for the installed Conda packages and
populate :attr:`_installed_conda`
        """
if not self._conda_exe:
logger.debug("Conda not found. Not collecting packages")
return
lines = _lines_from_command([self._conda_exe, "list", "--show-channel-urls"])
if not lines:
self._installed_conda = ["Could not get Conda package list"]
return
self._installed_conda = lines
logger.debug("Installed Conda packages: %s", self.installed_conda)
__all__ = get_module_objects(__name__)
if __name__ == "__main__":
print(System())
print(Packages())
| {
"repo_id": "deepfakes/faceswap",
"file_path": "lib/system/system.py",
"license": "GNU General Public License v3.0",
"lines": 256,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepfakes/faceswap:lib/training/tensorboard.py | #!/usr/bin/env python3
""" Tensorboard call back for PyTorch logging. Hopefully temporary until a native Keras version
is implemented """
from __future__ import annotations
import logging
import os
import struct
import typing as T
import keras
from torch.utils.tensorboard import SummaryWriter
from lib.logger import parse_class_init
from lib.utils import get_module_objects
logger = logging.getLogger(__name__)
class RecordIterator:
""" A replacement for tensorflow's :func:`compat.v1.io.tf_record_iterator`
Parameters
----------
log_file : str
The event log file to obtain records from
is_live : bool, optional
``True`` if the log file is for a live training session that will constantly provide data.
Default: ``False``
"""
    def __init__(self, log_file: str, is_live: bool = False) -> None:
logger.debug(parse_class_init(locals()))
self._file_path = log_file
self._log_file = open(self._file_path, "rb") # pylint:disable=consider-using-with
self._is_live = is_live
self._position = 0
logger.debug("Initialized %s", self.__class__.__name__)
def __iter__(self) -> RecordIterator:
""" Iterate over a Tensorboard event file"""
return self
def _on_file_read(self) -> None:
""" If the file is closed and we are reading live data, re-open the file and seek to the
correct position """
if not self._is_live or not self._log_file.closed:
return
logger.trace("Re-opening '%s' and Seeking to %s", # type:ignore[attr-defined]
self._file_path, self._position)
self._log_file = open(self._file_path, "rb") # pylint:disable=consider-using-with
self._log_file.seek(self._position, 0)
def _on_file_end(self) -> None:
""" Close the event file. If live data, record the current position"""
if self._is_live:
self._position = self._log_file.tell()
logger.trace("Setting live position to %s", # type:ignore[attr-defined]
self._position)
logger.trace("EOF. Closing '%s'", self._file_path) # type:ignore[attr-defined]
self._log_file.close()
def __next__(self) -> bytes:
""" Get the next event log from a Tensorboard event file
Returns
-------
bytes
A Tensorboard event log
Raises
------
StopIteration
When the event log is fully consumed
"""
self._on_file_read()
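        # TFRecord wire format: a little-endian uint64 payload length, a 4-byte CRC of the
        # length, the payload bytes, then a 4-byte CRC of the payload. Both CRC fields are
        # seeked past rather than validated here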
b_header = self._log_file.read(8)
if not b_header:
self._on_file_end()
raise StopIteration
        read_len = int(struct.unpack("<Q", b_header)[0])
self._log_file.seek(4, 1)
data = self._log_file.read(read_len)
self._log_file.seek(4, 1)
logger.trace("Returning event data of len %s", read_len) # type:ignore[attr-defined]
return data
class TorchTensorBoard(keras.callbacks.Callback):
"""Enable visualizations for TensorBoard. Adapted from Keras' Tensorboard Callback keeping
only the parts we need, and using Torch rather than TensorFlow
Parameters
----------
    log_dir: str
        The path of the directory in which to save the log files to be parsed by TensorBoard. e.g.,
`log_dir = os.path.join(working_dir, 'logs')`. This directory should not be reused by any
other callbacks.
write_graph: bool (Not supported at this time)
Whether to visualize the graph in TensorBoard. Note that the log file can become quite
large when `write_graph` is set to `True`.
update_freq: Literal["batch", "epoch"] | int
When using `"epoch"`, writes the losses and metrics to TensorBoard after every epoch.
If using an integer, let's say `1000`, all metrics and losses (including custom ones
added by `Model.compile`) will be logged to TensorBoard every 1000 batches. `"batch"`
is a synonym for 1, meaning that they will be written every batch. Note however that
writing too frequently to TensorBoard can slow down your training, especially when used
with distribution strategies as it will incur additional synchronization overhead. Batch-
level summary writing is also available via `train_step` override. Please see [TensorBoard
Scalars
tutorial](https://www.tensorflow.org/tensorboard/scalars_and_keras#batch-level_logging)
"""
def __init__(self,
log_dir: str = "logs",
write_graph: bool = True,
update_freq: T.Literal["batch", "epoch"] | int = "epoch") -> None:
logger.debug(parse_class_init(locals()))
super().__init__()
self.log_dir = str(log_dir)
self.write_graph = write_graph
self.update_freq = 1 if update_freq == "batch" else update_freq
self._should_write_train_graph = False
self._train_dir = os.path.join(self.log_dir, "train")
self._train_step = 0
self._global_train_batch = 0
self._previous_epoch_iterations = 0
self._model: keras.models.Model | None = None
self._writers: dict[str, SummaryWriter] = {}
logger.debug("Initialized %s", self.__class__.__name__)
@property
def _train_writer(self) -> SummaryWriter:
""":class:`torch.utils.tensorboard.SummaryWriter`: The summary writer """
if "train" not in self._writers:
self._writers["train"] = SummaryWriter(self._train_dir)
return self._writers["train"]
def _write_keras_model_summary(self) -> None:
"""Writes Keras graph network summary to TensorBoard."""
assert self._model is not None
summary = self._model.to_json()
self._train_writer.add_text("keras", summary, global_step=0)
def _write_keras_model_train_graph(self) -> None:
"""Writes Keras graph to TensorBoard."""
# TODO implement
logger.debug("Tensorboard graph logging not yet implemented")
def set_model(self, model: keras.models.Model) -> None:
"""Sets Keras model and writes graph if specified.
Parameters
----------
model: :class:`keras.models.Model`
The model that is being trained
"""
self._model = model
if self.write_graph:
self._write_keras_model_summary()
self._should_write_train_graph = True
def on_train_begin(self, logs=None) -> None:
""" Initialize the call back on train start
Parameters
----------
logs: None
Unused
"""
self._global_train_batch = 0
self._previous_epoch_iterations = 0
def on_train_batch_end(self, batch: int, logs: dict[str, float] | None = None) -> None:
""" Update Tensorboard logs on batch end
Parameters
----------
batch: int
The current iteration count
logs: dict[str, float]
The logs to write
"""
assert logs is not None
if self._should_write_train_graph:
self._write_keras_model_train_graph()
self._should_write_train_graph = False
for key, value in logs.items():
self._train_writer.add_scalar(f"batch_{key}",
value,
global_step=batch)
def on_save(self) -> None:
""" Flush data to disk on save """
logger.debug("Flushing Tensorboard writer")
self._train_writer.flush()
def on_train_end(self, logs=None) -> None:
""" Close the writer on train completion
Parameters
----------
logs: None
Unused
"""
for writer in self._writers.values():
writer.flush()
writer.close()
__all__ = get_module_objects(__name__)
| {
"repo_id": "deepfakes/faceswap",
"file_path": "lib/training/tensorboard.py",
"license": "GNU General Public License v3.0",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
deepfakes/faceswap:plugins/convert/convert_config.py | #!/usr/bin/env python3
""" Default configurations for convert """
import logging
import os
from lib.config import FaceswapConfig
logger = logging.getLogger(__name__)
class _Config(FaceswapConfig):
""" Config File for Convert """
def set_defaults(self, helptext=""):
""" Set the default values for config """
super().set_defaults(helptext=helptext)
self._defaults_from_plugin(os.path.dirname(__file__))
_CONFIG: _Config | None = None
def load_config(config_file: str | None = None) -> _Config:
""" Load the Extraction configuration .ini file
Parameters
----------
config_file : str | None, optional
Path to a custom .ini configuration file to load. Default: ``None`` (use default
configuration file)
Returns
-------
:class:`_Config`
The loaded convert config object
"""
global _CONFIG # pylint:disable=global-statement
if _CONFIG is None:
_CONFIG = _Config(configfile=config_file)
return _CONFIG
| {
"repo_id": "deepfakes/faceswap",
"file_path": "plugins/convert/convert_config.py",
"license": "GNU General Public License v3.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
deepfakes/faceswap:plugins/extract/extract_config.py | #!/usr/bin/env python3
""" Default configurations for extract """
import gettext
import logging
import os
from lib.config import FaceswapConfig
from lib.config import ConfigItem
# LOCALES
_LANG = gettext.translation("plugins.extract.extract_config", localedir="locales", fallback=True)
_ = _LANG.gettext
logger = logging.getLogger(__name__)
class _Config(FaceswapConfig):
""" Config File for Extraction """
def set_defaults(self, helptext="") -> None:
""" Set the default values for config """
super().set_defaults(helptext=_("Options that apply to all extraction plugins"))
self._defaults_from_plugin(os.path.dirname(__file__))
aligner_min_scale = ConfigItem(
datatype=float,
default=0.03,
group=_("filters"),
info=_(
"Filters out faces below this size. This is a multiplier of the minimum dimension of "
"the frame (i.e. 1280x720 = 720). If the original face extract box is smaller than "
"the minimum dimension times this multiplier, it is considered a false positive and "
"discarded. Faces which are found to be unusually smaller than the frame tend to be "
"misaligned images, except in extreme long-shots. These can be usually be safely "
"discarded."),
min_max=(0.0, 1.0),
rounding=2)
aligner_max_scale = ConfigItem(
datatype=float,
default=4.00,
group=_("filters"),
info=_(
"Filters out faces above this size. This is a multiplier of the minimum dimension of "
"the frame (i.e. 1280x720 = 720). If the original face extract box is larger than the "
"minimum dimension times this multiplier, it is considered a false positive and "
"discarded. Faces which are found to be unusually larger than the frame tend to be "
"misaligned images except in extreme close-ups. These can be usually be safely "
"discarded."),
min_max=(0.0, 10.0),
rounding=2)
aligner_distance = ConfigItem(
datatype=float,
default=40.0,
group=_("filters"),
info=_(
"Filters out faces who's landmarks are above this distance from an 'average' face. "
"Values above 15 tend to be fairly safe. Values above 10 will remove more false "
"positives, but may also filter out some faces at extreme angles."),
min_max=(0.0, 45.0),
rounding=1)
aligner_roll = ConfigItem(
datatype=float,
default=0.0,
group=_("filters"),
info=_(
"Filters out faces who's calculated roll is greater than zero +/- this value in "
"degrees. Aligned faces should have a roll value close to zero. Values that are a "
"significant distance from 0 degrees tend to be misaligned images. These can usually "
"be safely disgarded."),
min_max=(0.0, 90.0),
rounding=1)
aligner_features = ConfigItem(
datatype=bool,
default=True,
group=_("filters"),
info=_(
"Filters out faces where the lowest point of the aligned face's eye or eyebrow is "
"lower than the highest point of the aligned face's mouth. Any faces where this "
"occurs are misaligned and can be safely disgarded."))
filter_refeed = ConfigItem(
datatype=bool,
default=True,
group=_("filters"),
info=_(
"If enabled, and 're-feed' has been selected for extraction, then interim alignments "
"will be filtered prior to averaging the final landmarks. This can help improve the "
"final alignments by removing any obvious misaligns from the interim results, and may "
"also help pick up difficult alignments. If disabled, then all re-feed results will "
"be averaged."))
save_filtered = ConfigItem(
datatype=bool,
default=False,
group=_("filters"),
info=_(
"If enabled, saves any filtered out images into a sub-folder during the extraction "
"process. If disabled, filtered faces are deleted. Note: The faces will always be "
"filtered out of the alignments file, regardless of whether you keep the faces or "
"not."))
realign_refeeds = ConfigItem(
datatype=bool,
default=True,
group=_("re-align"),
info=_(
"If enabled, and 're-align' has been selected for extraction, then all re-feed "
"iterations are re-aligned. If disabled, then only the final averaged output from re-"
"feed will be re-aligned."))
filter_realign = ConfigItem(
datatype=bool,
default=True,
group=_("re-align"),
info=_(
"If enabled, and 're-align' has been selected for extraction, then any alignments "
"which would be filtered out will not be re-aligned."))
# pylint:disable=duplicate-code
_IS_LOADED: bool = False
def load_config(config_file: str | None = None) -> None:
""" Load the Extraction configuration .ini file
Parameters
----------
config_file : str | None, optional
Path to a custom .ini configuration file to load. Default: ``None`` (use default
configuration file)
"""
global _IS_LOADED # pylint:disable=global-statement
if not _IS_LOADED:
_Config(configfile=config_file)
_IS_LOADED = True
| {
"repo_id": "deepfakes/faceswap",
"file_path": "plugins/extract/extract_config.py",
"license": "GNU General Public License v3.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
deepfakes/faceswap:plugins/train/model/_base/inference.py | #!/usr/bin/env python3
""" Handles the recompilation of a Faceswap model into a version that can be used for inference """
from __future__ import annotations
import logging
import typing as T
import keras
from lib.logger import parse_class_init
from lib.utils import get_module_objects
if T.TYPE_CHECKING:
import keras.src.ops.node
logger = logging.getLogger(__name__)
class Inference():
""" Calculates required layers and compiles a saved model for inference.
Parameters
----------
saved_model: :class:`keras.Model`
The saved trained Faceswap model
switch_sides: bool
``True`` if the swap should be performed "B" > "A" ``False`` if the swap should be
"A" > "B"
"""
def __init__(self, saved_model: keras.Model, switch_sides: bool) -> None:
logger.debug(parse_class_init(locals()))
self._layers: list[keras.Layer] = [lyr for lyr in saved_model.layers
if not isinstance(lyr, keras.layers.InputLayer)]
"""list[:class:`keras.layers.Layer]: All the layers that exist within the model excluding
input layers """
self._input = self._get_model_input(saved_model, switch_sides)
""":class:`keras.KerasTensor`: The correct input for the inference model """
self._name = f"{saved_model.name}_inference"
"""str: The name for the final inference model"""
self._model = self._build()
logger.debug("Initialized: %s", self.__class__.__name__)
@property
def model(self) -> keras.Model:
""" :class:`keras.Model`: The Faceswap model, compiled for inference. """
return self._model
def _get_model_input(self, model: keras.Model, switch_sides: bool) -> list[keras.KerasTensor]:
""" Obtain the inputs for the requested swap direction.
Parameters
----------
        model: :class:`keras.Model`
The saved trained Faceswap model
switch_sides: bool
``True`` if the swap should be performed "B" > "A" ``False`` if the swap should be
"A" > "B"
Returns
-------
        list[:class:`keras.KerasTensor`]
The input tensor to feed the model for the requested swap direction
"""
inputs: list[keras.KerasTensor] = model.input
assert len(inputs) == 2, "Faceswap models should have exactly 2 inputs"
idx = 0 if switch_sides else 1
retval = inputs[idx]
logger.debug("model inputs: %s, idx: %s, inference_input: '%s'",
[(i.name, i.shape[1:]) for i in inputs], idx, retval.name)
return [retval]
def _get_candidates(self, input_tensors: list[keras.KerasTensor | keras.Layer]
) -> T.Generator[tuple[keras.Layer, list[keras.src.ops.node.KerasHistory]],
None, None]:
""" Given a list of input tensors, get all layers from the main model which have the given
input tensors marked as Inbound nodes for the model
Parameters
----------
input_tensors: list[:class:`keras.KerasTensor` | :class:`keras.Layer`]
List of Tensors that act as an input to a layer within the model
Yields
------
        tuple[:class:`keras.Layer`, list[:class:`keras.src.ops.node.KerasHistory`]]
            Any layer in the main model that uses the given input tensors as an input, along with the
corresponding keras inbound history
"""
unique_input_names = set(i.name for i in input_tensors)
for layer in self._layers:
history = [tensor._keras_history # pylint:disable=protected-access
for node in layer._inbound_nodes # pylint:disable=protected-access
for parent in node.parent_nodes
for tensor in parent.outputs]
unique_inbound_names = set(h.operation.name for h in history)
if not unique_input_names.issubset(unique_inbound_names):
logger.debug("%s: Skipping candidate '%s' unmatched inputs: %s",
unique_input_names, layer.name, unique_inbound_names)
continue
logger.debug("%s: Yielding candidate '%s'. History: %s",
unique_input_names, layer.name, [(h.operation.name, h.node_index)
for h in history])
yield layer, history
@T.overload
def _group_inputs(self, layer: keras.Layer, inputs: list[tuple[keras.Layer, int]]
) -> list[list[tuple[keras.Layer, int]]]:
...
@T.overload
def _group_inputs(self, layer: keras.Layer, inputs: list[keras.src.ops.node.KerasHistory]
) -> list[list[keras.src.ops.node.KerasHistory]]:
...
def _group_inputs(self, layer, inputs):
""" Layers can have more than one input. In these instances we need to group the inputs
and the layers' inbound nodes to correspond to inputs per instance.
Parameters
----------
layer: :class:`keras.Layer`
The current layer being processed
inputs: list[:class:`keras.KerasTensor`] | list[:class:`keras.src.ops.node.KerasHistory`]
List of input tensors or inbound keras histories to be grouped per layer input
Returns
-------
list[list[tuple[:class:`keras.Layer`, int]]] |
        list[list[:class:`keras.src.ops.node.KerasHistory`]]
            A list of lists of input layers and the corresponding node index, or inbound keras
histories
"""
layer_inputs = 1 if isinstance(layer.input, keras.KerasTensor) else len(layer.input)
num_inputs = len(inputs)
total_calls = num_inputs / layer_inputs
assert total_calls.is_integer()
total_calls = int(total_calls)
retval = [inputs[i * layer_inputs: i * layer_inputs + layer_inputs]
for i in range(total_calls)]
return retval
def _layers_from_inputs(self,
input_tensors: list[keras.KerasTensor | keras.Layer],
node_indices: list[int]
) -> tuple[list[keras.Layer],
list[keras.src.ops.node.KerasHistory],
list[int]]:
""" Given a list of input tensors and their corresponding inbound node ids, return all of
the layers for the model that uses the given nodes as their input
Parameters
----------
input_tensors: list[:class:`keras.KerasTensor` | :class:`keras.Layer`]
List of Tensors that act as an input to a layer within the model
node_indices: list[int]
The list of node indices corresponding to the inbound node index of the given layers
Returns
-------
list[:class:`keras.layers.Layer`]
Any layers from the model that use the given inputs as its input. Empty list if there
are no matches
list[:class:`keras.src.ops.node.KerasHistory`]
The keras inbound history for the layers
list[int]
The output node index for the layer, used for the inbound node index of the next layer
"""
retval: tuple[list[keras.Layer],
list[keras.src.ops.node.KerasHistory],
list[int]] = ([], [], [])
for layer, history in self._get_candidates(input_tensors):
grp_inputs = self._group_inputs(layer, list(zip(input_tensors, node_indices)))
grp_hist = self._group_inputs(layer, history)
for input_group in grp_inputs: # pylint:disable=not-an-iterable
have = [(i[0].name, i[1]) for i in input_group]
for out_idx, hist in enumerate(grp_hist):
requires = [(h.operation.name, h.node_index) for h in hist]
if sorted(have) != sorted(requires):
logger.debug("%s: Skipping '%s'. Requires %s. Output node index: %s",
have, layer.name, requires, out_idx)
continue
retval[0].append(layer)
retval[1].append(hist)
retval[2].append(out_idx)
logger.debug("Got layers %s for input_tensors: %s",
[x.name for x in retval[0]], [t.name for t in input_tensors])
return retval
def _build_layers(self,
layers: list[keras.Layer],
history: list[keras.src.ops.node.KerasHistory],
inputs: list[keras.KerasTensor]) -> list[keras.KerasTensor]:
""" Compile the given layers with the given inputs
Parameters
----------
layers: list[:class:`keras.Layer`]
The layers to be called with the given inputs
history: list[:class:`keras.src.ops.node.KerasHistory`]
The corresponding keras inbound history for the layers
        inputs: list[:class:`keras.KerasTensor`]
The inputs for the given layers
Returns
-------
list[:class:`keras.KerasTensor`]
The list of compiled layers
"""
retval = []
given_order = [i._keras_history.operation.name # pylint:disable=protected-access
for i in inputs]
for layer, hist in zip(layers, history):
layer_input = [inputs[given_order.index(h.operation.name)]
for h in hist if h.operation.name in given_order]
if layer_input != inputs:
logger.debug("Sorted layer inputs %s to %s",
given_order,
[i._keras_history.operation.name # pylint:disable=protected-access
for i in layer_input])
if isinstance(layer_input, list) and len(layer_input) == 1:
# Flatten single inputs to stop Keras warnings
actual_input = layer_input[0]
else:
actual_input = layer_input
built = layer(actual_input)
built = built if isinstance(built, list) else [built]
logger.debug(
"Compiled layer '%s' from input(s) %s",
layer.name,
[i._keras_history.operation.name # pylint:disable=protected-access
for i in layer_input])
retval.extend(built)
logger.debug(
"Compiled layers %s from input %s",
[x._keras_history.operation.name for x in retval], # pylint:disable=protected-access
[x._keras_history.operation.name for x in inputs]) # pylint:disable=protected-access
return retval
def _build(self):
""" Extract the sub-models from the saved model that are required for inference.
Returns
-------
:class:`keras.Model`
The model compiled for inference
"""
logger.debug("Compiling inference model")
layers = self._input
node_index = [0]
built = layers
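        # Walk the saved graph level by level from the chosen input, re-calling each layer once
        # all of its inbound tensors have been produced, until no further layers consume the
        # current outputs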
while True:
layers, history, node_index = self._layers_from_inputs(layers, node_index)
if not layers:
break
built = self._build_layers(layers, history, built)
assert len(self._input) == 1
assert len(built) in (1, 2)
out = built[0] if len(built) == 1 else built
retval = keras.Model(inputs=self._input[0], outputs=out, name=self._name)
logger.debug("Compiled inference model '%s': %s", retval.name, retval)
return retval
__all__ = get_module_objects(__name__)
| {
"repo_id": "deepfakes/faceswap",
"file_path": "plugins/train/model/_base/inference.py",
"license": "GNU General Public License v3.0",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepfakes/faceswap:plugins/train/model/_base/state.py | #!/usr/bin/env python3
""" Handles the loading and saving of a model's state file """
from __future__ import annotations
import logging
import os
import time
import typing as T
from importlib import import_module
from inspect import isclass
from lib.logger import parse_class_init
from lib.serializer import get_serializer
from lib.utils import get_module_objects
from lib.config.objects import ConfigItem, GlobalSection
from plugins.train import train_config as cfg
if T.TYPE_CHECKING:
from lib.config import ConfigValueType
logger = logging.getLogger(__name__)
class State(): # pylint:disable=too-many-instance-attributes
""" Holds state information relating to the plugin's saved model.
Parameters
----------
model_dir: str
The full path to the model save location
model_name: str
The name of the model plugin
no_logs: bool
``True`` if Tensorboard logs should not be generated, otherwise ``False``
"""
def __init__(self,
model_dir: str,
model_name: str,
no_logs: bool) -> None:
logger.debug(parse_class_init(locals()))
self._serializer = get_serializer("json")
filename = f"{model_name}_state.{self._serializer.file_extension}"
self._filename = os.path.join(model_dir, filename)
self._name = model_name
self._iterations = 0
self._mixed_precision_layers: list[str] = []
self._lr_finder = -1.0
self._rebuild_model = False
self._sessions: dict[int, dict] = {}
self.lowest_avg_loss: float = 0.0
"""float: The lowest average loss seen between save intervals. """
self._config: dict[str, ConfigValueType] = {}
self._updateable_options: list[str] = []
self._load()
self._session_id = self._new_session_id()
self._create_new_session(no_logs)
logger.debug("Initialized %s:", self.__class__.__name__)
@property
def filename(self) -> str:
""" str: Full path to the state filename """
return self._filename
@property
def loss_names(self) -> list[str]:
""" list: The loss names for the current session """
return self._sessions[self._session_id]["loss_names"]
@property
def current_session(self) -> dict:
""" dict: The state dictionary for the current :attr:`session_id`. """
return self._sessions[self._session_id]
@property
def iterations(self) -> int:
""" int: The total number of iterations that the model has trained. """
return self._iterations
@property
def session_id(self) -> int:
""" int: The current training session id. """
return self._session_id
@property
def sessions(self) -> dict[int, dict[str, T.Any]]:
""" dict[int, dict[str, Any]]: The session information for each session in the state
file """
return {int(k): v for k, v in self._sessions.items()}
@property
def mixed_precision_layers(self) -> list[str]:
"""list: Layers that can be switched between mixed-float16 and float32. """
return self._mixed_precision_layers
@property
def lr_finder(self) -> float:
""" The value discovered from the learning rate finder. -1 if no value stored """
return self._lr_finder
@property
def model_needs_rebuild(self) -> bool:
"""bool: ``True`` if mixed precision policy has changed so model needs to be rebuilt
otherwise ``False`` """
return self._rebuild_model
def _new_session_id(self) -> int:
""" Generate a new session id. Returns 1 if this is a new model, or the last session id + 1
if it is a pre-existing model.
Returns
-------
int
The newly generated session id
"""
if not self._sessions:
session_id = 1
else:
session_id = max(int(key) for key in self._sessions.keys()) + 1
logger.debug(session_id)
return session_id
def _create_new_session(self, no_logs: bool) -> None:
""" Initialize a new session, creating the dictionary entry for the session in
:attr:`_sessions`.
Parameters
----------
no_logs: bool
``True`` if Tensorboard logs should not be generated, otherwise ``False``
"""
logger.debug("Creating new session. id: %s", self._session_id)
self._sessions[self._session_id] = {"timestamp": time.time(),
"no_logs": no_logs,
"loss_names": [],
"batchsize": 0,
"iterations": 0,
"config": {k: v for k, v in self._config.items()
if k in self._updateable_options}}
def update_session_config(self, key: str, value: T.Any) -> None:
""" Update a configuration item of the currently loaded session.
Parameters
----------
key: str
The configuration item to update for the current session
value: any
The value to update to
"""
old_val = self.current_session["config"][key]
assert isinstance(value, type(old_val))
logger.debug("Updating configuration item '%s' from '%s' to '%s'", key, old_val, value)
self.current_session["config"][key] = value
def add_session_loss_names(self, loss_names: list[str]) -> None:
""" Add the session loss names to the sessions dictionary.
The loss names are used for Tensorboard logging
Parameters
----------
loss_names: list
The list of loss names for this session.
"""
logger.debug("Adding session loss_names: %s", loss_names)
self._sessions[self._session_id]["loss_names"] = loss_names
def add_session_batchsize(self, batch_size: int) -> None:
""" Add the session batch size to the sessions dictionary.
Parameters
----------
batch_size: int
The batch size for the current training session
"""
logger.debug("Adding session batch size: %s", batch_size)
self._sessions[self._session_id]["batchsize"] = batch_size
def increment_iterations(self) -> None:
""" Increment :attr:`iterations` and session iterations by 1. """
self._iterations += 1
self._sessions[self._session_id]["iterations"] += 1
def add_mixed_precision_layers(self, layers: list[str]) -> None:
""" Add the list of model's layers that are compatible for mixed precision to the
state dictionary """
logger.debug("Storing mixed precision layers: %s", layers)
self._mixed_precision_layers = layers
def add_lr_finder(self, learning_rate: float) -> None:
""" Add the optimal discovered learning rate from the learning rate finder
Parameters
----------
learning_rate : float
The discovered learning rate
"""
logger.debug("Storing learning rate from LR Finder: %s", learning_rate)
self._lr_finder = learning_rate
def save(self) -> None:
""" Save the state values to the serialized state file. """
state = {"name": self._name,
"sessions": {k: v for k, v in self._sessions.items()
if v.get("iterations", 0) > 0},
"lowest_avg_loss": self.lowest_avg_loss,
"iterations": self._iterations,
"mixed_precision_layers": self._mixed_precision_layers,
"lr_finder": self._lr_finder,
"config": self._config}
logger.debug("Saving State: %s", state)
self._serializer.save(self._filename, state)
logger.debug("Saved State: '%s'", self._filename)
def _update_legacy_config(self) -> bool:
""" Legacy updates for new config additions.
When new config items are added to the Faceswap code, existing model state files need to be
updated to handle these new items.
Current existing legacy update items:
* loss - If old `dssim_loss` is ``true`` set new `loss_function` to `ssim` otherwise
set it to `mae`. Remove old `dssim_loss` item
* l2_reg_term - If this exists, set loss_function_2 to ``mse`` and loss_weight_2 to
the value held in the old ``l2_reg_term`` item
* masks - If `learn_mask` does not exist then it is set to ``True`` if `mask_type` is
not ``None`` otherwise it is set to ``False``.
* masks type - Replace removed masks 'dfl_full' and 'facehull' with `components` mask
        * clipnorm - Only existed in 2 models (DFL-SAE + Unbalanced). Replaced with the
          global gradient clipping options
* Clip model - layer names have had to be changed to replace dots with underscores, so
replace these
Returns
-------
bool
``True`` if legacy items exist and state file has been updated, otherwise ``False``
"""
logger.debug("Checking for legacy state file update")
priors = ["dssim_loss", "mask_type", "mask_type", "l2_reg_term", "clipnorm", "autoclip"]
new_items = ["loss_function", "learn_mask", "mask_type", "loss_function_2",
"gradient_clipping", "clipping"]
updated = False
for old, new in zip(priors, new_items):
if old not in self._config:
logger.debug("Legacy item '%s' not in state config. Skipping update", old)
continue
# dssim_loss > loss_function
if old == "dssim_loss":
self._config[new] = "ssim" if self._config[old] else "mae"
del self._config[old]
updated = True
logger.info("Updated state config from legacy dssim format. New config loss "
"function: '%s'", self._config[new])
continue
            # Add learn mask option and set to True if the model has a mask_type specified
if old == "mask_type" and new == "learn_mask" and new not in self._config:
self._config[new] = self._config["mask_type"] is not None
updated = True
logger.info("Added new 'learn_mask' state config item for this model. Value set "
"to: %s", self._config[new])
continue
# Replace removed masks with most similar equivalent
if old == "mask_type" and new == "mask_type" and self._config[old] in ("facehull",
"dfl_full"):
old_mask = self._config[old]
self._config[new] = "components"
updated = True
logger.info("Updated 'mask_type' from '%s' to '%s' for this model",
old_mask, self._config[new])
# Replace l2_reg_term with the correct loss_2_function and update the value of
# loss_2_weight
if old == "l2_reg_term":
self._config[new] = "mse"
self._config["loss_weight_2"] = self._config[old]
del self._config[old]
updated = True
logger.info("Updated state config from legacy 'l2_reg_term' to 'loss_function_2'")
# Replace clipnorm with correct gradient clipping type and value
if old == "clipnorm":
self._config[new] = "norm"
del self._config[old]
updated = True
logger.info("Updated state config from legacy '%s' to '%s: %s'", old, new, old)
# Replace autoclip with correct gradient clipping type
if old == "autoclip":
self._config[new] = old
del self._config[old]
updated = True
logger.info("Updated state config from legacy '%s' to '%s: %s'", old, new, old)
# Update Clip layer names from dots to underscores
mixed_precision = self._mixed_precision_layers
if any("." in name for name in mixed_precision):
self._mixed_precision_layers = [x.replace(".", "_") for x in mixed_precision]
updated = True
logger.info("Updated state config for legacy 'mixed_precision' storage of Clip layers")
logger.debug("State file updated for legacy config: %s", updated)
return updated
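    # Hedged worked example (hypothetical values, not taken from a real state file):
    # a legacy state config such as
    #     {"dssim_loss": True, "l2_reg_term": 25, "clipnorm": True}
    # is rewritten in place by _update_legacy_config to
    #     {"loss_function": "ssim", "loss_function_2": "mse", "loss_weight_2": 25,
    #      "gradient_clipping": "norm"}
    # with the legacy keys removed and ``True`` returned so the file is re-saved.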
def _get_global_options(self) -> dict[str, ConfigItem]:
""" Obtain all of the current global user config options
Returns
-------
dict[str, :class:`lib.config.objects.ConfigItem`]
All of the current global user configuration options
"""
objects = {key: val for key, val in vars(cfg).items()
if isinstance(val, ConfigItem)
or isclass(val) and issubclass(val, GlobalSection) and val != GlobalSection}
retval: dict[str, ConfigItem] = {}
for key, obj in objects.items():
if isinstance(obj, ConfigItem):
retval[key] = obj
continue
for name, opt in obj.__dict__.items():
if isinstance(opt, ConfigItem):
retval[name] = opt
logger.debug("Loaded global config options: %s", {k: v.value for k, v in retval.items()})
return retval
def _get_model_options(self) -> dict[str, ConfigItem]:
""" Obtain all of the currently configured model user config options """
mod_name = f"plugins.train.model.{self._name}_defaults"
try:
mod = import_module(mod_name)
except ModuleNotFoundError:
logger.debug("No plugin specific defaults file found at '%s'", mod_name)
return {}
retval = {k: v for k, v in vars(mod).items() if isinstance(v, ConfigItem)}
logger.debug("Loaded '%s' config options: %s",
self._name, {k: v.value for k, v in retval.items()})
return retval
def _update_config(self) -> None:
""" Update the loaded training config with the one contained within the values loaded
from the state file.
Check for any `fixed`=``False`` parameter changes and log info changes.
Update any legacy config items to their current versions.
"""
legacy_update = self._update_legacy_config()
# Add any new items to state config for legacy purposes where the new default may be
# detrimental to an existing model.
legacy_defaults: dict[str, str | int | bool | float] = {"centering": "legacy",
"coverage": 62.5,
"mask_loss_function": "mse",
"optimizer": "adam",
"mixed_precision": False}
rebuild_tasks = ["mixed_precision"]
options = self._get_global_options() | self._get_model_options()
for key, opt in options.items():
val: ConfigValueType = opt()
if key not in self._config:
val = legacy_defaults.get(key, val)
logger.info("Adding new config item to state file: '%s': %s", key, repr(val))
self._config[key] = val
old_val = self._config[key]
old_val = "none" if old_val is None else old_val # We used to allow NoneType. No more
if not opt.fixed:
self._updateable_options.append(key)
if not opt.fixed and val != old_val:
self._config[key] = val
logger.info("Config item: '%s' has been updated from %s to %s",
key, repr(old_val), repr(val))
self._rebuild_model = self._rebuild_model or key in rebuild_tasks
continue
if val != old_val:
logger.debug("Fixed config item '%s' Updated from %s to %s from state file",
key, repr(val), repr(old_val))
opt.set(old_val)
if legacy_update:
self.save()
logger.info("Using configuration saved in state file")
logger.debug("Updateable items: %s", self._updateable_options)
def _generate_config(self) -> None:
""" Generate an initial state config based on the currently selected user config """
options = self._get_global_options() | self._get_model_options()
for key, val in options.items():
self._config[key] = val.value
if not val.fixed:
self._updateable_options.append(key)
logger.debug("Generated initial state config for '%s': %s", self._name, self._config)
logger.debug("Updateable items: %s", self._updateable_options)
def _load(self) -> None:
""" Load a state file and set the serialized values to the class instance.
Updates the model's config with the values stored in the state file.
"""
logger.debug("Loading State")
if not os.path.exists(self._filename):
logger.info("No existing state file found. Generating.")
self._generate_config()
return
state = self._serializer.load(self._filename)
self._name = state.get("name", self._name)
self._sessions = state.get("sessions", {})
self.lowest_avg_loss = state.get("lowest_avg_loss", 0.0)
if isinstance(self.lowest_avg_loss, dict):
lowest_avg_loss = sum(self.lowest_avg_loss.values())
logger.debug("Collating legacy lowest_avg_loss from %s to %s",
self.lowest_avg_loss, lowest_avg_loss)
self.lowest_avg_loss = lowest_avg_loss
self._iterations = state.get("iterations", 0)
self._mixed_precision_layers = state.get("mixed_precision_layers", [])
self._lr_finder = state.get("lr_finder", -1.0)
self._config = state.get("config", {})
logger.debug("Loaded state: %s", state)
self._update_config()
__all__ = get_module_objects(__name__)
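# Illustrative usage sketch (not part of the original module; the path and model
# name are hypothetical assumptions):
#     state = State(model_dir="/path/to/model_dir", model_name="original", no_logs=False)
#     state.add_session_loss_names(["total"])
#     state.add_session_batchsize(16)
#     state.increment_iterations()
#     state.save()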
| {
"repo_id": "deepfakes/faceswap",
"file_path": "plugins/train/model/_base/state.py",
"license": "GNU General Public License v3.0",
"lines": 368,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepfakes/faceswap:plugins/train/model/_base/update.py | #!/usr/bin/env python3
""" Updating legacy faceswap models to the current version """
import json
import logging
import os
import typing as T
import zipfile
from shutil import copyfile, copytree
import h5py
import numpy as np
from keras import models as kmodels
from lib.logger import parse_class_init
from lib.model.layers import ScalarOp
from lib.model.networks import TypeModelsViT, ViT
from lib.utils import get_module_objects, FaceswapError
logger = logging.getLogger(__name__)
class Legacy: # pylint:disable=too-few-public-methods
""" Handles the updating of Keras 2.x models to Keras 3.x
Generally Keras 2.x models will open in Keras 3.x. There are a couple of bugs in Keras 3
legacy loading code which impacts Faceswap models:
- When a model receives a shared functional model as an inbound node, the node index needs
reducing by 1 (non-trivial to fix upstream)
- Keras 3 does not accept nested outputs, so Keras 2 FS models need to have the outputs
flattened
Parameters
----------
model_path: str
Full path to the legacy Keras 2.x model h5 file to upgrade
"""
def __init__(self, model_path: str):
logger.debug(parse_class_init(locals()))
self._old_model_file = model_path
"""str: Full path to the old .h5 model file"""
self._new_model_file = f"{os.path.splitext(model_path)[0]}.keras"
"""str: Full path to the new .keras model file"""
self._functionals: set[str] = set()
"""set[str]: The name of any Functional models discovered in the keras 2 model config"""
self._upgrade_model()
logger.debug("Initialized %s", self.__class__.__name__)
def _get_model_config(self) -> dict[str, T.Any]:
""" Obtain a keras 2.x config from a keras 2.x .h5 file.
As keras 3.x will error out loading the file, we collect it directly from the .h5 file
Returns
-------
dict[str, Any]
A keras 2.x model configuration dictionary
Raises
------
FaceswapError
If the file is not a valid Faceswap 2 .h5 model file
"""
h5file = h5py.File(self._old_model_file, "r")
s_version = T.cast(str | None, h5file.attrs.get("keras_version"))
s_config = T.cast(str | None, h5file.attrs.get("model_config"))
if not s_version or not s_config:
raise FaceswapError(f"'{self._old_model_file}' is not a valid Faceswap 2 model file")
version = s_version.split(".")[:2]
if len(version) != 2 or version[0] != "2":
raise FaceswapError(f"'{self._old_model_file}' is not a valid Faceswap 2 model file")
retval = json.loads(s_config)
logger.debug("Loaded keras 2.x model config: %s", retval)
return retval
@classmethod
def _unwrap_outputs(cls, outputs: list[list[T.Any]]) -> list[list[str | int]]:
""" Unwrap nested output tensors from a config dict to be a single list of output tensor
Parameters
----------
outputs: list[list[Any]]
The outputs that exist within the Keras 2 config dict that may be nested
Returns
-------
list[list[str | int]]
The output configuration formatted to be compatible with Keras 3
"""
retval = np.array(outputs).reshape(-1, 3).tolist()
for item in retval:
item[1] = int(item[1])
item[2] = int(item[2])
logger.debug("Unwrapped outputs: %s to: %s", outputs, retval)
return retval
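    # Worked example (hypothetical layer names): a nested Keras 2 output list such as
    #     [[["decoder_a", 1, 0], ["decoder_b", 1, 0]]]
    # is flattened by np.array(...).reshape(-1, 3).tolist() into the rows
    #     ["decoder_a", 1, 0] and ["decoder_b", 1, 0]
    # numpy coerces the indices to strings in the process, which is why the int()
    # casts above restore them before the config is handed to Keras 3.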
def _get_clip_config(self) -> dict[str, T.Any]:
""" Build a clip model from the configuration information stored in the legacy state file
Returns
-------
dict[str, T.Any]
The new keras configuration for a Clip model
Raises
------
FaceswapError
If the clip model cannot be built
"""
state_file = f"{os.path.splitext(self._old_model_file)[0]}_state.json"
if not os.path.isfile(state_file):
raise FaceswapError(
f"The state file '{state_file}' does not exist. This model cannot be ported")
with open(state_file, "r", encoding="utf-8") as ifile:
config = json.load(ifile)
logger.debug("Loaded legacy config '%s': %s", state_file, config)
net_name = config.get("config", {}).get("enc_architecture", "")
scaling = config.get("config", {}).get("enc_scaling", 0) / 100
# Import here to prevent circular imports
from plugins.train.model.phaze_a import _MODEL_MAPPING # pylint:disable=C0415
vit_info = _MODEL_MAPPING.get(net_name)
if not scaling or not vit_info:
raise FaceswapError(
f"Clip network could not be found in '{state_file}'. Discovered network is "
f"'{net_name}' with encoder scaling: {scaling}. This model cannot be ported")
input_size = int(max(vit_info.min_size, ((vit_info.default_size * scaling) // 16) * 16))
vit_model = ViT(T.cast(TypeModelsViT, vit_info.keras_name), input_size=input_size)()
retval = vit_model.get_config()
del vit_model
logger.debug("Got new config for '%s' at input size: %s: %s", net_name, input_size, retval)
return retval
def _convert_lambda_config(self, layer: dict[str, T.Any]):
""" Keras 2 TFLambdaOps are not compatible with Keras 3. Scalar operations can be
relatively easily substituted with a :class:`~lib.model.layers.ScalarOp` layer
Parameters
----------
layer: dict[str, Any]
An existing Keras 2 TFLambdaOp layer
Raises
------
FaceswapError
If the TFLambdaOp is not currently supported
"""
name = layer["config"]["name"]
operation = name.rsplit(".", maxsplit=1)[-1]
if operation not in ("multiply", "truediv", "add", "subtract"):
raise FaceswapError(f"The TFLambdaOp '{name}' is not supported")
value = layer["inbound_nodes"][0][-1]["y"]
if isinstance(layer["config"]["dtype"], str):
dtype = layer["config"]["dtype"]
else:
dtype = layer["config"]["dtype"]["config"]["name"]
new_layer = ScalarOp(operation, value, name=name, dtype=dtype)
logger.debug("Converting legacy TFLambdaOp: %s", layer)
layer["class_name"] = "ScalarOp"
layer["config"] = new_layer.get_config()
for n in layer["inbound_nodes"]:
n[-1] = {}
layer["inbound_nodes"] = [layer["inbound_nodes"]]
logger.debug("Converted legacy TFLambdaOp to %s", layer)
def _process_deprecations(self, layer: dict[str, T.Any]) -> None:
""" Some layer kwargs are deprecated between Keras 2 and Keras 3. Some are not mission
critical, but updating these here prevents Keras from outputting warnings about deprecated
arguments. Others will fail to load the legacy model (eg Clip) so are replaced with a new
config. Operation is performed in place
Parameters
----------
layer: dict[str, T.Any]
A keras model config item representing a keras layer
"""
if layer["class_name"] == "LeakyReLU":
# Non mission-critical, but prevents scary deprecation messages
config = layer["config"]
old, new = "alpha", "negative_slope"
if old in config:
logger.debug("Updating '%s' kwarg '%s' to '%s'", layer["name"], old, new)
config[new] = config[old]
del config[old]
if layer["name"] == "visual":
# MultiHeadAttention is not backwards compatible, so get new config for Clip models
logger.debug("Getting new config for 'visual' model")
layer["config"] = self._get_clip_config()
if layer["class_name"] == "TFOpLambda":
# TFLambdaOp are not supported
self._convert_lambda_config(layer)
if layer["class_name"] in ("DepthwiseConv2D",
"Conv2DTranspose") and "groups" in layer["config"]:
# groups parameter doesn't exist in Keras 3. Hopefully it still works the same
logger.debug("Removing groups from %s '%s'", layer["class_name"], layer["name"])
del layer["config"]["groups"]
if "dtype" in layer["config"]:
# Incorrectly stored dtypes error when deserializing the new config. May be a Keras bug
actual_dtype = None
old_dtype = layer["config"]["dtype"]
if isinstance(old_dtype, str):
actual_dtype = layer["config"]["dtype"]
if isinstance(old_dtype, dict) and old_dtype.get("class_name") == "Policy":
actual_dtype = old_dtype["config"]["name"]
if actual_dtype is not None:
new_dtype = {"module": "keras",
"class_name": "DTypePolicy",
"config": {"name": actual_dtype},
"registered_name": None}
logger.debug("Updating dtype for '%s' from %s to %s", layer["name"],
old_dtype, new_dtype)
layer["config"]["dtype"] = new_dtype
def _process_inbounds(self,
layer_name: str,
inbound_nodes: list[list[list[str | int]]] | list[list[str | int]]
) -> None:
""" If the inbound nodes are from a shared functional model, decrement the node index by
one. Operation is performed in place
Parameters
----------
layer_name: str
The name of the layer (for logging)
inbound_nodes: list[list[list[str | int]]] | list[list[str | int]]
The inbound nodes from a Keras 2 config dict to process
"""
to_process = T.cast(
list[list[list[str | int]]],
inbound_nodes if isinstance(inbound_nodes[0][0], list) else [inbound_nodes])
for inbound in to_process:
for node in inbound:
name, node_index = node[0], node[1]
assert isinstance(name, str) and isinstance(node_index, int)
if name in self._functionals and node_index > 0:
logger.debug("Updating '%s' inbound node index for '%s' from %s to %s",
layer_name, name, node_index, node_index - 1)
node[1] = node_index - 1
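    # Worked example (hypothetical names): if "encoder" was recorded as a shared
    # Functional model, an inbound node entry ["encoder", 2, 0] is decremented in
    # place to ["encoder", 1, 0]; node indices of 0 are left untouched.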
def _update_layers(self, layer_list: list[dict[str, T.Any]]) -> None:
""" Given a list of keras layers from a keras 2 config dict, increment the indices for
any inbound nodes that come from a shared Functional model. Flatten any nested output
tensor lists. Operations are performed in place
Parameters
----------
        layer_list: list[dict[str, Any]]
A list of layers that belong to a keras 2 functional model config dictionary
"""
for layer in layer_list:
if layer["class_name"] == "Functional":
logger.debug("Found Functional layer. Keys: %s", list(layer))
if layer.get("name"):
logger.debug("Storing layer: '%s'", layer["name"])
self._functionals.add(layer["name"])
layer["config"]["output_layers"] = self._unwrap_outputs(
layer["config"]["output_layers"])
self._update_layers(layer["config"]["layers"])
if not layer.get("inbound_nodes"):
continue
self._process_deprecations(layer)
self._process_inbounds(layer["name"], layer["inbound_nodes"])
def _archive_model(self) -> str:
""" Archive an existing Keras 2 model to a new archive location
Raises
------
FaceswapError
If the destination archive folder exists and is not empty
Returns
-------
str
The path to the archived keras 2 model folder
"""
model_dir = os.path.dirname(self._old_model_file)
dst_path = f"{model_dir}_fs2_backup"
if os.path.exists(dst_path) and os.listdir(dst_path):
raise FaceswapError(
f"The destination archive folder '{dst_path}' already exists. Either delete this "
"folder, select a different model folder, or remove the legacy model files from "
f"your model folder '{model_dir}'.")
if os.path.exists(dst_path):
logger.info("Removing pre-existing empty folder '%s'", dst_path)
os.rmdir(dst_path)
logger.info("Archiving model folder '%s' to '%s'", model_dir, dst_path)
os.rename(model_dir, dst_path)
return dst_path
def _restore_files(self, archive_dir: str) -> None:
""" Copy the state.json file and the logs folder from the archive folder to the new model
folder
Parameters
----------
archive_dir: str
The full path to the archived Keras 2 model
"""
model_dir = os.path.dirname(self._new_model_file)
model_name = os.path.splitext(os.path.basename(self._new_model_file))[0]
logger.debug("Restoring required '%s 'files from '%s' to '%s'",
model_name, archive_dir, model_dir)
for fname in os.listdir(archive_dir):
fullpath = os.path.join(archive_dir, fname)
new_path = os.path.join(model_dir, fname)
if fname == f"{model_name}_logs" and os.path.isdir(fullpath):
logger.debug("Restoring '%s' to '%s'", fullpath, new_path)
copytree(fullpath, new_path)
continue
if fname == f"{model_name}_state.json" and os.path.isfile(fullpath):
logger.debug("Restoring '%s' to '%s'", fullpath, new_path)
copyfile(fullpath, new_path)
continue
logger.debug("Skipping file: '%s'", fname)
def _upgrade_model(self) -> None:
""" Get the model configuration of a Faceswap 2 model and upgrade it to Faceswap 3
compatible """
logger.info("Upgrading model file from Faceswap 2 to Faceswap 3...")
config = self._get_model_config()
self._update_layers([config])
logger.debug("Migrating data to new model...")
model = kmodels.Model.from_config(config["config"])
model.load_weights(self._old_model_file)
archive_dir = self._archive_model()
dirname = os.path.dirname(self._new_model_file)
logger.debug("Saving model '%s'", self._new_model_file)
os.mkdir(dirname)
model.save(self._new_model_file)
logger.debug("Saved model '%s'", self._new_model_file)
self._restore_files(archive_dir)
logger.info("Model upgraded: '%s'", dirname)
class PatchKerasConfig:
""" This class exists to patch breaking changes when moving from older keras 3.x models to
newer versions
Parameters
----------
model_path : str
Full path to the keras model to be patched for the current version
"""
def __init__(self, model_path: str) -> None:
logger.debug(parse_class_init(locals()))
self._model_path = model_path
self._items, self._config = self._load_model()
metadata = json.loads(self._items["metadata.json"])
self._version = tuple(int(x) for x in metadata['keras_version'].split(".")[:2])
logger.debug("Initialized: %s", self.__class__.__name__)
def _load_model(self) -> tuple[dict[str, bytes], dict[str, T.Any]]:
""" Load the objects from the compressed keras model
Returns
-------
items : dict[str, bytes]
The filename and file objects within the keras 3 model file that are not the model
config
config : dict[str, Any]
The model configuration dictionary from the keras 3 model file
"""
with zipfile.ZipFile(self._model_path, "r") as zf:
items = {f.filename: zf.read(f) for f in zf.filelist if f.filename != "config.json"}
config = json.loads(zf.read("config.json"))
logger.debug("Loaded legacy existing items %s and 'config.json' from model '%s'",
list(items), self._model_path)
return items, config
def _update_nn_blocks(self, layer: dict[str, T.Any]):
""" In older versions of keras our :class:`lib.model.nn_blocks.Conv2D` and
:class:`lib.model.nn_blocks.DepthwiseConv2D` inherited from their respective Keras layers.
Sometime between 3.3.3 and 3.12 (during beta testing) this stopped working, raising a
TypeError. Subsequently we have refactored those classes to no longer inherit, and call the
underlying keras layer directly instead. The keras config needs to be rewritten to reflect
this.
Parameters
----------
        layer: dict[str, Any]
A layer config dictionary from a keras 3 model
"""
if (layer.get("module") == "lib.model.nn_blocks" and
layer.get("class_name") in ("Conv2D", "DepthwiseConv2D")):
new_module = "keras.layers"
logger.debug("Updating Keras %s layer '%s' to '%s': %s",
".".join(str(x) for x in self._version),
f"{layer['module']}.{layer['class_name']}",
f"{new_module}.{layer['class_name']}",
layer["name"])
layer["module"] = new_module
def _parse_inbound_args(self, inbound: list | dict[str, T.Any]) -> None:
""" Recurse through keras inbound node args until we arrive at a dictionary
Parameters
----------
        inbound: list | dict[str, Any]
A Keras inbound nodes args entry or the nested dictionary
"""
if not isinstance(inbound, (list, dict)):
return
if isinstance(inbound, list):
for arg in inbound:
self._parse_inbound_args(arg)
return
arg_conf = inbound["config"]
if "keras_history" not in arg_conf:
return
if "." in arg_conf["keras_history"][0]:
new_hist = arg_conf["keras_history"][:]
new_hist[0] = new_hist[0].replace(".", "_")
logger.debug("Updating Inbound Keras history from '%s' to '%s'",
arg_conf["keras_history"], new_hist)
arg_conf["keras_history"] = new_hist
def _update_dot_naming(self, layer: dict[str, T.Any]):
""" Sometime between 3.3.3 and 3.12 (during beta testing) layers with "." in the name
started generating a KeyError. This is odd as the error comes from Torch, but dot naming is
standard. To work around this all dots (.) in layer names have been converted to
underscores (_). The keras config needs to be rewritten to reflect this. This only impacts
FS models that used the CLiP encoder
Parameters
----------
        layer: dict[str, Any]
A layer config dictionary from a keras 3 model
"""
if "." in layer["name"]:
new_name = layer["name"].replace(".", "_")
logger.debug("Updating Keras layer name from '%s' to '%s'", layer["name"], new_name)
layer["name"] = new_name
config = layer["config"]
if "." in config["name"]:
new_name = config["name"].replace(".", "_")
logger.debug("Updating Keras config layer name from '%s' to '%s'",
config["name"], new_name)
config["name"] = new_name
inbound = layer["inbound_nodes"]
for in_ in inbound:
for arg in in_["args"]:
self._parse_inbound_args(arg)
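    # Hedged example (hypothetical name): a CLiP-style layer named something like
    # "visual.transformer.resblocks.0" is renamed in place to
    # "visual_transformer_resblocks_0", and any keras_history references to the
    # dotted name in consumers' inbound args are rewritten to match.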
def _update_config(self, config: dict[str, T.Any]) -> dict[str, T.Any]:
""" Recursively update the `config` dictionary from a full keras config in place
Parameters
----------
config : dict[str, Any]
A 'config' section of keras config
Returns
-------
dict[str, Any]
The updated `config` section of a keras config
"""
layer: dict[str, T.Any]
for layer in config["layers"]:
if layer.get("class_name") == "Functional":
self._update_config(layer["config"])
if self._version <= (3, 3):
self._update_nn_blocks(layer)
self._update_dot_naming(layer)
return config
def _save_model(self) -> None:
""" Save the updated keras model """
logger.info("Updating Keras model '%s'...", self._model_path)
with zipfile.ZipFile(self._model_path, "w", compression=zipfile.ZIP_DEFLATED) as zf:
for filename, data in self._items.items():
zf.writestr(filename, data)
zf.writestr("config.json", json.dumps(self._config).encode("utf-8"))
def __call__(self) -> None:
""" Update the keras configuration saved in a keras model file and save over the original
model """
logger.debug("Updating saved config for keras version %s", self._version)
self._config["config"] = self._update_config(self._config["config"])
self._save_model()
__all__ = get_module_objects(__name__)
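# Illustrative usage sketch (not part of the original module; the path is
# hypothetical). The instance is callable; calling it patches config.json inside
# the .keras zip archive and saves over the original file:
#     PatchKerasConfig("/path/to/model_dir/original.keras")()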
| {
"repo_id": "deepfakes/faceswap",
"file_path": "plugins/train/model/_base/update.py",
"license": "GNU General Public License v3.0",
"lines": 431,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepfakes/faceswap:plugins/train/train_config.py | #!/usr/bin/env python3
""" Default configurations for models """
import gettext
import logging
import os
from dataclasses import dataclass
from lib.config import ConfigItem, FaceswapConfig, GlobalSection
from plugins.plugin_loader import PluginLoader
from plugins.train.trainer import trainer_config
# LOCALES
_LANG = gettext.translation("plugins.train._config", localedir="locales", fallback=True)
_ = _LANG.gettext
logger = logging.getLogger(__name__)
_ADDITIONAL_INFO = _("\nNB: Unless specifically stated, values changed here will only take effect "
"when creating a new model.")
class _Config(FaceswapConfig):
""" Config File for Models """
# pylint:disable=too-many-statements
def set_defaults(self, helptext="") -> None:
""" Set the default values for config """
super().set_defaults(helptext=_("Options that apply to all models") + _ADDITIONAL_INFO)
self._defaults_from_plugin(os.path.dirname(__file__))
train_helptext, section, train_opts = trainer_config.get_defaults()
self.add_section(section, train_helptext)
for k, v in train_opts.items():
self.add_item(section, k, v)
centering = ConfigItem(
datatype=str,
default="face",
gui_radio=True,
group=_("face"),
info=_(
"How to center the training image. The extracted images are centered on the middle of the "
"skull based on the face's estimated pose. A subsection of these images are used for "
"training. The centering used dictates how this subsection will be cropped from the "
"aligned images."
"\n\tface: Centers the training image on the center of the face, adjusting for pitch and "
"yaw."
"\n\thead: Centers the training image on the center of the head, adjusting for pitch and "
"yaw. NB: You should only select head centering if you intend to include the full head ("
"including hair) in the final swap. This may give mixed results. Additionally, it is only "
"worth choosing head centering if you are training with a mask that includes the hair ("
"e.g. BiSeNet-FP-Head)."
"\n\tlegacy: The 'original' extraction technique. Centers the training image near the tip "
"of the nose with no adjustment. Can result in the edges of the face appearing outside of "
"the training area."),
choices=["face", "head", "legacy"],
fixed=True)
coverage = ConfigItem(
datatype=float,
default=100.0,
group=_("face"),
info=_(
"How much of the extracted image to train on. A lower coverage will limit the model's "
"scope to a zoomed-in central area while higher amounts can include the entire face. A "
"trade-off exists between lower amounts given more detail versus higher amounts avoiding "
"noticeable swap transitions. For 'Face' centering you will want to leave this above 75%. "
"For Head centering you will most likely want to set this to 100%. Sensible values for "
"'Legacy' centering are:"
"\n\t62.5% spans from eyebrow to eyebrow."
"\n\t75.0% spans from temple to temple."
"\n\t87.5% spans from ear to ear."
"\n\t100.0% is a mugshot."),
min_max=(62.5, 100.0),
rounding=2,
fixed=True)
vertical_offset = ConfigItem(
datatype=int,
default=0,
group=_("face"),
info=_(
"How much to adjust the vertical position of the aligned face as a percentage of face "
"image size. Negative values move the face up (expose more chin and less forehead). "
"Positive values move the face down (expose less chin and more forehead)"),
min_max=(-25, 25),
rounding=1,
fixed=True)
icnr_init = ConfigItem(
datatype=bool,
default=False,
group=_("initialization"),
info=_(
"Use ICNR to tile the default initializer in a repeating pattern. This strategy is "
"designed for pairing with sub-pixel / pixel shuffler to reduce the 'checkerboard effect' "
"in image reconstruction. \n\t https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf"))
conv_aware_init = ConfigItem(
datatype=bool,
default=False,
group=_("initialization"),
info=_(
"Use Convolution Aware Initialization for convolutional layers. This can help eradicate "
"the vanishing and exploding gradient problem as well as lead to higher accuracy, lower "
"loss and faster convergence.\nNB:\n\t This can use more VRAM when creating a new model "
"so you may want to lower the batch size for the first run. The batch size can be raised "
"again when reloading the model."
"\n\t Multi-GPU is not supported for this option, so you should start the model on a "
"single GPU. Once training has started, you can stop training, enable multi-GPU and "
"resume."
"\n\t Building the model will likely take several minutes as the calculations for this "
"initialization technique are expensive. This will only impact starting a new model."))
lr_finder_iterations = ConfigItem(
datatype=int,
default=1000,
group=_("Learning Rate Finder"),
info=_(
"The number of iterations to process to find the optimal learning rate. Higher values "
"will take longer, but will be more accurate."),
min_max=(100, 10000),
rounding=100,
fixed=True)
lr_finder_mode = ConfigItem(
datatype=str,
default="set",
group=_("Learning Rate Finder"),
info=_(
"The operation mode for the learning rate finder. Only applicable to new models. For "
"existing models this will always default to 'set'."
"\n\tset - Train with the discovered optimal learning rate."
"\n\tgraph_and_set - Output a graph in the training folder showing the discovered "
"learning rates and train with the optimal learning rate."
"\n\tgraph_and_exit - Output a graph in the training folder with the discovered learning "
"rates and exit."),
gui_radio=True,
choices=["set", "graph_and_set", "graph_and_exit"],
fixed=True)
lr_finder_strength = ConfigItem(
datatype=str,
default="default",
group=_("Learning Rate Finder"),
info=_(
"How aggressively to set the Learning Rate. More aggressive can learn faster, but is more "
"likely to lead to exploding gradients."
"\n\tdefault - The default optimal learning rate. A safe choice for nearly all use cases."
"\n\taggressive - Set's a higher learning rate than the default. May learn faster but "
"with a higher chance of exploding gradients."
"\n\textreme - The highest optimal learning rate. A much higher risk of exploding "
"gradients."),
gui_radio=True,
choices=["default", "aggressive", "extreme"],
fixed=True)
reflect_padding = ConfigItem(
datatype=bool,
default=False,
group=_("network"),
info=_(
"Use reflection padding rather than zero padding with convolutions. Each convolution must "
"pad the image boundaries to maintain the proper sizing. More complex padding schemes can "
"reduce artifacts at the border of the image."
"\n\t http://www-cs.engr.ccny.cuny.edu/~wolberg/cs470/hw/hw2_pad.txt"))
mixed_precision = ConfigItem(
datatype=bool,
default=False,
group=_("network"),
info=_(
"NVIDIA GPUs can run operations in float16 faster than in float32. Mixed precision allows "
"you to use a mix of float16 with float32, to get the performance benefits from float16 "
"and the numeric stability benefits from float32.\n\nThis is untested on non-Nvidia "
"cards, but will run on most Nvidia models. it will only speed up training on more recent "
"GPUs. Those with compute capability 7.0 or higher will see the greatest performance "
"benefit from mixed precision because they have Tensor Cores. Older GPUs offer no math "
"performance benefit for using mixed precision, however memory and bandwidth savings can "
"enable some speedups. Generally RTX GPUs and later will offer the most benefit."),
fixed=False)
nan_protection = ConfigItem(
datatype=bool,
default=True,
group=_("network"),
info=_(
"If a 'NaN' is generated in the model, this means that the model has corrupted and the "
"model is likely to start deteriorating from this point on. Enabling NaN protection will "
"stop training immediately in the event of a NaN. The last save will not contain the NaN, "
"so you may still be able to rescue your model."),
fixed=False)
convert_batchsize = ConfigItem(
datatype=int,
default=16,
group=_("convert"),
info=_(
"[GPU Only]. The number of faces to feed through the model at once when running the "
"Convert process.\n\nNB: Increasing this figure is unlikely to improve convert speed, "
"however, if you are getting Out of Memory errors, then you may want to reduce the batch "
"size."),
min_max=(1, 32),
rounding=1,
fixed=False)
_LOSS_HELP = {
"ffl": _(
"Focal Frequency Loss. Analyzes the frequency spectrum of the images rather than the "
"images themselves. This loss function can be used on its own, but the original paper "
"found increased benefits when using it as a complementary loss to another spacial loss "
"function (e.g. MSE). Ref: Focal Frequency Loss for Image Reconstruction and Synthesis "
"https://arxiv.org/pdf/2012.12821.pdf NB: This loss does not currently work on AMD "
"cards."),
"flip": _(
"Nvidia FLIP. A perceptual loss measure that approximates the difference perceived by "
"humans as they alternate quickly (or flip) between two images. Used on its own and this "
"loss function creates a distinct grid on the output. However it can be helpful when "
"used as a complimentary loss function. Ref: FLIP: A Difference Evaluator for "
"Alternating Images: "
"https://research.nvidia.com/sites/default/files/node/3260/FLIP_Paper.pdf"),
"gmsd": _(
"Gradient Magnitude Similarity Deviation seeks to match the global standard deviation of "
"the pixel to pixel differences between two images. Similar in approach to SSIM. Ref: "
"Gradient Magnitude Similarity Deviation: An Highly Efficient Perceptual Image Quality "
"Index https://arxiv.org/ftp/arxiv/papers/1308/1308.3052.pdf"),
"l_inf_norm": _(
"The L_inf norm will reduce the largest individual pixel error in an image. As "
"each largest error is minimized sequentially, the overall error is improved. This loss "
"will be extremely focused on outliers."),
"laploss": _(
"Laplacian Pyramid Loss. Attempts to improve results by focussing on edges using "
"Laplacian Pyramids. As this loss function gives priority to edges over other low-"
"frequency information, like color, it should not be used on its own. The original "
"implementation uses this loss as a complimentary function to MSE. "
"Ref: Optimizing the Latent Space of Generative Networks "
"https://arxiv.org/abs/1707.05776"),
"lpips_alex": _(
"LPIPS is a perceptual loss that uses the feature outputs of other pretrained models as a "
"loss metric. Be aware that this loss function will use more VRAM. Used on its own and "
"this loss will create a distinct moire pattern on the output, however it can be helpful "
"as a complimentary loss function. The output of this function is strong, so depending "
"on your chosen primary loss function, you are unlikely going to want to set the weight "
"above about 25%. Ref: The Unreasonable Effectiveness of Deep Features as a Perceptual "
"Metric http://arxiv.org/abs/1801.03924\nThis variant uses the AlexNet backbone. A fairly "
"light and old model which performed best in the paper's original implementation.\nNB: "
"For AMD Users the final linear layer is not implemented."),
"lpips_squeeze": _(
"Same as lpips_alex, but using the SqueezeNet backbone. A more lightweight "
"version of AlexNet.\nNB: For AMD Users the final linear layer is not implemented."),
"lpips_vgg16": _(
"Same as lpips_alex, but using the VGG16 backbone. A more heavyweight model.\n"
"NB: For AMD Users the final linear layer is not implemented."),
"logcosh": _(
"log(cosh(x)) acts similar to MSE for small errors and to MAE for large errors. Like "
"MSE, it is very stable and prevents overshoots when errors are near zero. Like MAE, it "
"is robust to outliers."),
"mae": _(
"Mean absolute error will guide reconstructions of each pixel towards its median value in "
"the training dataset. Robust to outliers but as a median, it can potentially ignore some "
"infrequent image types in the dataset."),
"mse": _(
"Mean squared error will guide reconstructions of each pixel towards its average value in "
"the training dataset. As an avg, it will be susceptible to outliers and typically "
"produces slightly blurrier results. Ref: Multi-Scale Structural Similarity for Image "
"Quality Assessment https://www.cns.nyu.edu/pub/eero/wang03b.pdf"),
"ms_ssim": _(
"Multiscale Structural Similarity Index Metric is similar to SSIM except that it "
"performs the calculations along multiple scales of the input image."),
"smooth_loss": _(
"Smooth_L1 is a modification of the MAE loss to correct two of its disadvantages. "
"This loss has improved stability and guidance for small errors. Ref: A General and "
"Adaptive Robust Loss Function https://arxiv.org/pdf/1701.03077.pdf"),
"ssim": _(
"Structural Similarity Index Metric is a perception-based loss that considers changes in "
"texture, luminance, contrast, and local spatial statistics of an image. Potentially "
"delivers more realistic looking images. Ref: Image Quality Assessment: From Error "
"Visibility to Structural Similarity http://www.cns.nyu.edu/pub/eero/wang03-reprint.pdf"),
"pixel_gradient_diff": _(
"Instead of minimizing the difference between the absolute value of each "
"pixel in two reference images, compute the pixel to pixel spatial difference in each "
"image and then minimize that difference between two images. Allows for large color "
"shifts, but maintains the structure of the image."),
"none": _("Do not use an additional loss function.")}
_NON_PRIMARY_LOSS = ["flip", "lpips_alex", "lpips_squeeze", "lpips_vgg16", "none"]
@dataclass
class Loss(GlobalSection):
""" global.loss configuration section
Loss Documentation
MAE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
MSE https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
LogCosh https://heartbeat.fritz.ai/5-regression-loss-functions-all-machine-learners-should-know-4fb140e9d4b0
L_inf_norm https://medium.com/@montjoile/l0-norm-l1-norm-l2-norm-l-infinity-norm-7a7d18a4f40c
""" # pylint:disable=line-too-long # noqa[E501]
helptext = _(
"Loss configuration options\n"
"Loss is the mechanism by which a Neural Network judges how well it thinks that it "
"is recreating a face.") + _ADDITIONAL_INFO
loss_function = ConfigItem(
datatype=str,
default="ssim",
group=_("loss"),
info=(_("The loss function to use.") +
"\n\n\t" + "\n\n\t".join(f"{k}: {v}"
for k, v in sorted(_LOSS_HELP.items())
if k not in _NON_PRIMARY_LOSS)),
choices=[x for x in sorted(_LOSS_HELP) if x not in _NON_PRIMARY_LOSS],
fixed=False)
loss_function_2 = ConfigItem(
datatype=str,
default="mse",
group=_("loss"),
info=_(
"The second loss function to use. If using a structural based loss (such as "
"SSIM, MS-SSIM or GMSD) it is common to add an L1 regularization(MAE) or L2 "
"regularization (MSE) function. You can adjust the weighting of this loss "
"function with the loss_weight_2 option." +
"\n\n\t" + "\n\n\t".join(f"{k}: {v}" for k, v in sorted(_LOSS_HELP.items()))),
choices=list(sorted(_LOSS_HELP)),
fixed=False)
loss_weight_2 = ConfigItem(
datatype=int,
default=100,
group=_("loss"),
info=_(
"The amount of weight to apply to the second loss function.\n\n"
"\n\nThe value given here is as a percentage denoting how much the selected "
"function should contribute to the overall loss cost of the model. For "
"example:"
"\n\t 100 - The loss calculated for the second loss function will be applied "
"at its full amount towards the overall loss score. "
"\n\t 25 - The loss calculated for the second loss function will be reduced "
"by a quarter prior to adding to the overall loss score. "
"\n\t 400 - The loss calculated for the second loss function will be "
"mulitplied 4 times prior to adding to the overall loss score. "
"\n\t 0 - Disables the second loss function altogether."),
min_max=(0, 400),
rounding=1,
fixed=False)
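        # Worked example (illustrative, not from the original source): the weight is
        # a percentage multiplier on the secondary loss, so the combined score is
        # approximately
        #     total = loss_1 + (loss_weight_2 / 100) * loss_2
        # e.g. a weight of 25 scales loss_2 by 0.25 before it is added.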
loss_function_3 = ConfigItem(
datatype=str,
default="none",
group=_("loss"),
info=_("The third loss function to use. You can adjust the weighting of this loss "
"function with the loss_weight_3 option." +
"\n\n\t" +
"\n\n\t".join(f"{k}: {v}" for k, v in sorted(_LOSS_HELP.items()))),
choices=list(sorted(_LOSS_HELP)),
fixed=False)
loss_weight_3 = ConfigItem(
datatype=int,
default=0,
group=_("loss"),
info=_(
"The amount of weight to apply to the third loss function.\n\n"
"\n\nThe value given here is as a percentage denoting how much the selected "
"function should contribute to the overall loss cost of the model. For "
"example:"
"\n\t 100 - The loss calculated for the third loss function will be applied "
"at its full amount towards the overall loss score. "
"\n\t 25 - The loss calculated for the third loss function will be reduced "
"by a quarter prior to adding to the overall loss score. "
"\n\t 400 - The loss calculated for the third loss function will be "
"mulitplied 4 times prior to adding to the overall loss score. "
"\n\t 0 - Disables the third loss function altogether."),
min_max=(0, 400),
rounding=1,
fixed=False)
loss_function_4 = ConfigItem(
datatype=str,
default="none",
group=_("loss"),
info=_(
"The fourth loss function to use. You can adjust the weighting of this "
"loss function with the loss_weight_3 option." +
"\n\n\t" +
"\n\n\t".join(f"{k}: {v}" for k, v in sorted(_LOSS_HELP.items()))),
choices=list(sorted(_LOSS_HELP)),
fixed=False)
loss_weight_4 = ConfigItem(
datatype=int,
default=0,
group=_("loss"),
info=_(
"The amount of weight to apply to the fourth loss function.\n\n"
"\n\nThe value given here is as a percentage denoting how much the selected "
"function should contribute to the overall loss cost of the model. For "
"example:"
"\n\t 100 - The loss calculated for the fourth loss function will be applied "
"at its full amount towards the overall loss score. "
"\n\t 25 - The loss calculated for the fourth loss function will be reduced "
"by a quarter prior to adding to the overall loss score. "
"\n\t 400 - The loss calculated for the fourth loss function will be "
"mulitplied 4 times prior to adding to the overall loss score. "
"\n\t 0 - Disables the fourth loss function altogether."),
min_max=(0, 400),
rounding=1,
fixed=False)
mask_loss_function = ConfigItem(
datatype=str,
default="mse",
group=_("loss"),
info=_(
"The loss function to use when learning a mask."
"\n\t MAE - Mean absolute error will guide reconstructions of each pixel "
"towards its median value in the training dataset. Robust to outliers but as "
"a median, it can potentially ignore some infrequent image types in the "
"dataset."
"\n\t MSE - Mean squared error will guide reconstructions of each pixel "
"towards its average value in the training dataset. As an average, it will be "
"susceptible to outliers and typically produces slightly blurrier results."),
choices=["mae", "mse"],
fixed=False)
eye_multiplier = ConfigItem(
datatype=int,
default=3,
group=_("loss"),
info=_(
"The amount of priority to give to the eyes.\n\nThe value given here is as a "
"multiplier of the main loss score. For example:"
"\n\t 1 - The eyes will receive the same priority as the rest of the face. "
"\n\t 10 - The eyes will be given a score 10 times higher than the rest of "
"the face."
"\n\nNB: Penalized Mask Loss must be enable to use this option."),
min_max=(1, 40),
rounding=1,
fixed=False)
mouth_multiplier = ConfigItem(
datatype=int,
default=2,
group=_("loss"),
info=_(
"The amount of priority to give to the mouth.\n\nThe value given here is as a "
"multiplier of the main loss score. For Example:"
"\n\t 1 - The mouth will receive the same priority as the rest of the face. "
"\n\t 10 - The mouth will be given a score 10 times higher than the rest of "
"the face."
"\n\nNB: Penalized Mask Loss must be enable to use this option."),
min_max=(1, 40),
rounding=1,
fixed=False)
penalized_mask_loss = ConfigItem(
datatype=bool,
default=True,
group=_("loss"),
info=_(
"Image loss function is weighted by mask presence. For areas of "
"the image without the facial mask, reconstruction errors will be "
"ignored while the masked face area is prioritized. May increase "
"overall quality by focusing attention on the core face area."))
mask_type = ConfigItem(
datatype=str,
default="extended",
group=_("mask"),
info=_(
"The mask to be used for training. If you have selected 'Learn Mask' or "
"'Penalized Mask Loss' you must select a value other than 'none'. The "
"required mask should have been selected as part of the Extract process. If "
"it does not exist in the alignments file then it will be generated prior to "
"training commencing."
"\n\tnone: Don't use a mask."
"\n\tbisenet-fp_face: Relatively lightweight NN based mask that provides more "
"refined control over the area to be masked (configurable in mask settings). "
"Use this version of bisenet-fp if your model is trained with 'face' or "
"'legacy' centering."
"\n\tbisenet-fp_head: Relatively lightweight NN based mask that provides more "
"refined control over the area to be masked (configurable in mask settings). "
"Use this version of bisenet-fp if your model is trained with 'head' "
"centering."
"\n\tcomponents: Mask designed to provide facial segmentation based on the "
"positioning of landmark locations. A convex hull is constructed around the "
"exterior of the landmarks to create a mask."
"\n\tcustom_face: Custom user created, face centered mask."
"\n\tcustom_head: Custom user created, head centered mask."
"\n\textended: Mask designed to provide facial segmentation based on the "
"positioning of landmark locations. A convex hull is constructed around the "
"exterior of the landmarks and the mask is extended upwards onto the forehead."
"\n\tvgg-clear: Mask designed to provide smart segmentation of mostly frontal "
"faces clear of obstructions. Profile faces and obstructions may result in "
"sub-par performance."
"\n\tvgg-obstructed: Mask designed to provide smart segmentation of mostly "
"frontal faces. The mask model has been specifically trained to recognize "
"some facial obstructions (hands and eyeglasses). Profile faces may result in "
"sub-par performance."
"\n\tunet-dfl: Mask designed to provide smart segmentation of mostly frontal "
"faces. The mask model has been trained by community members and will need "
"testing for further description. Profile faces may result in sub-par "
"performance."),
choices=PluginLoader.get_available_extractors("mask",
add_none=True, extend_plugin=True),
gui_radio=True)
mask_dilation = ConfigItem(
datatype=float,
default=0.0,
group=_("mask"),
info=_(
"Dilate or erode the mask. Negative values erode the mask (make it smaller). "
"Positive values dilate the mask (make it larger). The value given is a "
"percentage of the total mask size."),
min_max=(-5.0, 5.0),
rounding=1,
fixed=False)
mask_blur_kernel = ConfigItem(
datatype=int,
default=3,
group=_("mask"),
info=_(
"Apply gaussian blur to the mask input. This has the effect of smoothing the "
"edges of the mask, which can help with poorly calculated masks and give less "
"of a hard edge to the predicted mask. The size is in pixels (calculated from "
"a 128px mask). Set to 0 to not apply gaussian blur. This value should be "
"odd, if an even number is passed in then it will be rounded to the next odd "
"number."),
min_max=(0, 9),
rounding=1,
fixed=False)
mask_threshold = ConfigItem(
datatype=int,
default=4,
group=_("mask"),
info=_(
"Sets pixels that are near white to white and near black to black. Set to 0 "
"for off."),
min_max=(0, 50),
rounding=1,
fixed=False)
learn_mask = ConfigItem(
datatype=bool,
default=False,
group=_("mask"),
info=_(
"Dedicate a portion of the model to learning how to duplicate the input "
"mask. Increases VRAM usage in exchange for learning a quick ability to try "
"to replicate more complex mask models."))
@dataclass
class Optimizer(GlobalSection):
""" global.optimizer configuration section """
helptext = (_("Optimizer configuration options\n"
"The optimizer applies the output of the loss function to the model.\n")
+ _ADDITIONAL_INFO)
optimizer = ConfigItem(
datatype=str,
default="adam",
group=_("optimizer"),
info=_(
"The optimizer to use."
"\n\t adabelief - Adapting Stepsizes by the Belief in Observed Gradients. An "
"optimizer with the aim to converge faster, generalize better and remain more "
"stable. (https://arxiv.org/abs/2010.07468). NB: Epsilon for AdaBelief needs "
"to be set to a smaller value than other Optimizers. Generally setting the "
"'Epsilon Exponent' to around '-16' should work."
"\n\t adam - Adaptive Moment Optimization. A stochastic gradient descent "
"method that is based on adaptive estimation of first-order and second-order "
"moments."
"\n\t adamax - a variant of Adam based on the infinity norm. Due to its "
"capability of adjusting the learning rate based on data characteristics, it "
"is suited to learn time-variant process, "
"parameters follow those provided in the paper"
"\n\t adamw - Like 'adam' but with an added method to decay weights per the "
"techniques discussed in the paper (https://arxiv.org/abs/1711.05101). NB: "
"Weight decay should be set at 0.004 for default implementation."
"\n\t lion - A method that uses the sign operator to control the magnitude of "
"the update, rather than relying on second-order moments (Adam). saves VRAM "
"by only tracking the momentum. Performance gains should be better with "
"larger batch sizes. A suitable learning rate for Lion is typically 3-10x "
"smaller than that for AdamW. The weight decay for Lion should be 3-10x "
"larger than that for AdamW to maintain a similar strength."
"\n\t nadam - Adaptive Moment Optimization with Nesterov Momentum. Much like "
"Adam but uses a different formula for calculating momentum."
"\n\t rms-prop - Root Mean Square Propagation. Maintains a moving "
"(discounted) average of the square of the gradients. Divides the gradient by "
"the root of this average."),
choices=["adabelief", "adam", "adamax", "adamw", "lion", "nadam", "rms-prop"],
gui_radio=True,
fixed=True)
learning_rate = ConfigItem(
datatype=float,
default=5e-5,
group=_("optimizer"),
info=_(
"Learning rate - how fast your network will learn (how large are the "
"modifications to the model weights after one batch of training). Values that "
"are too large might result in model crashes and the inability of the model "
"to find the best solution. Values that are too small might be unable to "
"escape from dead-ends and find the best global minimum."),
min_max=(1e-6, 1e-4),
rounding=6,
fixed=False)
epsilon_exponent = ConfigItem(
datatype=int,
default=-7,
group=_("optimizer"),
info=_(
"The epsilon adds a small constant to weight updates to attempt to avoid "
"'divide by zero' errors. Unless you are using the AdaBelief Optimizer, then "
"Generally this option should be left at default value, For AdaBelief, "
"setting this to around '-16' should work.\n"
"In all instances if you are getting 'NaN' loss values, and have been unable "
"to resolve the issue any other way (for example, increasing batch size, or "
"lowering learning rate), then raising the epsilon can lead to a more stable "
"model. It may, however, come at the cost of slower training and a less "
"accurate final result.\n"
"Note: The value given here is the 'exponent' to the epsilon. For example, "
"choosing '-7' will set the epsilon to 1e-7. Choosing '-3' will set the "
"epsilon to 0.001 (1e-3).\n"
"Note: Not used by the Lion optimizer"),
min_max=(-20, 0),
rounding=1,
fixed=False)
save_optimizer = ConfigItem(
datatype=str,
default="exit",
group=_("optimizer"),
info=_(
"When to save the Optimizer Weights. Saving the optimizer weights is not "
"necessary and will increase the model file size 3x (and by extension the "
"amount of time it takes to save the model). However, it can be useful to "
"save these weights if you want to guarantee that a resumed model carries off "
"exactly from where it left off, rather than spending a few hundred "
"iterations catching up."
"\n\t never - Don't save optimizer weights."
"\n\t always - Save the optimizer weights at every save iteration. Model "
"saving will take longer, due to the increased file size, but you will always "
"have the last saved optimizer state in your model file."
"\n\t exit - Only save the optimizer weights when explicitly terminating a "
"model. This can be when the model is actively stopped or when the target "
"iterations are met. Note: If the training session ends because of another "
"reason (e.g. power outage, Out of Memory Error, NaN detected) then the "
"optimizer weights will NOT be saved."),
gui_radio=True,
choices=["never", "always", "exit"],
fixed=False)
gradient_clipping = ConfigItem(
datatype=str,
default="none",
group=_("clipping"),
info=_(
"Apply clipping to the gradients. Can help prevent NaNs and improve model "
"optimization at the expense of VRAM."
"\n\tautoclip: Analyzes the gradient weights and adjusts the normalization "
"value dynamically to fit the data"
"\n\tglobal_norm: Clips the gradient of each weight so that the global norm "
"is no higher than the given value."
"\n\tnorm: Clips the gradient of each weight so that its norm is no higher "
"than the given value."
"\n\tvalue: Clips the gradient of each weight so that it is no higher than "
"the given value."
"\n\tnone: Don't perform any clipping to the gradients."),
choices=["autoclip", "global_norm", "norm", "value", "none"],
gui_radio=True,
fixed=False)
clipping_value = ConfigItem(
datatype=float,
default=1.0,
group=_("clipping"),
info=_(
"The amount of clipping to perform."
"\n\tautoclip: The percentile to clip at. A value of 1.0 will clip at the "
"10th percentile a value of 2.5 will clip at the 25th percentile etc. "
"Default: 1.0"
"\n\tglobal_norm: The gradient of each weight is clipped so that the global "
"norm is no higher than this value."
"\n\tnorm: The gradient of each weight is clipped so that its norm is no "
"higher than this value."
"\n\tvalue: The gradient of each weight is clipped to be no higher than this "
"value."
"\n\tnone: This option is ignored."),
min_max=(0.0, 10.0),
rounding=1,
fixed=False)
autoclip_history = ConfigItem(
datatype=int,
default=10000,
group=_("clipping"),
info=_(
"The maximum number of prior iterations for autoclipper to analyze when "
"calculating the normalization amount. 0 to always include all prior "
"iterations."),
min_max=(0, 100000),
rounding=1000,
fixed=False)
weight_decay = ConfigItem(
datatype=float,
default=0.0,
group=_("updates"),
info=_("If set, weight decay is applied. 0.0 for no weight decay. Default is 0.0 "
"for all optimizers except AdamW (0.004)"),
min_max=(0.0, 1.0),
rounding=4,
fixed=False)
gradient_accumulation = ConfigItem(
datatype=int,
default=1,
group=_("updates"),
info=_(
"Values above 1 will enable Gradient Accumulation. Updates will not be at "
"every iteration; instead they will occur every number of iterations given "
"here. The update will be the average value of the gradients since the last "
"update. Can be useful when your batch size is very small, in order to reduce "
"gradient noise at each update iteration."),
min_max=(1, 100),
rounding=1,
fixed=False)
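# Hedged illustration of the option above: gradient_accumulation = 4 with a
# batch size of 8 applies one averaged update every 4th iteration, behaving
# roughly like an effective batch size of 32 at the cost of fewer updates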
use_ema = ConfigItem(
datatype=bool,
default=False,
group=_("exponential moving average"),
info=_(
"Enable exponential moving average (EMA). EMA consists of computing an "
"exponential moving average of the weights of the model (as the weight values "
"change after each training batch), and periodically overwriting the weights "
"with their moving average"),
fixed=True)
ema_momentum = ConfigItem(
datatype=float,
default=0.99,
group=_("exponential moving average"),
info=_(
"Only used if use_ema is enabled. This is the momentum to use when computing "
"the EMA of the model's weights: new_average = ema_momentum * old_average + "
"(1 - ema_momentum) * current_variable_value."),
min_max=(0.0, 1.0),
rounding=4,
fixed=True)
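# Worked example of the EMA formula above (assumed values): with
# ema_momentum = 0.99, old_average = 0.500 and current_variable_value = 0.400,
# new_average = 0.99 * 0.500 + 0.01 * 0.400 = 0.499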
ema_frequency = ConfigItem(
datatype=int,
default=100,
group=_("exponential moving average"),
info=_(
"Only used if use_ema is enabled. Set the number of iterations, to overwrite "
"the model variable by its moving average. "),
min_max=(10, 10000),
rounding=10,
fixed=True)
ada_beta_1 = ConfigItem(
datatype=float,
default=0.9,
group=_("optimizer specific"),
info=_(
"The exponential decay rate for the 1st moment estimates. Used for the "
"following Optimizers: AdaBelief, Adam, Adamax, AdamW, Lion, nAdam. Ignored "
"for all others."),
min_max=(0.0, 1.0),
rounding=4,
fixed=True)
ada_beta_2 = ConfigItem(
datatype=float,
default=0.999,
group=_("optimizer specific"),
info=_(
"The exponential decay rate for the 2nd moment estimates. Used for the "
"following Optimizers: AdaBelief, Adam, Adamax, AdamW, Lion, nAdam. Ignored "
"for all others."),
min_max=(0.0, 1.0),
rounding=4,
fixed=True)
ada_amsgrad = ConfigItem(
datatype=bool,
default=False,
group=_("optimizer specific"),
info=_(
"Whether to apply AMSGrad variant of the algorithm from the paper 'On the "
"Convergence of Adam and beyond. Used for the following Optimizers: "
"AdaBelief, Adam, AdamW. Ignored for all others.'"),
fixed=True)
# pylint:disable=duplicate-code
_IS_LOADED: bool = False
def load_config(config_file: str | None = None) -> None:
""" Load the Train configuration .ini file
Parameters
----------
config_file : str | None, optional
Path to a custom .ini configuration file to load. Default: ``None`` (use default
configuration file)
"""
global _IS_LOADED # pylint:disable=global-statement
if not _IS_LOADED:
_Config(configfile=config_file)
_IS_LOADED = True
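# Minimal usage sketch (hedged; option access mirrors how cfg.Loss.learn_mask()
# is called elsewhere in the training plugins, and values are illustrative):
#     from plugins.train import train_config as cfg
#     cfg.load_config()                    # load the default .ini file
#     cfg.load_config("custom_train.ini")  # no-op: config is already loaded
#     lr = cfg.Optimizer.learning_rate()   # e.g. 5e-05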
| {
"repo_id": "deepfakes/faceswap",
"file_path": "plugins/train/train_config.py",
"license": "GNU General Public License v3.0",
"lines": 758,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepfakes/faceswap:plugins/train/trainer/_display.py | #!/usr/bin/env python3
""" Handles the creation of display images for preview window and timelapses """
from __future__ import annotations
import logging
import time
import typing as T
import os
import cv2
import numpy as np
import torch
from lib.image import hex_to_rgb
from lib.utils import get_folder, get_image_paths, get_module_objects
from plugins.train import train_config as cfg
if T.TYPE_CHECKING:
from keras import KerasTensor
from lib.training import Feeder
from plugins.train.model._base import ModelBase
logger = logging.getLogger(__name__)
class Samples():
""" Compile samples for display for preview and time-lapse
Parameters
----------
model: plugin from :mod:`plugins.train.model`
The selected model that will be running this trainer
coverage_ratio: float
Ratio of face to be cropped out of the training image.
mask_opacity: int
The opacity (as a percentage) to use for the mask overlay
mask_color: str
The hex RGB value to use for the mask overlay
Attributes
----------
images: dict
The :class:`numpy.ndarray` training images for generating previews on each side. The
dictionary should contain 2 keys ("a" and "b") with the values being the training images
for generating samples corresponding to each side.
"""
def __init__(self,
model: ModelBase,
coverage_ratio: float,
mask_opacity: int,
mask_color: str) -> None:
logger.debug("Initializing %s: model: '%s', coverage_ratio: %s, mask_opacity: %s, "
"mask_color: %s)",
self.__class__.__name__, model, coverage_ratio, mask_opacity, mask_color)
self._model = model
self._display_mask = cfg.Loss.learn_mask() or cfg.Loss.penalized_mask_loss()
self.images: dict[T.Literal["a", "b"], list[np.ndarray]] = {}
self._coverage_ratio = coverage_ratio
self._mask_opacity = mask_opacity / 100.0
self._mask_color = np.array(hex_to_rgb(mask_color))[..., 2::-1] / 255.
logger.debug("Initialized %s", self.__class__.__name__)
def toggle_mask_display(self) -> None:
""" Toggle the mask overlay on or off depending on user input. """
if not (cfg.Loss.learn_mask() or cfg.Loss.penalized_mask_loss()):
return
display_mask = not self._display_mask
print("\x1b[2K", end="\r") # Clear last line
logger.info("Toggling mask display %s...", "on" if display_mask else "off")
self._display_mask = display_mask
def show_sample(self) -> np.ndarray:
""" Compile a preview image.
Returns
-------
:class:`numpy.ndarray`
A compiled preview image ready for display or saving
"""
logger.debug("Showing sample")
feeds: dict[T.Literal["a", "b"], np.ndarray] = {}
for idx, side in enumerate(T.get_args(T.Literal["a", "b"])):
feed = self.images[side][0]
input_shape = self._model.model.input_shape[idx][1:]
if input_shape[0] / feed.shape[1] != 1.0:
feeds[side] = self._resize_sample(side, feed, input_shape[0])
else:
feeds[side] = feed
preds = self._get_predictions(feeds["a"], feeds["b"])
return self._compile_preview(preds)
@classmethod
def _resize_sample(cls,
side: T.Literal["a", "b"],
sample: np.ndarray,
target_size: int) -> np.ndarray:
""" Resize a given image to the target size.
Parameters
----------
side: str
The side ("a" or "b") that the samples are being generated for
sample: :class:`numpy.ndarray`
The sample to be resized
target_size: int
The size that the sample should be resized to
Returns
-------
:class:`numpy.ndarray`
The sample resized to the target size
"""
scale = target_size / sample.shape[1]
if scale == 1.0:
# cv2 complains if we don't do this :/
return np.ascontiguousarray(sample)
logger.debug("Resizing sample: (side: '%s', sample.shape: %s, target_size: %s, scale: %s)",
side, sample.shape, target_size, scale)
interpn = cv2.INTER_CUBIC if scale > 1.0 else cv2.INTER_AREA
retval = np.array([cv2.resize(img, (target_size, target_size), interpolation=interpn)
for img in sample])
logger.debug("Resized sample: (side: '%s' shape: %s)", side, retval.shape)
return retval
def _filter_multiscale_output(self, standard: list[KerasTensor], swapped: list[KerasTensor]
) -> tuple[list[KerasTensor], list[KerasTensor]]:
""" Only return the largest predictions if the model has multi-scaled output
Parameters
----------
standard: list[:class:`keras.KerasTensor`]
The standard output from the model
swapped: list[:class:`keras.KerasTensor`]
The swapped output from the model
Returns
-------
standard: list[:class:`keras.KerasTensor`]
The standard output from the model, filtered to just the largest output
swapped: list[:class:`keras.KerasTensor`]
The swapped output from the model, filtered to just the largest output
"""
sizes = T.cast(set[int], set(p.shape[1] for p in standard))
if len(sizes) == 1:
return standard, swapped
logger.debug("Received outputs. standard: %s, swapped: %s",
[s.shape for s in standard], [s.shape for s in swapped])
logger.debug("Stripping multi-scale outputs for sizes %s", sizes)
standard = [s for s in standard if s.shape[1] == max(sizes)]
swapped = [s for s in swapped if s.shape[1] == max(sizes)]
logger.debug("Stripped outputs. standard: %s, swapped: %s",
[s.shape for s in standard], [s.shape for s in swapped])
return standard, swapped
def _collate_output(self, standard: list[torch.Tensor], swapped: list[torch.Tensor]
) -> tuple[list[np.ndarray], list[np.ndarray]]:
""" Merge the mask onto the preview image's 4th channel if learn mask is selected.
Return as numpy array
Parameters
----------
standard: list[:class:`torch.Tensor`]
The standard output from the model
swapped: list[:class:`torch.Tensor`]
The swapped output from the model
Returns
-------
standard: list[:class:`numpy.ndarray`]
The standard output from the model, with mask merged
swapped: list[:class:`numpy.ndarray`]
The swapped output from the model, with mask merged
"""
logger.debug("Received tensors. standard: %s, swapped: %s",
[s.shape for s in standard], [s.shape for s in swapped])
# Pull down outputs
nstandard = [p.cpu().detach().numpy() for p in standard]
nswapped = [p.cpu().detach().numpy() for p in swapped]
if cfg.Loss.learn_mask(): # Add mask to 4th channel of final output
nstandard = [np.concatenate(nstandard[idx * 2: (idx * 2) + 2], axis=-1)
for idx in range(2)]
nswapped = [np.concatenate(nswapped[idx * 2: (idx * 2) + 2], axis=-1)
for idx in range(2)]
logger.debug("Collated output. standard: %s, swapped: %s",
[(s.shape, s.dtype) for s in nstandard],
[(s.shape, s.dtype) for s in nswapped])
return nstandard, nswapped
def _get_predictions(self, feed_a: np.ndarray, feed_b: np.ndarray
) -> dict[T.Literal["a_a", "a_b", "b_b", "b_a"], np.ndarray]:
""" Feed the samples to the model and return predictions
Parameters
----------
feed_a: :class:`numpy.ndarray`
Feed images for the "a" side
feed_b: :class:`numpy.ndarray`
Feed images for the "b" side
Returns
-------
list:
List of :class:`numpy.ndarray` of predictions received from the model
"""
logger.debug("Getting Predictions")
preds: dict[T.Literal["a_a", "a_b", "b_b", "b_a"], np.ndarray] = {}
with torch.inference_mode():
standard = self._model.model([feed_a, feed_b])
swapped = self._model.model([feed_b, feed_a])
standard, swapped = self._filter_multiscale_output(standard, swapped)
standard, swapped = self._collate_output(standard, swapped)
preds["a_a"] = standard[0]
preds["b_b"] = standard[1]
preds["a_b"] = swapped[0]
preds["b_a"] = swapped[1]
logger.debug("Returning predictions: %s", {key: val.shape for key, val in preds.items()})
return preds
def _compile_preview(self, predictions: dict[T.Literal["a_a", "a_b", "b_b", "b_a"], np.ndarray]
) -> np.ndarray:
""" Compile predictions and images into the final preview image.
Parameters
----------
predictions: dict[Literal["a_a", "a_b", "b_b", "b_a"], np.ndarray
The predictions from the model
Returns
-------
:class:`numpy.ndarray`
A compiled preview image ready for display or saving
"""
figures: dict[T.Literal["a", "b"], np.ndarray] = {}
headers: dict[T.Literal["a", "b"], np.ndarray] = {}
for side, samples in self.images.items():
other_side = "a" if side == "b" else "b"
preds = [predictions[T.cast(T.Literal["a_a", "a_b", "b_b", "b_a"],
f"{side}_{side}")],
predictions[T.cast(T.Literal["a_a", "a_b", "b_b", "b_a"],
f"{other_side}_{side}")]]
display = self._to_full_frame(side, samples, preds)
headers[side] = self._get_headers(side, display[0].shape[1])
figures[side] = np.stack([display[0], display[1], display[2], ], axis=1)
if self.images[side][1].shape[0] % 2 == 1:
figures[side] = np.concatenate([figures[side],
np.expand_dims(figures[side][0], 0)])
width = 4
if width // 2 != 1:
headers = self._duplicate_headers(headers, width // 2)
header = np.concatenate([headers["a"], headers["b"]], axis=1)
figure = np.concatenate([figures["a"], figures["b"]], axis=0)
height = int(figure.shape[0] / width)
figure = figure.reshape((width, height) + figure.shape[1:])
figure = _stack_images(figure)
figure = np.concatenate((header, figure), axis=0)
logger.debug("Compiled sample")
return np.clip(figure * 255, 0, 255).astype('uint8')
def _to_full_frame(self,
side: T.Literal["a", "b"],
samples: list[np.ndarray],
predictions: list[np.ndarray]) -> list[np.ndarray]:
""" Patch targets and prediction images into images of model output size.
Parameters
----------
side: {"a" or "b"}
The side that these samples are for
samples: list
List of :class:`numpy.ndarray` of feed images and sample images
predictions: list
List of :class: `numpy.ndarray` of predictions from the model
Returns
-------
list
The images resized and collated for display in the preview frame
"""
logger.debug("side: '%s', number of sample arrays: %s, prediction.shapes: %s)",
side, len(samples), [pred.shape for pred in predictions])
faces, full = samples[:2]
if self._model.color_order.lower() == "rgb": # Switch color order for RGB model display
full = full[..., ::-1]
faces = faces[..., ::-1]
predictions = [pred[..., 2::-1] for pred in predictions]
full = self._process_full(side, full, predictions[0].shape[1], (0., 0., 1.0))
images = [faces] + predictions
if self._display_mask:
images = self._compile_masked(images, samples[-1])
elif cfg.Loss.learn_mask():
# Remove masks when learn mask is selected but mask toggle is off
images = [batch[..., :3] for batch in images]
images = [self._overlay_foreground(full.copy(), image) for image in images]
return images
def _process_full(self,
side: T.Literal["a", "b"],
images: np.ndarray,
prediction_size: int,
color: tuple[float, float, float]) -> np.ndarray:
""" Add a frame overlay to preview images indicating the region of interest.
This applies the red border that appears in the preview images.
Parameters
----------
side: {"a" or "b"}
The side that these samples are for
images: :class:`numpy.ndarray`
The input training images to process
prediction_size: int
The size of the predicted output from the model
color: tuple
The (Blue, Green, Red) color to use for the frame
Returns
-------
:class:`numpy.ndarray`
The input training images, sized for output and annotated for coverage
"""
logger.debug("full_size: %s, prediction_size: %s, color: %s",
images.shape[1], prediction_size, color)
display_size = int((prediction_size / self._coverage_ratio // 2) * 2)
images = self._resize_sample(side, images, display_size) # Resize targets to display size
padding = (display_size - prediction_size) // 2
if padding == 0:
logger.debug("Resized background. Shape: %s", images.shape)
return images
length = display_size // 4
t_l, b_r = (padding - 1, display_size - padding)
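# The four rectangles below draw corner brackets (top-left, top-right,
# bottom-right and bottom-left) marking the model's coverage region on each frame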
for img in images:
cv2.rectangle(img, (t_l, t_l), (t_l + length, t_l + length), color, 1)
cv2.rectangle(img, (b_r, t_l), (b_r - length, t_l + length), color, 1)
cv2.rectangle(img, (b_r, b_r), (b_r - length, b_r - length), color, 1)
cv2.rectangle(img, (t_l, b_r), (t_l + length, b_r - length), color, 1)
logger.debug("Overlayed background. Shape: %s", images.shape)
return images
def _compile_masked(self, faces: list[np.ndarray], masks: np.ndarray) -> list[np.ndarray]:
""" Add the mask to the faces for masked preview.
Places an opaque red layer over areas of the face that are masked out.
Parameters
----------
faces: list
The :class:`numpy.ndarray` sample faces and predictions that are to have the mask
applied
masks: :class:`numpy.ndarray`
The masks that are to be applied to the faces
Returns
-------
list
List of :class:`numpy.ndarray` faces with the opaque mask layer applied
"""
orig_masks = 1. - masks
masks3: list[np.ndarray] | np.ndarray = []
if faces[-1].shape[-1] == 4: # Mask contained in alpha channel of predictions
pred_masks = [1. - face[..., -1][..., None] for face in faces[-2:]]
faces[-2:] = [face[..., :-1] for face in faces[-2:]]
masks3 = [orig_masks, *pred_masks]
else:
masks3 = np.repeat(np.expand_dims(orig_masks, axis=0), 3, axis=0)
retval: list[np.ndarray] = []
overlays3 = np.ones_like(faces) * self._mask_color
for previews, overlays, compiled_masks in zip(faces, overlays3, masks3):
compiled_masks *= self._mask_opacity
overlays *= compiled_masks
previews *= (1. - compiled_masks)
retval.append(previews + overlays)
logger.debug("masked shapes: %s", [faces.shape for faces in retval])
return retval
@classmethod
def _overlay_foreground(cls, backgrounds: np.ndarray, foregrounds: np.ndarray) -> np.ndarray:
""" Overlay the preview images into the center of the background images
Parameters
----------
backgrounds: :class:`numpy.ndarray`
Background images for placing the preview images onto
foregrounds: :class:`numpy.ndarray`
Preview images for placing onto the background images
Returns
-------
:class:`numpy.ndarray`
The preview images compiled into the full frame size for each preview
"""
offset = (backgrounds.shape[1] - foregrounds.shape[1]) // 2
for foreground, background in zip(foregrounds, backgrounds):
background[offset:offset + foreground.shape[0],
offset:offset + foreground.shape[1], :3] = foreground
logger.debug("Overlayed foreground. Shape: %s", backgrounds.shape)
return backgrounds
@classmethod
def _get_headers(cls, side: T.Literal["a", "b"], width: int) -> np.ndarray:
""" Set header row for the final preview frame
Parameters
----------
side: {"a" or "b"}
The side that the headers should be generated for
width: int
The width of each column in the preview frame
Returns
-------
:class:`numpy.ndarray`
The column headings for the given side
"""
logger.debug("side: '%s', width: %s",
side, width)
titles = ("Original", "Swap") if side == "a" else ("Swap", "Original")
height = int(width / 4.5)
total_width = width * 3
logger.debug("height: %s, total_width: %s", height, total_width)
font = cv2.FONT_HERSHEY_SIMPLEX
texts = [f"{titles[0]} ({side.upper()})",
f"{titles[0]} > {titles[0]}",
f"{titles[0]} > {titles[1]}"]
scaling = (width / 144) * 0.45
text_sizes = [cv2.getTextSize(texts[idx], font, scaling, 1)[0]
for idx in range(len(texts))]
text_y = int((height + text_sizes[0][1]) / 2)
text_x = [int((width - text_sizes[idx][0]) / 2) + width * idx
for idx in range(len(texts))]
logger.debug("texts: %s, text_sizes: %s, text_x: %s, text_y: %s",
texts, text_sizes, text_x, text_y)
header_box = np.ones((height, total_width, 3), np.float32)
for idx, text in enumerate(texts):
cv2.putText(header_box,
text,
(text_x[idx], text_y),
font,
scaling,
(0, 0, 0),
1,
lineType=cv2.LINE_AA)
logger.debug("header_box.shape: %s", header_box.shape)
return header_box
@classmethod
def _duplicate_headers(cls,
headers: dict[T.Literal["a", "b"], np.ndarray],
columns: int) -> dict[T.Literal["a", "b"], np.ndarray]:
""" Duplicate headers for the number of columns displayed for each side.
Parameters
----------
headers: dict
The headers to be duplicated for each side
columns: int
The number of columns that the header needs to be duplicated for
Returns
-------
dict
The original headers duplicated by the number of columns for each side
"""
for side, header in headers.items():
duped = tuple(header for _ in range(columns))
headers[side] = np.concatenate(duped, axis=1)
logger.debug("side: %s header.shape: %s", side, header.shape)
return headers
class Timelapse():
""" Create a time-lapse preview image.
Parameters
----------
model: plugin from :mod:`plugins.train.model`
The selected model that will be running this trainer
coverage_ratio: float
Ratio of face to be cropped out of the training image.
image_count: int
The number of preview images to be displayed in the time-lapse
mask_opacity: int
The opacity (as a percentage) to use for the mask overlay
mask_color: str
The hex RGB value to use for the mask overlay
feeder: :class:`~lib.training.generator.Feeder`
The feeder for generating the time-lapse images.
image_paths: dict
The full paths to the training images for each side of the model
"""
def __init__(self,
model: ModelBase,
coverage_ratio: float,
image_count: int,
mask_opacity: int,
mask_color: str,
feeder: Feeder,
image_paths: dict[T.Literal["a", "b"], list[str]]) -> None:
logger.debug("Initializing %s: model: %s, coverage_ratio: %s, image_count: %s, "
"mask_opacity: %s, mask_color: %s, feeder: %s, image_paths: %s)",
self.__class__.__name__, model, coverage_ratio, image_count, mask_opacity,
mask_color, feeder, len(image_paths))
self._num_images = image_count
self._samples = Samples(model, coverage_ratio, mask_opacity, mask_color)
self._model = model
self._feeder = feeder
self._image_paths = image_paths
self._output_file = ""
logger.debug("Initialized %s", self.__class__.__name__)
def _setup(self, input_a: str, input_b: str, output: str) -> None:
""" Setup the time-lapse folder locations and the time-lapse feed.
Parameters
----------
input_a: str
The full path to the time-lapse input folder containing faces for the "a" side
input_b: str
The full path to the time-lapse input folder containing faces for the "b" side
output: str, optional
The full path to the time-lapse output folder. If ``None`` is provided this will
default to the model folder
"""
logger.debug("Setting up time-lapse")
if not output:
output = get_folder(os.path.join(str(self._model.io.model_dir),
f"{self._model.name}_timelapse"))
self._output_file = output
logger.debug("Time-lapse output set to '%s'", self._output_file)
# Rewrite paths to pull from the training images so mask and face data can be accessed
images: dict[T.Literal["a", "b"], list[str]] = {}
for side, input_ in zip(T.get_args(T.Literal["a", "b"]), (input_a, input_b)):
training_path = os.path.dirname(self._image_paths[side][0])
images[side] = [os.path.join(training_path, os.path.basename(pth))
for pth in get_image_paths(input_)]
batchsize = min(len(images["a"]),
len(images["b"]),
self._num_images)
self._feeder.set_timelapse_feed(images, batchsize)
logger.debug("Set up time-lapse")
def output_timelapse(self, timelapse_kwargs: dict[T.Literal["input_a",
"input_b",
"output"], str]) -> None:
""" Generate the time-lapse samples and output the created time-lapse to the specified
output folder.
Parameters
----------
timelapse_kwargs: dict:
The keyword arguments for setting up the time-lapse. All values should be full paths
the keys being `input_a`, `input_b`, `output`
"""
logger.debug("Ouputting time-lapse")
if not self._output_file:
self._setup(**T.cast(dict[str, str], timelapse_kwargs))
logger.debug("Getting time-lapse samples")
self._samples.images = self._feeder.generate_preview(is_timelapse=True)
logger.debug("Got time-lapse samples: %s",
{side: len(images) for side, images in self._samples.images.items()})
image = self._samples.show_sample()
if image is None:
return
filename = os.path.join(self._output_file, str(int(time.time())) + ".jpg")
cv2.imwrite(filename, image)
logger.debug("Created time-lapse: '%s'", filename)
def _stack_images(images: np.ndarray) -> np.ndarray:
""" Stack images evenly for preview.
Parameters
----------
images: :class:`numpy.ndarray`
The preview images to be stacked
Returns
-------
:class:`numpy.ndarray`
The stacked preview images
"""
logger.debug("Stack images")
def get_transpose_axes(num):
if num % 2 == 0:
logger.debug("Even number of images to stack")
y_axes = list(range(1, num - 1, 2))
x_axes = list(range(0, num - 1, 2))
else:
logger.debug("Odd number of images to stack")
y_axes = list(range(0, num - 1, 2))
x_axes = list(range(1, num - 1, 2))
return y_axes, x_axes, [num - 1]
images_shape = np.array(images.shape)
new_axes = get_transpose_axes(len(images_shape))
new_shape = [np.prod(images_shape[x]) for x in new_axes]
logger.debug("Stacked images")
return np.transpose(images, axes=np.concatenate(new_axes)).reshape(new_shape)
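# Hedged shape sketch for _stack_images (assumed dimensions): a grid of shape
# (cols, rows, h, w, c) is interleaved into a single mosaic, e.g.
#     _stack_images(np.zeros((4, 2, 32, 96, 3))).shape == (128, 192, 3)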
__all__ = get_module_objects(__name__)
| {
"repo_id": "deepfakes/faceswap",
"file_path": "plugins/train/trainer/_display.py",
"license": "GNU General Public License v3.0",
"lines": 539,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
deepfakes/faceswap:plugins/train/trainer/distributed.py | #!/usr/bin/env python3
""" Original Trainer """
from __future__ import annotations
import logging
import typing as T
import warnings
from keras import ops
import torch
from lib.utils import get_module_objects
from .original import Trainer as OriginalTrainer
if T.TYPE_CHECKING:
from plugins.train.model._base import ModelBase
import keras
logger = logging.getLogger(__name__)
class WrappedModel(torch.nn.Module):
""" A torch module that wraps a dual input Faceswap model with a single input version that is
compatible with DataParallel training
Parameters
----------
model : :class:`keras.Model`
The original faceswap model that is to be wrapped
"""
def __init__(self, model: keras.Model):
logger.debug("Wrapping keras model: %s", model.name)
super().__init__()
self._keras_model = model
logger.debug("Wrapped keras model: %s (%s)", model.name, self)
def forward(self,
input_a: torch.Tensor,
input_b: torch.Tensor,
targets_a: torch.Tensor,
targets_b: torch.Tensor,
*targets: torch.Tensor) -> torch.Tensor:
""" Run the forward pass per GPU
Parameters
----------
input_a : :class:`torch.Tensor`
The A batch of input images for 1 GPU
input_b : :class:`torch.Tensor`
The B batch of input images for 1 GPU
targets_a : :class:`torch.Tensor` | list[torch.Tensor]
The A batch of target images for 1 GPU. If this is a multi-output model then this list
will be the target images per output for all items in the current batch, regardless of
GPU. If we have 1 output, this will be a Tensor for this GPU's current batch output
targets_b : :class:`torch.Tensor` | list[torch.Tensor]
The B batch of target images for 1 GPU. If this is a multi-output model then this list
will be the target images per output for all items in the current batch, regardless of
GPU. If we have 1 output, this will be a Tensor for this GPU's current batch output
targets : :class:`torch.Tensor` | list[torch.Tensor], optional
Used for multi-output models. Any additional outputs can be added here. They should be
added in A-B order
Returns
-------
:class:`torch.Tensor`
The loss outputs for each side of the model for 1 GPU
"""
preds = self._keras_model((input_a, input_b), training=True)
self._keras_model.zero_grad()
if targets: # Go from [A1, B1, A2, B2, A3, B3] to [A1, A2, A3, B1, B2, B3]
all_targets = [targets_a, targets_b, *targets]
assert len(all_targets) % 2 == 0
loss_targets = all_targets[0::2] + all_targets[1::2]
else:
loss_targets = [targets_a, targets_b]
losses = torch.stack([loss_fn(y_true, y_pred)
for loss_fn, y_true, y_pred in zip(self._keras_model.loss,
loss_targets,
preds)])
logger.trace("Losses: %s", losses) # type:ignore[attr-defined]
return losses
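# Hedged illustration of the re-ordering in forward() above: with two outputs
# per side the targets arrive interleaved, e.g.
#     all_targets = [A1, B1, A2, B2]
# and all_targets[0::2] + all_targets[1::2] restores [A1, A2, B1, B2], matching
# the order of the model's loss functions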
class Trainer(OriginalTrainer):
""" Distributed training with torch.nn.DataParallel
Parameters
----------
model : plugin from :mod:`plugins.train.model`
The model that will be running this trainer
batch_size : int
The requested batch size for iteration to be trained through the model.
"""
def __init__(self, model: ModelBase, batch_size: int) -> None:
self._gpu_count = torch.cuda.device_count()
batch_size = self._validate_batch_size(batch_size)
self._is_multi_out: bool | None = None
super().__init__(model, batch_size)
self._distributed_model = self._set_distributed()
def _validate_batch_size(self, batch_size: int) -> int:
""" Validate that the batch size is suitable for the number of GPUs and update accordingly.
Parameters
----------
batch_size : int
The requested training batch size
Returns
-------
int
A valid batch size for the GPU configuration
"""
if batch_size < self._gpu_count:
logger.warning("Batch size (%s) is less than the number of GPUs (%s). Updating batch "
"size to: %s", batch_size, self._gpu_count, self._gpu_count)
batch_size = self._gpu_count
if batch_size % self._gpu_count:
new_batch_size = (batch_size // self._gpu_count) * self._gpu_count
logger.warning("Batch size %s is sub-optimal for %s GPUs. You may want to adjust your "
"batch size to %s or %s.",
batch_size,
self._gpu_count,
new_batch_size,
new_batch_size + self._gpu_count)
return batch_size
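# Hedged example for the validation above: batch_size=10 on 4 GPUs is returned
# unchanged, but a warning suggests 8 or 12 (the nearest multiples of the GPU
# count either side of the request)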
def _handle_torch_gpu_mismatch_warning(
self, warn_messages: list[warnings.WarningMessage] | None) -> None:
""" Handle the warning generated by Torch when significantly mismatched GPUs are used and
remove potentially confusing information not relevant for Faceswap
Parameters
----------
warn_messages : list[:class:`warnings.WarningMessage`]
Any qualifying warning messages that may have been generated when wrapping the model
"""
if not warn_messages:
return
warn_msg = warn_messages[0]
terminate = "You can do so by"
msg = ""
for x in str(warn_msg.message).split("\n"):
x = x.strip()
if not x:
continue
if terminate in msg:
msg = msg[:msg.find(terminate)]
break
msg += f" {x}"
logger.warning(msg.strip())
def _set_distributed(self) -> torch.nn.DataParallel:
"""Wrap the loaded model in a torch.nn.DataParallel instance
Returns
-------
:class:`torch.nn.DataParallel`
A wrapped version of the faceswap model compatible with distributed training
"""
name = self.model.model.name
logger.debug("Setting distributed training for '%s'", name)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("default",
message="There is an imbalance between your GPUs",
category=UserWarning)
# We already set CUDA_VISIBLE_DEVICES from -X command line flag, so just need to wrap
wrapped = torch.nn.DataParallel(WrappedModel(model=self.model.model))
self._handle_torch_gpu_mismatch_warning(w)
logger.info("Distributed training enabled. Model: '%s', devices: %s",
name, wrapped.device_ids)
return wrapped
def _forward(self,
inputs: torch.Tensor,
targets: list[torch.Tensor]) -> torch.Tensor:
""" Perform the forward pass on the model
Parameters
----------
inputs : :class:`torch.Tensor`
The batch of input image tensors to the model in shape `(side, batch_size,
*dims)` with `side` 0 being input A and `side` 1 being input B
targets : list[:class:`torch.Tensor`]
The corresponding batch of target images for the model for each side's output(s). For
each model output an array should exist in the order of model outputs in the format `(
side, batch_size, *dims)` with `side` 0 being input A and `side` 1 being input B
Returns
-------
:class:`torch.Tensor`
The loss for each side of this batch in layout (A1, ..., An, B1, ..., Bn)
"""
if self._is_multi_out is None:
self._is_multi_out = len(targets) > 1
logger.debug("Setting multi-out to: %s", self._is_multi_out)
if self._is_multi_out:
multi_targets = tuple(t[i] for t in targets[1:] for i in range(2))
else:
multi_targets = ()
loss: torch.Tensor = self._distributed_model(inputs[0],
inputs[1],
targets[0][0],
targets[0][1],
*multi_targets)
scaled = T.cast(torch.Tensor, ops.sum(ops.reshape(loss, (self._gpu_count, 2, -1)),
axis=0) / self._gpu_count)
return scaled.flatten()
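# Hedged shape sketch for _forward: with 2 GPUs and 2 outputs per side, the
# gathered loss is [A1, A2, B1, B2] from each GPU concatenated along dim 0;
# reshaping to (2, 2, -1) and averaging over axis 0 yields the cross-GPU mean,
# flattened back to (A1, A2, B1, B2)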
__all__ = get_module_objects(__name__)
| {
"repo_id": "deepfakes/faceswap",
"file_path": "plugins/train/trainer/distributed.py",
"license": "GNU General Public License v3.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
deepfakes/faceswap:plugins/train/trainer/trainer_config.py | #!/usr/bin/env python3
""" Default configurations for trainers """
import gettext
import logging
from lib.config import ConfigItem
from lib.utils import get_module_objects
logger = logging.getLogger(__name__)
# LOCALES
_LANG = gettext.translation("plugins.train.trainer.train_config",
localedir="locales", fallback=True)
_ = _LANG.gettext
def get_defaults() -> tuple[str, str, dict[str, ConfigItem]]:
""" Obtain the default values for adding to the config.ini file
Returns
-------
helptext : str
The help text for the training config section
section : str
The section name for the config items
defaults : dict[str, :class:`lib.config.objects.ConfigItem`]
The option names and config items
"""
section = "trainer.augmentation"
helptext = _(
"Data Augmentation Options.\n"
"WARNING: The defaults for augmentation will be fine for 99.9% of use cases. "
"Only change them if you absolutely know what you are doing!")
defaults = {k: v for k, v in globals().items()
if isinstance(v, ConfigItem)}
logger.debug("Training config. Helptext: %s, options: %s", helptext, defaults)
return helptext, section, defaults
preview_images = ConfigItem(
datatype=int,
default=14,
group=_("evaluation"),
info=_("Number of sample faces to display for each side in the preview when training."),
rounding=2,
min_max=(2, 16))
mask_opacity = ConfigItem(
datatype=int,
default=30,
group=_("evaluation"),
info=_("The opacity of the mask overlay in the training preview. Lower values are more "
"transparent."),
rounding=2,
min_max=(0, 100))
mask_color = ConfigItem(
datatype=str,
default="#ff0000",
choices="colorchooser",
group=_("evaluation"),
info=_("The RGB hex color to use for the mask overlay in the training preview."))
zoom_amount = ConfigItem(
datatype=int,
default=5,
group=_("image augmentation"),
info=_("Percentage amount to randomly zoom each training image in and out."),
rounding=1,
min_max=(0, 25))
rotation_range = ConfigItem(
datatype=int,
default=10,
group=_("image augmentation"),
info=_("Percentage amount to randomly rotate each training image."),
rounding=1,
min_max=(0, 25))
shift_range = ConfigItem(
datatype=int,
default=5,
group=_("image augmentation"),
info=_("Percentage amount to randomly shift each training image horizontally and "
"vertically."),
rounding=1,
min_max=(0, 25))
flip_chance = ConfigItem(
datatype=int,
default=50,
group=_("image augmentation"),
info=_("Percentage chance to randomly flip each training image horizontally.\n"
"NB: This is ignored if the 'no-flip' option is enabled"),
rounding=1,
min_max=(0, 75))
color_lightness = ConfigItem(
datatype=int,
default=30,
group=_("color augmentation"),
info=_("Percentage amount to randomly alter the lightness of each training image.\n"
"NB: This is ignored if the 'no-augment-color' option is enabled"),
rounding=1,
min_max=(0, 75))
color_ab = ConfigItem(
datatype=int,
default=8,
group=_("color augmentation"),
info=_("Percentage amount to randomly alter the 'a' and 'b' colors of the L*a*b* color "
"space of each training image.\nNB: This is ignored if the 'no-augment-color' option"
"is enabled"),
rounding=1,
min_max=(0, 50))
color_clahe_chance = ConfigItem(
datatype=int,
default=50,
group=_("color augmentation"),
info=_("Percentage chance to perform Contrast Limited Adaptive Histogram Equalization on "
"each training image.\nNB: This is ignored if the 'no-augment-color' option is "
"enabled"),
rounding=1,
min_max=(0, 75),
fixed=False)
color_clahe_max_size = ConfigItem(
datatype=int,
default=4,
group=_("color augmentation"),
info=_("The grid size dictates how much Contrast Limited Adaptive Histogram Equalization is "
"performed on any training image selected for clahe. Contrast will be applied "
"randomly with a gridsize of 0 up to the maximum. This value is a multiplier "
"calculated from the training image size.\nNB: This is ignored if the "
"'no-augment-color' option is enabled"),
rounding=1,
min_max=(1, 8))
__all__ = get_module_objects(__name__)
| {
"repo_id": "deepfakes/faceswap",
"file_path": "plugins/train/trainer/trainer_config.py",
"license": "GNU General Public License v3.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepfakes/faceswap:plugins/train/training.py | #!/usr/bin/env python3
""" Run the training loop for a training plugin """
from __future__ import annotations
import logging
import os
import typing as T
import time
import warnings
import numpy as np
import torch
from torch.cuda import OutOfMemoryError
from lib.training import Feeder, LearningRateFinder, LearningRateWarmup
from lib.training.tensorboard import TorchTensorBoard
from lib.utils import get_module_objects, FaceswapError
from plugins.train import train_config as mod_cfg
from plugins.train.trainer import trainer_config as trn_cfg
from plugins.train.trainer._display import Samples, Timelapse
if T.TYPE_CHECKING:
from collections.abc import Callable
from plugins.train.trainer._base import TrainerBase
logger = logging.getLogger(__name__)
# Suppress non-Faceswap related Keras warning about backend padding mismatches
warnings.filterwarnings("ignore",
message="You might experience inconsistencies",
category=UserWarning)
class Trainer:
""" Handles the feeding of training images to Faceswap models, the generation of Tensorboard
logs and the creation of sample/time-lapse preview images.
All Trainer plugins must inherit from this class.
Parameters
----------
plugin : :class:`TrainerBase`
The plugin that will be processing each batch
images : dict[literal["a", "b"], list[str]]
The file paths for the images to be trained on for each side. The dictionary should contain
2 keys ("a" and "b") with the values being a list of full paths corresponding to each side.
"""
def __init__(self, plugin: TrainerBase, images: dict[T.Literal["a", "b"], list[str]]) -> None:
self._batch_size = plugin.batch_size
self._plugin = plugin
self._model = plugin.model
self._feeder = Feeder(images, plugin.model, plugin.batch_size)
self._exit_early = self._handle_lr_finder()
if self._exit_early:
logger.debug("Exiting from LR Finder")
return
self._warmup = self._get_warmup()
self._model.state.add_session_batchsize(plugin.batch_size)
self._images = images
self._sides = sorted(key for key in self._images.keys())
self._tensorboard = self._set_tensorboard()
self._samples = Samples(self._model,
self._model.coverage_ratio,
trn_cfg.mask_opacity(),
trn_cfg.mask_color())
num_images = trn_cfg.preview_images()
assert isinstance(num_images, int)
self._timelapse = Timelapse(self._model,
self._model.coverage_ratio,
num_images,
trn_cfg.mask_opacity(),
trn_cfg.mask_color(),
self._feeder,
self._images)
logger.debug("Initialized %s", self.__class__.__name__)
@property
def exit_early(self) -> bool:
""" True if the trainer should exit early, without perfoming any training steps """
return self._exit_early
@property
def batch_size(self) -> int:
"""int : The batch size that the model is set to train at. """
return self._batch_size
def _handle_lr_finder(self) -> bool:
""" Handle the learning rate finder.
If this is a new model, then find the optimal learning rate and return ``True`` if the
user has just requested the graph, otherwise return ``False`` to continue training
If it is an existing model, set the learning rate to the value found by the learning
rate finder and return ``False`` to continue training
Returns
-------
bool
``True`` if the learning rate finder options dictate that training should not continue
after finding the optimal learning rate
"""
if not self._model.command_line_arguments.use_lr_finder:
return False
if self._model.state.lr_finder > -1:
learning_rate = self._model.state.lr_finder
logger.info("Setting learning rate from Learning Rate Finder to %s",
f"{learning_rate:.1e}")
self._model.model.optimizer.learning_rate.assign(learning_rate)
self._model.state.update_session_config("learning_rate", learning_rate)
return False
if self._model.state.iterations == 0 and self._model.state.session_id == 1:
lrf = LearningRateFinder(self)
success = lrf.find()
return mod_cfg.lr_finder_mode() == "graph_and_exit" or not success
logger.debug("No learning rate finder rate. Not setting")
return False
def _get_warmup(self) -> LearningRateWarmup:
""" Obtain the learning rate warmup instance
Returns
-------
:class:`plugins.train.lr_warmup.LRWarmup`
The Learning Rate Warmup object
"""
target_lr = float(self._model.model.optimizer.learning_rate.value.cpu().numpy())
return LearningRateWarmup(self._model.model, target_lr, self._model.warmup_steps)
def _set_tensorboard(self) -> TorchTensorBoard | None:
""" Set up Tensorboard callback for logging loss.
Bypassed if command line option "no-logs" has been selected.
Returns
-------
:class:`keras.callbacks.TensorBoard` | None
Tensorboard object for the current training session. ``None`` if Tensorboard
logging is not selected
"""
if self._model.state.current_session["no_logs"]:
logger.verbose("TensorBoard logging disabled") # type: ignore
return None
logger.debug("Enabling TensorBoard Logging")
logger.debug("Setting up TensorBoard Logging")
log_dir = os.path.join(str(self._model.io.model_dir),
f"{self._model.name}_logs",
f"session_{self._model.state.session_id}")
tensorboard = TorchTensorBoard(log_dir=log_dir,
write_graph=True,
update_freq="batch")
tensorboard.set_model(self._model.model)
logger.verbose("Enabled TensorBoard Logging") # type: ignore
return tensorboard
def toggle_mask(self) -> None:
""" Toggle the mask overlay on or off based on user input. """
self._samples.toggle_mask_display()
def train_one_batch(self) -> np.ndarray:
""" Process a single batch through the model and obtain the loss
Returns
-------
:class:`numpy.ndarray`
The total loss in the first position then A losses, by output order, then B losses, by
output order
"""
try:
inputs, targets = self._feeder.get_batch()
loss_t = self._plugin.train_batch(torch.from_numpy(inputs),
[torch.from_numpy(t) for t in targets])
loss_cpu = loss_t.detach().cpu().numpy()
retval = np.array([sum(loss_cpu), *loss_cpu])
except OutOfMemoryError as err:
msg = ("You do not have enough GPU memory available to train the selected model at "
"the selected settings. You can try a number of things:"
"\n1) Close any other application that is using your GPU (web browsers are "
"particularly bad for this)."
"\n2) Lower the batchsize (the amount of images fed into the model each "
"iteration)."
"\n3) Try enabling 'Mixed Precision' training."
"\n4) Use a more lightweight model, or select the model's 'LowMem' option "
"(in config) if it has one.")
raise FaceswapError(msg) from err
return retval
def train_one_step(self,
viewer: Callable[[np.ndarray, str], None] | None,
timelapse_kwargs: dict[T.Literal["input_a", "input_b", "output"],
str] | None) -> None:
""" Running training on a batch of images for each side.
Triggered from the training cycle in :class:`scripts.train.Train`.
* Runs a training batch through the model.
* Outputs the iteration's loss values to the console
* Logs loss to Tensorboard, if logging is requested.
* If a preview or time-lapse has been requested, then pushes sample images through the \
model to generate the previews
* Creates a snapshot if the total iterations trained so far meet the requested snapshot \
criteria
Notes
-----
As every iteration is called explicitly, the Parameters defined should always be ``None``
except on save iterations.
Parameters
----------
viewer: :func:`scripts.train.Train._show` or ``None``
The function that will display the preview image
timelapse_kwargs: dict
The keyword arguments for generating time-lapse previews. If a time-lapse preview is
not required then this should be ``None``. Otherwise all values should be full paths
the keys being `input_a`, `input_b`, `output`.
"""
self._model.state.increment_iterations()
logger.trace("Training one step: (iteration: %s)", self._model.iterations) # type: ignore
snapshot_interval = self._model.command_line_arguments.snapshot_interval
do_snapshot = (snapshot_interval != 0 and
self._model.iterations - 1 >= snapshot_interval and
(self._model.iterations - 1) % snapshot_interval == 0)
self._warmup()
loss = self.train_one_batch()
self._log_tensorboard(loss)
loss = self._collate_and_store_loss(loss[1:])
self._print_loss(loss)
if do_snapshot:
self._model.io.snapshot()
self._update_viewers(viewer, timelapse_kwargs)
def _log_tensorboard(self, loss: np.ndarray) -> None:
""" Log current loss to Tensorboard log files
Parameters
----------
loss : :class:`numpy.ndarray`
The total loss in the first position then A losses, by output order, then B losses, by
output order
"""
if not self._tensorboard:
return
logger.trace("Updating TensorBoard log") # type: ignore
logs = {log[0]: float(log[1])
for log in zip(self._model.state.loss_names, loss)}
self._tensorboard.on_train_batch_end(self._model.iterations, logs=logs)
def _collate_and_store_loss(self, loss: np.ndarray) -> np.ndarray:
""" Collate the loss into totals for each side.
The losses are summed into a total for each side. Loss totals are added to
:attr:`model.state._history` to track the loss drop per save iteration for backup purposes.
If NaN protection is enabled, Checks for NaNs and raises an error if detected.
Parameters
----------
loss : :class:`numpy.ndarray`
The total loss in the first position then A losses, by output order, then B losses, by
output order
Returns
-------
:class:`numpy.ndarray`
2 ``floats`` which are the total loss for each side (e.g. sum of face + mask loss)
Raises
------
FaceswapError
If a NaN is detected, a :class:`FaceswapError` will be raised
"""
# NaN protection
if mod_cfg.nan_protection() and not all(np.isfinite(val) for val in loss):
logger.critical("NaN Detected. Loss: %s", loss)
raise FaceswapError("A NaN was detected and you have NaN protection enabled. Training "
"has been terminated.")
split = len(loss) // 2
combined_loss = np.array([sum(loss[:split]), sum(loss[split:])])
self._model.add_history(combined_loss)
logger.trace("original loss: %s, combined_loss: %s", loss, combined_loss) # type: ignore
return combined_loss
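# e.g. a loss of [0.021, 0.004, 0.025, 0.005] (A face, A mask, B face, B mask)
# collates to combined_loss [0.025, 0.030] (illustrative values)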
def _print_loss(self, loss: np.ndarray) -> None:
""" Outputs the loss for the current iteration to the console.
Parameters
----------
loss : :class`numpy.ndarray`
The loss for each side. The list should contain 2 ``floats``: side "a" in position 0
and side "b" in position 1.
"""
output = ", ".join([f"Loss {side}: {side_loss:.5f}"
for side, side_loss in zip(("A", "B"), loss)])
timestamp = time.strftime("%H:%M:%S")
output = f"[{timestamp}] [#{self._model.iterations:05d}] {output}"
print(f"{output}", end="\r")
def _update_viewers(self,
viewer: Callable[[np.ndarray, str], None] | None,
timelapse_kwargs: dict[T.Literal["input_a", "input_b", "output"],
str] | None) -> None:
""" Update the preview viewer and timelapse output
Parameters
----------
viewer: :func:`scripts.train.Train._show` or ``None``
The function that will display the preview image
timelapse_kwargs: dict
The keyword arguments for generating time-lapse previews. If a time-lapse preview is
not required then this should be ``None``. Otherwise all values should be full paths
the keys being `input_a`, `input_b`, `output`.
"""
if viewer is not None:
self._samples.images = self._feeder.generate_preview()
samples = self._samples.show_sample()
if samples is not None:
viewer(samples,
"Training - 'S': Save Now. 'R': Refresh Preview. 'M': Toggle Mask. 'F': "
"Toggle Screen Fit-Actual Size. 'ENTER': Save and Quit")
if timelapse_kwargs:
self._timelapse.output_timelapse(timelapse_kwargs)
def _clear_tensorboard(self) -> None:
""" Stop Tensorboard logging.
Tensorboard logging needs to be explicitly shutdown on training termination. Called from
:class:`scripts.train.Train` when training is stopped.
"""
if not self._tensorboard:
return
logger.debug("Ending Tensorboard Session: %s", self._tensorboard)
self._tensorboard.on_train_end()
def save(self, is_exit: bool = False) -> None:
""" Save the model
Parameters
----------
is_exit: bool, optional
``True`` if save has been called on model exit. Default: ``False``
"""
self._model.io.save(is_exit=is_exit)
assert self._tensorboard is not None
self._tensorboard.on_save()
if is_exit:
self._clear_tensorboard()
__all__ = get_module_objects(__name__)
| {
"repo_id": "deepfakes/faceswap",
"file_path": "plugins/train/training.py",
"license": "GNU General Public License v3.0",
"lines": 302,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
deepfakes/faceswap:requirements/requirements.py | #!/usr/bin/env python3
""" Parses the contents of python requirements.txt files and holds the information in a parsable
format
NOTE: Only packages from the Python Standard Library should be imported in this module
"""
from __future__ import annotations
import logging
import typing as T
import os
from importlib import import_module, util as import_util
if T.TYPE_CHECKING:
from packaging.markers import Marker
from packaging.requirements import Requirement
from packaging.specifiers import Specifier
logger = logging.getLogger(__name__)
PYTHON_VERSIONS: dict[str, tuple[int, int]] = {"rocm_60": (3, 12)}
""" dict[str, tuple[int, int]] : Mapping of requirement file names to the maximum supported
Python version, if below the project maximum """
class Requirements:
""" Parse requirement information
Parameters
----------
include_dev : bool, optional
``True`` to additionally load requirements from the dev requirements file
"""
def __init__(self, include_dev: bool = False) -> None:
self._include_dev = include_dev
self._marker: type[Marker] | None = None
self._requirement: type[Requirement] | None = None
self._specifier: type[Specifier] | None = None
self._global_options: dict[str, list[str]] = {}
self._requirements: dict[str, list[Requirement]] = {}
@property
def packaging_available(self) -> bool:
""" bool : ``True`` if the packaging Library is available otherwise ``False`` """
if self._requirement is not None:
return True
return import_util.find_spec("packaging") is not None
@property
def requirements(self) -> dict[str, list[Requirement]]:
""" dict[str, list[Requirement]] : backend type as key, list of required packages as
value """
if not self._requirements:
self._load_requirements()
return self._requirements
@property
def global_options(self) -> dict[str, list[str]]:
""" dict[str, list[str]] : The global pip install options for each backend """
if not self._requirements:
self._load_requirements()
return self._global_options
def __repr__(self) -> str:
""" Pretty print the required packages for logging """
props = ", ".join(
f"{k}={repr(getattr(self, k))}"
for k, v in self.__class__.__dict__.items()
if isinstance(v, property) and not k.startswith("_"))
return f"{self.__class__.__name__}({props})"
def _import_packaging(self) -> None:
""" Import the packaging library and set the required classes to class attributes. """
if self._requirement is not None:
return
logger.debug("Importing packaging library")
mark_mod = import_module("packaging.markers")
req_mod = import_module("packaging.requirements")
spec_mod = import_module("packaging.specifiers")
self._marker = mark_mod.Marker
self._requirement = req_mod.Requirement
self._specifier = spec_mod.Specifier
@classmethod
def _parse_file(cls, file_path: str) -> tuple[list[str], list[str]]:
""" Parse a requirements file
Parameters
----------
file_path : str
The full path to a requirements file to parse
Returns
-------
global_options : list[str]
Any global options collected from the requirements file
requirements : list[str]
The requirement strings from the requirements file
"""
global_options = []
requirements = []
with open(file_path, encoding="utf8") as f:
for line in f:
line = line.strip() # Skip blanks, comments and nested requirement files
if not line or line.startswith(("#", "-r")):
continue
line = line.split("#", maxsplit=1)[0] # Strip inline comments
if line.startswith("-"): # Collect global option
global_options.append(line)
continue
requirements.append(line) # Collect requirement
logger.debug("Parsed requirements file '%s'. global_options: %s, requirements: %s",
os.path.basename(file_path), global_options, requirements)
return global_options, requirements
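# Hedged example for _parse_file: given a file containing
#     --extra-index-url https://example.invalid/simple
#     numpy>=1.26.0
#     -r _requirements_base.txt
# it returns global_options == ["--extra-index-url https://example.invalid/simple"]
# and requirements == ["numpy>=1.26.0"]; the nested "-r" line is skipped
# (package name and URL are illustrative only)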
def parse_requirements(self, packages: list[str]) -> list[Requirement]:
""" Drop in replacement for deprecated pkg_resources.parse_requirements
Parameters
----------
packages: list[str]
List of packages formatted from a requirements.txt file
Returns
-------
list[:class:`packaging.Requirement`]
List of Requirement objects
"""
self._import_packaging()
assert self._requirement is not None
requirements = [self._requirement(p) for p in packages]
retval = [r for r in requirements if r.marker is None or r.marker.evaluate()]
if len(retval) != len(requirements):
logger.debug("Filtered invalid packages %s",
[(r.name, r.marker) for r in set(requirements).difference(set(retval))])
logger.debug("Parsed requirements %s: %s", packages, retval)
return retval
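# e.g. parse_requirements(["numpy>=1.26", "triton; sys_platform == 'linux'"])
# keeps both Requirement objects on Linux but drops 'triton' elsewhere because
# its environment marker evaluates to False (illustrative package names)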
def _parse_options(self, options: list[str]) -> list[str]:
""" Parse global options from a requirements file and only return valid options
Parameters
----------
options: list[str]
List of global options formatted from a requirements.txt file
Returns
-------
list[str]
List of global options valid for the running system
"""
if not options:
return options
assert self._marker is not None
retval = []
for opt in options:
if ";" not in opt:
retval.append(opt)
continue
directive, marker = opt.split(";", maxsplit=1)
if not self._marker(marker.strip()).evaluate():
logger.debug("Filtered invalid option: '%s'", opt)
continue
retval.append(directive.strip())
logger.debug("Selected options: %s", retval)
return retval
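# e.g. the option '--only-binary :all:; sys_platform == "win32"' is kept as
# '--only-binary :all:' on Windows and filtered out on other platforms, while
# options without a ';' marker always pass through (illustrative values)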
def _load_requirements(self) -> None:
""" Parse the requirements files and populate information to :attr:`_requirements` """
req_path = os.path.dirname(os.path.realpath(__file__))
base_file = os.path.join(req_path, "_requirements_base.txt")
req_files = [os.path.join(req_path, f)
for f in os.listdir(req_path)
if f.startswith("requirements_")
and os.path.splitext(f)[-1] == ".txt"]
opts_base, reqs_base = self._parse_file(base_file)
parsed_reqs_base = self.parse_requirements(reqs_base)
parsed_opts_base = self._parse_options(opts_base)
if self._include_dev:
opts_dev, reqs_dev = self._parse_file(os.path.join(req_path, "_requirements_dev.txt"))
opts_base += opts_dev
parsed_reqs_base += self.parse_requirements(reqs_dev)
parsed_opts_base += self._parse_options(opts_dev)
for req_file in req_files:
backend = os.path.splitext(os.path.basename(req_file))[0].replace("requirements_", "")
assert backend
opts, reqs = self._parse_file(req_file)
self._requirements[backend] = parsed_reqs_base + self.parse_requirements(reqs)
self._global_options[backend] = parsed_opts_base + self._parse_options(opts)
logger.debug("[%s] Requirements: %s , Options: %s",
backend, self._requirements[backend], self._global_options[backend])
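# Illustrative sketch (not part of faceswap) of the 'packaging' calls the class
# above relies on. The requirement string and the option below are made-up
# examples, not taken from any real requirements file:
def _demo_marker_evaluation() -> None:
    """ Show how environment markers decide what gets kept """
    from packaging.markers import Marker
    from packaging.requirements import Requirement
    req = Requirement('numpy>=1.26; python_version >= "3.10"')
    # parse_requirements() keeps a requirement when it has no marker or its
    # marker evaluates True on the running interpreter
    if req.marker is None or req.marker.evaluate():
        print(f"kept: {req.name}{req.specifier}")
    # _parse_options() applies the same test to the marker half of an option
    opt = '--prefer-binary ; sys_platform == "linux"'
    directive, marker = opt.split(";", maxsplit=1)
    if Marker(marker.strip()).evaluate():
        print(f"selected: {directive.strip()}")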
if __name__ == "__main__":
print(Requirements(include_dev=True))
| {
"repo_id": "deepfakes/faceswap",
"file_path": "requirements/requirements.py",
"license": "GNU General Public License v3.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepfakes/faceswap:tests/lib/config/config_test.py | #!/usr/bin/env python3
""" Pytest unit tests for :mod:`lib.config.config` """
import pytest
from lib.config import config as config_mod
from tests.lib.config.helpers import FakeConfigItem
# pylint:disable=too-few-public-methods,protected-access,invalid-name
def get_instance(mocker, module="plugins.test.test_config"):
""" Generate a FaceswapConfig instance, substituting the calling module for the one given """
mocker.patch("lib.config.config.FaceswapConfig.__module__", module)
return config_mod.FaceswapConfig()
_MODULES = (("plugins.test.test_config", "path_valid"),
("plugins.test.test", "path_invalid"),
("plugins.config.test_config", "folder_invalid"))
_MODULE_IDS = [x[-1] for x in _MODULES]
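# Naming convention illustrated by the cases above: FaceswapConfig derives its
# plugin group from the calling module's dotted path, so a valid path looks
# like "plugins.<group>.<group>_config"; the other two shapes should trip its
# assertions (inferred from this test, not from separate documentation).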
@pytest.mark.parametrize(("module", "mod_status"), _MODULES, ids=_MODULE_IDS)
def test_FaceswapConfig_init(module, mod_status, mocker):
""" Test that :class:`lib.config.config.FaceswapConfig` initializes correctly """
mocker.patch("lib.config.config.FaceswapConfig.set_defaults", mocker.MagicMock())
mocker.patch("lib.config.config.ConfigFile.on_load", mocker.MagicMock())
if mod_status.endswith("invalid"):
with pytest.raises(AssertionError):
get_instance(mocker, module=module)
return
test = get_instance(mocker, module=module)
assert test._plugin_group == "test"
assert isinstance(test._ini, config_mod.ConfigFile)
test.set_defaults.assert_called_once()
test._ini.on_load.assert_called_once_with(test.sections) # pylint:disable=no-member
assert config_mod._CONFIGS["test"] == test
def test_FaceswapConfig_add_section(mocker):
""" Test :class:`lib.config.config.FaceswapConfig.add_section` works """
instance = get_instance(mocker)
title = "my.test.section"
info = "And here is some test help text"
assert title not in instance.sections
instance.add_section(title, info)
assert title in instance.sections
assert isinstance(instance.sections[title], config_mod.ConfigSection)
assert instance.sections[title].helptext == info
def test_FaceswapConfig_add_item(mocker):
""" Test :class:`lib.config.config.FaceswapConfig.add_item` works """
instance = get_instance(mocker)
section = "my.test.section"
title = "test_option"
config_item = "TEST_CONFIG_ITEM"
assert section not in instance.sections
    with pytest.raises(KeyError):  # Fail adding item to non-existent key
instance.add_item(section, title, config_item)
instance.add_section(section, "")
assert title not in instance.sections[section].options
instance.add_item(section, title, config_item)
assert title in instance.sections[section].options
assert instance.sections[section].options[title] == config_item
@pytest.mark.parametrize("filename",
("test_defaults.py", "train_defaults.py", "different_name.py"))
def test_FaceswapConfig_import_defaults_from_module(mocker, filename):
""" Test :class:`lib.config.config.FaceswapConfig._defaults_from_module` works """
mocker.patch("lib.config.config.ConfigItem", FakeConfigItem)
class DummyMod:
""" Dummy Module for loading config items """
opt1 = FakeConfigItem(10)
opt2 = FakeConfigItem(20)
invalid = "invalid"
HELPTEXT = "Test help text"
mock_mod = mocker.MagicMock(return_value=DummyMod)
mocker.patch("lib.config.config.import_module", mock_mod)
instance = get_instance(mocker)
module_path = "test.module.path"
plugin_type = "test"
section = plugin_type + "." + filename[:-3].replace("_defaults", "")
assert section not in instance.sections
instance._import_defaults_from_module(filename, module_path, plugin_type)
mock_mod.assert_called_once_with(f"{module_path}.{filename[:-3]}")
assert section in instance.sections
assert instance.sections[section].helptext == DummyMod.HELPTEXT
assert len(instance.sections[section].options) == 2
assert isinstance(instance.sections[section].options["opt1"], FakeConfigItem)
assert isinstance(instance.sections[section].options["opt2"], FakeConfigItem)
def test_FaceswapConfig_defaults_from_plugin(mocker):
""" Test :class:`lib.config.config.FaceswapConfig._defaults_from_plugin` works """
mocker.patch("lib.config.config.ConfigItem", FakeConfigItem)
dir_tree = [("plugins/train/model/plugin_a", [], ['plugin_a_defaults.py', '__init__.py']),
("plugins/extract", [], ['extract_defaults.py', '__init__.py']),
("plugins/convert/writer", [], ['writer_defaults.py', '__init__.py']),
("plugins/train", ["model", "trainer"], ['train_config.py', '__init__.py'])]
mock_walk = mocker.MagicMock(return_value=dir_tree)
mocker.patch("lib.config.config.os.walk", mock_walk)
instance = get_instance(mocker)
instance._import_defaults_from_module = mocker.MagicMock()
instance._defaults_from_plugin("test")
assert instance._import_defaults_from_module.call_count == 3 # 3 valid, 1 invalid
def test_FaceswapConfig_set_defaults_global(mocker):
""" Test :class:`lib.config.config.FaceswapConfig.set_defaults` works for global sections """
mocker.patch("lib.config.config.ConfigItem", FakeConfigItem)
class DummyMod:
""" Dummy Module for loading config items """
opt1 = FakeConfigItem(10)
opt2 = FakeConfigItem(20)
invalid = "invalid"
HELPTEXT = "Test help text"
mocker.patch("lib.config.config.sys.modules",
config_mod.sys.modules | {"plugins.test.test_config": DummyMod})
instance = get_instance(mocker)
instance.add_section = mocker.MagicMock()
instance.add_item = mocker.MagicMock()
instance.set_defaults("")
instance.add_section.assert_not_called()
instance.add_item.assert_not_called()
instance.set_defaults("test")
instance.add_section.assert_called_once()
assert instance.add_item.call_count == 2
def test_FaceswapConfig_set_defaults_subsection(mocker):
""" Test :class:`lib.config.config.FaceswapConfig.set_defaults` works for sub-sections """
mocker.patch("lib.config.config.ConfigItem", FakeConfigItem)
class DummyGlobal(config_mod.GlobalSection):
""" Dummy GlobalSection class """
opt1 = FakeConfigItem(30)
opt2 = FakeConfigItem(40)
opt3 = FakeConfigItem(50)
invalid = "invalid"
helptext = "Section help text"
class DummyMod:
""" Dummy Module class for loading config items """
opt1 = FakeConfigItem(10)
opt2 = FakeConfigItem(20)
sect1 = DummyGlobal
invalid = "invalid"
HELPTEXT = "Test help text"
mocker.patch("lib.config.config.sys.modules",
config_mod.sys.modules | {"plugins.test.test_config": DummyMod})
instance = get_instance(mocker)
instance.add_section = mocker.MagicMock()
instance.add_item = mocker.MagicMock()
instance.set_defaults("test")
assert instance.add_section.call_count == 2 # global + subsection
assert instance.add_item.call_count == 5 # global + subsection
def test_FaceswapConfig_set_defaults(mocker):
""" Test :class:`lib.config.config.FaceswapConfig._set_defaults` works """
instance = get_instance(mocker)
class DummySection1:
""" Dummy ConfigSection class """
options = {"opt1": FakeConfigItem(10),
"opt2": FakeConfigItem(20),
"opt3": FakeConfigItem(30)}
class DummySection2:
""" Dummy ConfigSection class """
options = {"opt1": FakeConfigItem(40),
"opt2": FakeConfigItem(50),
"opt3": FakeConfigItem(60)}
class DummySection3:
""" Dummy ConfigSection class """
options = {"opt1": FakeConfigItem(70),
"opt2": FakeConfigItem(80),
"opt3": FakeConfigItem(90)}
instance.set_defaults = mocker.MagicMock()
sections = {"zzz_section": DummySection1(),
"mmm_section": DummySection2(),
"aaa_section": DummySection3()}
instance.sections = sections
instance._set_defaults()
instance.set_defaults.assert_called_once()
for sect_name, sect in instance.sections.items():
for key, opt in sect.options.items():
assert opt._name == f"test.{sect_name}.{key}"
assert list(instance.sections) == sorted(sections)
def test_FaceswapConfig_save(mocker):
""" Test :class:`lib.config.config.FaceswapConfig.save` works """
instance = get_instance(mocker)
instance._ini.update_from_app = mocker.MagicMock()
instance.sections = "TEST_SECTIONS"
instance.save_config()
instance._ini.update_from_app.assert_called_once_with(instance.sections)
def test_get_configs(mocker):
""" Test :class:`lib.config.config.get_configs` works """
mock_gen_configs = mocker.MagicMock()
mocker.patch("lib.config.config.generate_configs", mock_gen_configs)
mocker.patch("lib.config.config._CONFIGS", "TEST_ALL_CONFIGS")
result = config_mod.get_configs()
mock_gen_configs.assert_called_once_with(force=True)
assert result == "TEST_ALL_CONFIGS"
def test_generate_configs(mocker):
""" Test :class:`lib.config.config.generate_configs` works """
_root = "/path/to/faceswap"
mocker.patch("lib.config.config.PROJECT_ROOT", _root)
dir_tree = [
(f"{_root}/plugins/train", [], ['train_config.py', '__init__.py']), # Success
(f"{_root}/plugins/extract", [], ['extract_config.py', '__init__.py']), # Success
(f"{_root}/plugins/convert/writer", [], ['writer_config.py', '__init__.py']), # Too deep
# Wrong name
(f"{_root}/plugins/train", ["model", "trainer"], ['train_defaults.py', '__init__.py'])]
mock_walk = mocker.MagicMock(return_value=dir_tree)
mocker.patch("lib.config.config.os.walk", mock_walk)
mock_initialized = mocker.MagicMock()
class DummyConfig(config_mod.FaceswapConfig):
""" Dummy FaceswapConfig class """
def __init__(self, # pylint:disable=unused-argument,super-init-not-called
*args,
**kwargs):
mock_initialized()
class DummyMod:
""" Dummy Module to load configs from """
mod1 = DummyConfig
mock_mod = mocker.MagicMock(return_value=DummyMod)
mocker.patch("lib.config.config.import_module", mock_mod)
config_mod.generate_configs(False)
assert mock_mod.call_count == 2 # 2 modules imported
assert mock_initialized.call_count == 2 # 2 configs loaded
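# Selection rule illustrated by dir_tree above: only "*_config.py" files that
# sit exactly one level below plugins/ are imported; deeper paths and
# "*_defaults.py" files are skipped by generate_configs.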
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/config/config_test.py",
"license": "GNU General Public License v3.0",
"lines": 207,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/config/helpers.py | #!/usr/bin/env python3
""" Helper mock items for ConfigItems """
import pytest
class FakeConfigItem:
""" ConfigItem substitute"""
def __init__(self, value):
self.value = value
self._name = ""
@property
def ini_value(self):
""" Dummy ini value """
return self.value.lower() if isinstance(self.value, str) else self.value
@property
def helptext(self):
""" Dummy help text """
return f"Test helptext for {self._name}:{self.value}"
def get(self):
""" Return the value """
return self.value
def set(self, value):
""" Return the value """
self.value = value
def set_name(self, name):
""" Set the name """
self._name = name
def __call__(self):
return self.value
def __repr__(self):
return f"FakeConfigItem(value={self.value!r})"
@pytest.fixture
def patch_config(monkeypatch: pytest.MonkeyPatch):
""" Fixture to patch user config values """
def _apply(module, cfg_dict):
""" Create the fake ConfigItem object """
for key, value in cfg_dict.items():
monkeypatch.setattr(module, key, FakeConfigItem(value))
return _apply
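# Illustrative usage of the fixture above (``mymod`` is a hypothetical module
# exposing module-level ConfigItem attributes; this is a sketch, not a real test):
#
#     def test_something(patch_config):
#         import mymod
#         patch_config(mymod, {"opt_a": 10, "opt_b": "value"})
#         assert mymod.opt_a() == 10  # FakeConfigItem instances are callable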
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/config/helpers.py",
"license": "GNU General Public License v3.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/config/ini_test.py | #!/usr/bin/env python3
""" Pytest unit tests for :mod:`lib.config.ini` """
import os
import pytest
from lib.config import ini as ini_mod
from tests.lib.config.helpers import FakeConfigItem
# pylint:disable=protected-access,invalid-name
_GROUPS = ("group1", "group2", "group3")
_CONFIG = ("custom", "custom_missing", "root", "root_missing")
@pytest.mark.parametrize("plugin_group", _GROUPS)
@pytest.mark.parametrize("config", _CONFIG)
def test_ConfigFile(tmpdir, mocker, plugin_group, config):
""" Test that :class:`lib.config.ini.ConfigFile` initializes correctly """
root_conf = tmpdir.mkdir("root").mkdir("config").join(f"{plugin_group}.ini")
root_dir = os.path.dirname(os.path.dirname(root_conf))
if config != "root_missing":
root_conf.write("")
mocker.patch("lib.config.ini.PROJECT_ROOT", root_dir)
conf_file = None
if config.startswith("custom"):
conf_file = tmpdir.mkdir("config").join("test_custom_config.ini")
if config == "custom":
conf_file.write("")
mock_load = mocker.MagicMock()
mocker.patch("lib.config.ini.ConfigFile.load", mock_load)
if config == "custom_missing": # Error on explicit missing
with pytest.raises(ValueError):
ini_mod.ConfigFile("group2test", conf_file)
return
instance = ini_mod.ConfigFile(plugin_group, conf_file)
file_path = conf_file if config == "custom" else root_conf
assert instance._file_path == file_path
assert instance._plugin_group == plugin_group
assert instance._parser.optionxform is str
if config in ("custom", "root"): # load when exists
mock_load.assert_called_once()
else:
mock_load.assert_not_called() # Don't load when it doesn't
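# Background for the ``optionxform is str`` assertion above: ConfigParser
# lower-cases option names by default, so the parser pins optionxform to the
# ``str`` builtin to keep keys case-sensitive. A minimal stdlib sketch, not
# exercised by the suite:
def _demo_optionxform() -> None:
    """ Show the default lower-casing versus case-preserving behaviour """
    import configparser
    default = configparser.ConfigParser()
    default.read_string("[s]\nMyOpt = 1\n")
    assert "myopt" in default["s"]  # name was lower-cased on read
    preserving = configparser.ConfigParser()
    preserving.optionxform = str  # type:ignore
    preserving.read_string("[s]\nMyOpt = 1\n")
    assert "MyOpt" in preserving["s"]  # case retained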
def test_ConfigFile_load(mocker):
""" Test that :class:`lib.config.ini.ConfigFile.load` calls correctly """
instance = ini_mod.ConfigFile("test")
mock_read = mocker.MagicMock()
instance._parser.read = mock_read
instance.load()
mock_read.assert_called_once()
def test_ConfigFile_save(mocker):
""" Test that :class:`lib.config.ini.ConfigFile.save` calls correctly """
instance = ini_mod.ConfigFile("test")
mock_write = mocker.MagicMock()
instance._parser.write = mock_write
instance.save()
mock_write.assert_called_once()
class FakeConfigSection: # pylint:disable=too-few-public-methods
""" Fake config section """
def __init__(self, num_opts=2):
self.options = {f"opt{i}": FakeConfigItem(f"test_value{i}") for i in range(num_opts)}
self.helptext = f"Test helptext for {num_opts} options"
def get_local_remote(sections=[2, 1, 3]): # pylint:disable=dangerous-default-value
""" Obtain an object representing inputs to a ConfigParser and a matching object representing
Faceswap Config """
parser_sections = {f"section{i}": {f"opt{idx}": f"test_value{idx}" for idx in range(s)}
for i, s in enumerate(sections)}
fs_sections = {f"section{i}": FakeConfigSection(s) for i, s in enumerate(sections)}
return parser_sections, fs_sections
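# Shape of the returned structures for e.g. sections=[2, 1] (illustrative):
#   parser_sections == {"section0": {"opt0": "test_value0", "opt1": "test_value1"},
#                       "section1": {"opt0": "test_value0"}}
#   fs_sections maps the same section names to FakeConfigSection instances
#   holding matching FakeConfigItem options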
def test_ConfigFile_is_synced_structure():
""" Test that :class:`lib.config.ini.ConfigFile.is_synced_structure` is logical """
instance = ini_mod.ConfigFile("test")
sect_sizes = [2, 1, 3]
parser_sects, fs_sects = get_local_remote(sect_sizes)
# No Config
test = instance._is_synced_structure(fs_sects)
assert test is False
# Sects exist
for section in parser_sects:
instance._parser.add_section(section)
test = instance._is_synced_structure(fs_sects)
assert test is False
# Some Options missing
for section, options in parser_sects.items():
for opt, val in options.items():
instance._parser.set(section, opt, val)
break
test = instance._is_synced_structure(fs_sects)
assert test is False
# Structure matches
for section, options in parser_sects.items():
for opt, val in options.items():
instance._parser.set(section, opt, val)
test = instance._is_synced_structure(fs_sects)
assert test is True
# Extra saved section
instance._parser.add_section("text_extra_section")
test = instance._is_synced_structure(fs_sects)
assert test is False
# Structure matches
del instance._parser["text_extra_section"]
test = instance._is_synced_structure(fs_sects)
assert test is True
# Extra Option
instance._parser.set(section, "opt_test_extra_option", "val_test_extra_option")
test = instance._is_synced_structure(fs_sects)
assert test is False
def test_ConfigFile_format_help():
""" Test that :class:`lib.config.ini.ConfigFile.format_help` inserts # on each line """
instance = ini_mod.ConfigFile("test")
text = "This\nis a test\n\n\nof some text\n"
result = instance.format_help(text)
assert all(x.startswith("#") for x in result.splitlines() if x)
@pytest.mark.parametrize("section",
("section1", "another_section", "section_test"))
def test_ConfigFile_insert_section(mocker, section):
""" Test that :class:`lib.config.ini.ConfigFile._insert_section` calls correctly """
helptext = f"{section}_helptext"
instance = ini_mod.ConfigFile("test")
instance.format_help = mocker.MagicMock(return_value=helptext)
parser = instance._parser
assert section not in parser
instance._insert_section(section, helptext, parser)
instance.format_help.assert_called_once_with(helptext, is_section=True)
assert section in parser
assert helptext in parser[section]
@pytest.mark.parametrize(("section", "name", "value"),
(("section1", "opt1", "value1"),
("another_section", "my_option", "what_its_worth")))
def test_ConfigFile_insert_option(mocker, section, name, value):
""" Test that :class:`lib.config.ini.ConfigFile._insert_option` calls correctly """
helptext = f"{section}_helptext"
instance = ini_mod.ConfigFile("test")
instance.format_help = mocker.MagicMock(return_value=helptext)
parser = instance._parser
parser.add_section(section)
assert name not in parser[section]
instance._insert_option(section, name, helptext, value, parser)
instance.format_help.assert_called_once_with(helptext, is_section=False)
assert name in parser[section]
assert parser[section][name] == value
_ini, _app = get_local_remote([2, 1, 3])
_ini_extra, _app_extra = get_local_remote(sections=[3, 1, 3])
_ini_value, _ = get_local_remote(sections=[2, 1, 3])
_ini_value["section0"]["opt0"] = "updated_value"
_SYNC = ((_ini, _app, "synced"),
(_ini, _app_extra, "new_from_app"),
(_ini_extra, _app, "del_from_app"),
(_ini_value, _app, "updated_ini"))
_SYNC_IDS = [x[-1] for x in _SYNC]
@pytest.mark.parametrize(("ini_config", "app_config", "status"), _SYNC, ids=_SYNC_IDS)
@pytest.mark.parametrize("exists", (True, False), ids=("exists", "not_exists"))
def test_ConfigFile_sync_from_app(ini_config,  # pylint:disable=too-many-branches # noqa: C901
app_config,
status,
exists,
mocker):
""" Test :class:`lib.config.ini.ConfigFile._sync_from_app` logic """
mocker.patch("lib.config.ini.ConfigFile._exists", exists)
instance = ini_mod.ConfigFile("test")
instance.save = mocker.MagicMock()
original_parser = instance._parser
if exists:
for section, opts in ini_config.items():
original_parser.add_section(section)
for name, opt in opts.items():
original_parser[section][name] = opt
opt_pairs = [({k: v.value for k, v in opts.options.items()},
dict(original_parser[s].items()))
for s, opts in app_config.items()]
# Sanity check that the loaded parser is set correctly
if status == "synced":
assert all(set(x[0]) == set(x[1]) for x in opt_pairs)
elif status == "new_from_app":
assert any(len(x[1]) < len(x[0]) for x in opt_pairs)
elif status == "new_from_ini":
assert any(len(x[0]) < len(x[1]) for x in opt_pairs)
elif status == "updated_ini":
vals = [(set(x[0].values()), set(x[1].values())) for x in opt_pairs]
assert not all(a == i for a, i in vals)
else:
for section in ini_config:
assert section not in instance._parser
instance._sync_from_app(app_config) # Sync
instance.save.assert_called_once() # Saved
if exists:
assert instance._parser is not original_parser # New config Generated
else:
assert instance._parser is original_parser # Blank Config pre-exists
opt_pairs = [({k: v.value for k, v in opts.options.items()},
{k: v for k, v in instance._parser[s].items() if k.startswith("opt")})
for s, opts in app_config.items()]
# Test options are now in sync
assert all(set(x[0]) == set(x[1]) for x in opt_pairs)
# Test that ini value kept
vals = [(set(x[0].values()), set(x[1].values())) for x in opt_pairs]
if exists and status == "updated_ini":
assert any("updated_value" in i for _, i in vals)
assert any(a != i for a, i in vals)
else:
assert not any("updated_value" in i for _, i in vals)
assert all(a == i for a, i in vals)
@pytest.mark.parametrize(("section", "option", "value", "datatype"),
(("section1", "opt_str", "test_str", str),
("section2", "opt_bool", "True", bool),
("section3", "opt_int", "42", int),
("section4", "opt_float", "42.69", float),
("section5", "opt_other", "[test_other]", str)),
ids=("str", "bool", "int", "float", "other"))
def test_ConfigFile_get_converted_value(section, option, value, datatype):
""" Test :class:`lib.config.ini.ConfigFile._get_converted_value` logic """
instance = ini_mod.ConfigFile("test")
instance._parser.add_section(section)
instance._parser[section][option] = value
result = instance._get_converted_value(section, option, datatype)
assert isinstance(result, datatype)
assert datatype(value) == result
_ini, _app = get_local_remote([2, 1, 3])
_ini_changed, _ = get_local_remote(sections=[2, 1, 3])
_ini_changed["section0"]["opt0"] = "updated_value"
_ini_changed["section2"]["opt1"] = "updated_value"
_SYNC_TO = ((_ini, _app, "synced"), (_ini_changed, _app, "updated_ini"))
_SYNC_TO_IDS = [x[-1] for x in _SYNC_TO]
@pytest.mark.parametrize(("ini_config", "app_config", "status"), _SYNC_TO, ids=_SYNC_TO_IDS)
def test_ConfigFile_sync_to_app(ini_config, app_config, status, mocker):
""" Test :class:`lib.config.ini.ConfigFile._sync_to_app` logic """
for sect in app_config.values(): # Add a dummy datatype param to FSConfig
for opt in sect.options.values():
setattr(opt, "datatype", str)
instance = ini_mod.ConfigFile("test")
instance._get_converted_value = mocker.MagicMock(return_value="updated_value")
for section, opts in ini_config.items(): # Load up the dummy ini info
instance._parser.add_section(section)
for name, opt in opts.items():
instance._parser[section][name] = opt
instance._sync_to_app(app_config)
app_values = {sname: set(v.value for v in sect.options.values())
for sname, sect in app_config.items()}
sect_values = {sname: set(instance._parser[sname].values())
for sname in instance._parser.sections()}
if status == "synced": # No items change
instance._get_converted_value.assert_not_called()
else: # 2 items updated in the config.ini
assert instance._get_converted_value.call_count == 2
# App and ini values must now match
assert set(app_values) == set(sect_values)
for sect in app_values:
assert set(app_values[sect]) == set(sect_values[sect])
@pytest.mark.parametrize("structure_synced",
(True, False),
ids=("struc_synced", "not_struc_synced"))
@pytest.mark.parametrize("exists", (True, False), ids=("exists", "not_exists"))
def test_ConfigFile_sync_on_load(structure_synced, exists, mocker):
""" Test :class:`lib.config.ini.ConfigFile.on_load` logic """
mocker.patch("lib.config.ini.ConfigFile._exists", exists)
_, app_config = get_local_remote()
instance = ini_mod.ConfigFile("test")
instance._sync_from_app = mocker.MagicMock()
instance._sync_to_app = mocker.MagicMock()
instance._is_synced_structure = mocker.MagicMock(return_value=structure_synced)
instance.on_load(app_config)
instance._is_synced_structure.assert_called_once_with(app_config)
instance._sync_to_app.assert_called_once_with(app_config)
if not exists or not structure_synced:
instance._sync_from_app.assert_called_with(app_config)
call_count = 2 if (not exists and not structure_synced) else 1
else:
call_count = 0
assert instance._sync_from_app.call_count == call_count
@pytest.mark.parametrize("app_config",
(get_local_remote([2, 1, 3])[1],
get_local_remote([4, 2, 6, 8])[1],
get_local_remote([3])[1]))
def test_ConfigFile_update_from_app(app_config, mocker):
""" Test :class:`lib.config.ini.ConfigFile.update_from_app` logic """
instance = ini_mod.ConfigFile("test")
instance.save = mocker.MagicMock()
for sect in app_config:
# Updating from app always replaces the existing parser with a new one
assert sect not in instance._parser.sections()
instance.update_from_app(app_config)
instance.save.assert_called_once()
for sect_name, sect in app_config.items():
assert sect_name in instance._parser.sections()
for opt_name, val in sect.options.items():
assert opt_name in instance._parser[sect_name]
assert instance._parser[sect_name][opt_name] == val.ini_value
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/config/ini_test.py",
"license": "GNU General Public License v3.0",
"lines": 282,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/config/objects_test.py | #!/usr/bin/env python3
""" Unit tests for lib.convert.objects """
import pytest
from lib.config.objects import ConfigItem
# pylint:disable=invalid-name
_TEST_GROUP = "TestGroup"
_TEST_INFO = "TestInfo"
_STR_CONFIG = ( # type:ignore[var-annotated]
("TestDefault", ["TestDefault", "Other"], "success-choices"),
("TestDefault", [], "success-no-choices"),
("#ffffff", "colorchooser", "success-colorchooser"),
("FailDefault", ["TestDefault", "Other"], "fail-choices"),
("TestDefault", "Invalid", "fail-invalid-choices"),
("TestDefault", "colorchooser", "fail-colorchooser"),
(1, [], "fail-int"),
(1.1, [], "fail-float"),
(True, [], "fail-bool"),
(["test", "list"], [], "fail-list"))
_STR_PARAMS = ["default", "choices", "status"]
@pytest.mark.parametrize(_STR_PARAMS, _STR_CONFIG, ids=[x[-1] for x in _STR_CONFIG])
def test_ConfigItem_str(default, choices, status):
""" Test that datatypes validate for strings and value is set correctly """
dtype = str
if status.startswith("success"):
dclass = ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO,
choices=choices)
assert dclass.value == default.lower()
else:
with pytest.raises(ValueError):
ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO,
choices=choices)
_INT_CONFIG = ((10, (0, 100), 1, "success"),
(20, None, 1, "fail-min-max-missing"),
(30, (0.1, 100.1), 1, "fail-min-max-dtype"),
(35, "TestMinMax", 1, "fail-min-max-type"),
(40, (0, 100), -1, "fail-rounding-missing"),
(50, (0, 100), 1.1, "fail-rounding-dtype"),
("TestDefault", (0, 100), 1, "fail-str"),
(1.1, (0, 100), 1, "fail-float"),
(True, (0, 100), 1, "fail-bool"),
([0, 1], [0.0, 100.0], 1, "fail-list"))
_INT_PARAMS = ["default", "min_max", "rounding", "status"]
@pytest.mark.parametrize(_INT_PARAMS, _INT_CONFIG, ids=[x[-1] for x in _INT_CONFIG])
def test_ConfigItem_int(default, min_max, rounding, status):
""" Test that datatypes validate for integers and value is set correctly """
dtype = int
if status.startswith("success"):
dclass = ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO,
min_max=min_max,
rounding=rounding)
assert dclass.value == default
else:
with pytest.raises(ValueError):
ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO,
min_max=min_max,
rounding=rounding)
_FLOAT_CONFIG = ((10.0, (0.0, 100.0), 1, "success"),
(20.1, None, 1, "fail-min-max-missing"),
(30.2, (1, 100), 1, "fail-min-max-dtype"),
(35.0, "TestMinMax", 1, "fail-min-max-type"),
(40.3, (0.0, 100.0), -1, "fail-rounding-missing"),
(50.4, (0.0, 100.0), 1.1, "fail-rounding-dtype"),
("TestDefault", (0.0, 100.0), 1, "fail-str"),
(1, (0.0, 100.0), 1, "fail-float"),
(True, (0.0, 100.0), 1, "fail-bool"),
([0.1, 1.2], [0.0, 100.0], 1, "fail-list"))
_FLOAT_PARAMS = ["default", "min_max", "rounding", "status"]
@pytest.mark.parametrize(_FLOAT_PARAMS, _FLOAT_CONFIG, ids=[x[-1] for x in _FLOAT_CONFIG])
def test_ConfigItem_float(default, min_max, rounding, status):
""" Test that datatypes validate for floats and value is set correctly """
dtype = float
if status.startswith("success"):
dclass = ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO,
min_max=min_max,
rounding=rounding)
assert dclass.value == default
else:
with pytest.raises(ValueError):
ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO,
min_max=min_max,
rounding=rounding)
_BOOL_CONFIG = ((True, "success-true"),
(False, "success-false"),
("True", "fail-str"),
(42, "fail-int"),
(42.69, "fail-float"),
([True, False], "fail-list"))
_BOOL_PARAMS = ["default", "status"]
@pytest.mark.parametrize(_BOOL_PARAMS, _BOOL_CONFIG, ids=[x[-1] for x in _BOOL_CONFIG])
def test_ConfigItem_bool(default, status):
""" Test that datatypes validate for bool and value is set correctly """
dtype = bool
if status.startswith("success"):
dclass = ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO)
assert dclass.value is default
else:
with pytest.raises(ValueError):
ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO)
_LIST_CONFIG = ( # type:ignore[var-annotated]
(["TestDefault"], ["TestDefault", "Other"], "success"),
(["TestDefault", "Fail"], ["TestDefault", "Other"], "fail-invalid-choice"),
(["TestDefault"], [], "fail-no-choices"),
([1, 2], [1, 2, 3], "fail-dtype"),
("test", ["TestDefault", "Other"], "fail-str"),
(1, ["TestDefault", "Other"], "fail-int"),
(1.1, ["TestDefault", "Other"], "fail-float"),
(True, ["TestDefault", "Other"], "fail-bool"))
_LIST_PARAMS = ["default", "choices", "status"]
@pytest.mark.parametrize(_LIST_PARAMS, _LIST_CONFIG, ids=[x[-1] for x in _LIST_CONFIG])
def test_ConfigItem_list(default, choices, status):
""" Test that datatypes validate for strings and value is set correctly """
dtype = list
if status.startswith("success"):
dclass = ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO,
choices=choices)
assert dclass.value == [x.lower() for x in default]
else:
with pytest.raises(ValueError):
ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO,
choices=choices)
_REQ_CONFIG = (("TestGroup", "TestInfo", "success"),
("", "TestGroup", "fail-no-group"),
("TestGroup", "", "fail-no-info"))
_REQ_PARAMS = ["group", "info", "status"]
@pytest.mark.parametrize(_REQ_PARAMS, _REQ_CONFIG, ids=[x[-1] for x in _REQ_CONFIG])
def test_ConfigItem_missing_required(group, info, status):
""" Test that an error is raised when either group or info are not provided """
dtype = str
default = "test"
if status.startswith("success"):
dclass = ConfigItem(datatype=dtype,
default=default,
group=group,
info=info)
assert dclass.group == group
assert dclass.info == info
assert isinstance(dclass.helptext, str) and dclass.helptext
assert dclass.name == ""
else:
with pytest.raises(ValueError):
ConfigItem(datatype=dtype,
default=default,
group=group,
info=info)
_NAME_CONFIG = (("TestName", "success"),
("", "fail-no-name"),
(100, "fail-dtype"))
@pytest.mark.parametrize(("name", "status"), _NAME_CONFIG, ids=[x[-1] for x in _NAME_CONFIG])
def test_ConfigItem_set_name(name, status):
""" Test that setting the config item's name functions correctly """
dtype = str
default = "test"
dclass = ConfigItem(datatype=dtype,
default=default,
group="TestGroup",
info="TestInfo")
if status.startswith("success"):
dclass.set_name(name)
assert dclass.name == name
else:
with pytest.raises(AssertionError):
dclass.set_name(name)
_STR_SET_CONFIG = ( # type:ignore[var-annotated]
("NewValue", ["TestDefault", "NewValue"], "success-choices"),
("NoValue", ["TestDefault", "NewValue"], "success-fallback"),
("NewValue", [], "success-no-choices"),
("#AAAAAA", "colorchooser", "success-colorchooser"),
("NewValue", "colorchooser", "fail-colorchooser"),
(1, [], "fail-int"),
(1.1, [], "fail-float"),
(True, [], "fail-bool"),
(["test", "list"], [], "fail-list"))
_STR_SET_PARAMS = ("value", "choices", "status")
@pytest.mark.parametrize(_STR_SET_PARAMS, _STR_SET_CONFIG, ids=[x[-1] for x in _STR_SET_CONFIG])
def test_ConfigItem_set_str(value, choices, status):
""" Test that strings validate and set correctly """
default = "#ffffff" if choices == "colorchooser" else "TestDefault"
dtype = str
dclass = ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO,
choices=choices)
with pytest.raises(ValueError): # Confirm setting fails when name not set
dclass.set(value)
dclass.set_name("TestName")
if status.startswith("success"):
dclass.set(value)
if status == "success-fallback":
assert dclass.value == dclass() == dclass.get() == dclass.default.lower()
else:
assert dclass.value == dclass() == dclass.get() == value.lower()
else:
with pytest.raises(ValueError):
dclass.set(value)
_INT_SET_CONFIG = ((10, "success"),
("Test", "fail-str"),
(1.1, "fail-float"),
(["test", "list"], "fail-list"))
_INT_SET_PARAMS = ("value", "status")
@pytest.mark.parametrize(_INT_SET_PARAMS, _INT_SET_CONFIG, ids=[x[-1] for x in _INT_SET_CONFIG])
def test_ConfigItem_set_int(value, status):
""" Test that ints validate and set correctly """
default = 20
dtype = int
dclass = ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO,
min_max=(0, 10),
rounding=1)
with pytest.raises(ValueError): # Confirm setting fails when name not set
dclass.set(value)
dclass.set_name("TestName")
if status.startswith("success"):
dclass.set(value)
assert dclass.value == dclass() == dclass.get() == value
else:
with pytest.raises(ValueError):
dclass.set(value)
_FLOAT_SET_CONFIG = ((69.42, "success"),
("Test", "fail-str"),
(42, "fail-int"),
(True, "fail-bool"),
(["test", "list"], "fail-list"))
_FLOAT_SET_PARAMS = ("value", "status")
@pytest.mark.parametrize(_FLOAT_SET_PARAMS,
_FLOAT_SET_CONFIG,
ids=[x[-1] for x in _FLOAT_SET_CONFIG])
def test_ConfigItem_set_float(value, status):
""" Test that floats validate and set correctly """
default = 20.025
dtype = float
dclass = ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO,
min_max=(0.0, 100.0),
rounding=1)
with pytest.raises(ValueError): # Confirm setting fails when name not set
dclass.set(value)
dclass.set_name("TestName")
if status.startswith("success"):
dclass.set(value)
assert dclass.value == dclass() == dclass.get() == value
else:
with pytest.raises(ValueError):
dclass.set(value)
_BOOL_SET_CONFIG = ((True, "success-true"),
(False, "success-false"),
("Test", "fail-str"),
(42, "fail-int"),
(42.69, "fail-float"),
(["test", "list"], "fail-list"))
_BOOL_SET_PARAMS = ("value", "status")
@pytest.mark.parametrize(_BOOL_SET_PARAMS, _BOOL_SET_CONFIG, ids=[x[-1] for x in _BOOL_SET_CONFIG])
def test_ConfigItem_set_bool(value, status):
""" Test that bools validate and set correctly """
default = True
dtype = bool
dclass = ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO)
with pytest.raises(ValueError): # Confirm setting fails when name not set
dclass.set(value)
dclass.set_name("TestName")
if status.startswith("success"):
dclass.set(value)
assert dclass.value == dclass() == dclass.get() == value
else:
with pytest.raises(ValueError):
dclass.set(value)
_LIST_SET_CONFIG = ((["NewValue"], "success-choices"),
("NewValue, TestDefault", "success-delim-comma"),
("NewValue TestDefault", "success-delim-space"),
("NewValue", "success-delim-1value"),
(["NoValue"], "success-fallback1"),
(["NewValue", "NoValue"], "success-fallback2"),
("NewValue, NoValue", "success-fallback-delim-comma"),
("NewValue NoValue", "success-fallback-delim-space"),
("NoValue", "success-fallback-delim-1value"),
(1, "fail-int"),
(1.1, "fail-float"),
(True, "fail-bool"))
_LIST_SET_PARAMS = ("value", "status")
@pytest.mark.parametrize(_LIST_SET_PARAMS, _LIST_SET_CONFIG, ids=[x[-1] for x in _LIST_SET_CONFIG])
def test_ConfigItem_set_list(value, status):
""" Test that lists validate and set correctly """
default = ["TestDefault"]
choices = ["TestDefault", "NewValue"]
dtype = list
dclass = ConfigItem(datatype=dtype,
default=default,
group=_TEST_GROUP,
info=_TEST_INFO,
choices=choices)
with pytest.raises(ValueError): # Confirm setting fails when name not set
dclass.set(value)
dclass.set_name("TestName")
if status.startswith("success"):
dclass.set(value)
if not isinstance(value, list):
value = [x.strip() for x in value.split(",")] if "," in value else value.split()
assert dclass.value == dclass() == dclass.get()
expected = [x.lower() for x in value]
if status.startswith("success-fallback"):
expected = [x.lower() for x in value if x in choices]
if not expected:
expected = [x.lower() for x in default]
assert set(expected) == set(dclass.value)
else:
with pytest.raises(ValueError):
dclass.set(value)
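# Delimiter convention exercised by the list "set" tests above (illustrative):
#   "NewValue, TestDefault" -> ["NewValue", "TestDefault"]   (comma-separated)
#   "NewValue TestDefault"  -> ["NewValue", "TestDefault"]   (space-separated)
#   "NewValue"              -> ["NewValue"]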
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/config/objects_test.py",
"license": "GNU General Public License v3.0",
"lines": 344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/gui/stats/moving_average_test.py | #!/usr/bin/env python3
""" Pytest unit tests for :mod:`lib.gui.stats.moving_average` """
import numpy as np
import pytest
from lib.gui.analysis.moving_average import ExponentialMovingAverage as EMA
# pylint:disable=[protected-access,invalid-name]
_INIT_PARAMS = ((np.array([1, 2, 3], dtype="float32"), 0.0),
(np.array([4, 5, 6], dtype="float64"), 0.25),
(np.array([7, 8, 9], dtype="uint8"), 1.0),
(np.array([0, np.nan, 1], dtype="float32"), 0.74),
(np.array([2, 3, np.inf], dtype="float32"), 0.33),
(np.array([4, 5, 6], dtype="float32"), -1.0),
(np.array([7, 8, 9], dtype="float32"), 99.0))
_INIT_IDS = ["float32", "float64", "uint8", "nan", "inf", "amount:-1", "amount:99"]
@pytest.mark.parametrize(("data", "amount"), _INIT_PARAMS, ids=_INIT_IDS)
def test_ExponentialMovingAverage_init(data: np.ndarray, amount: float):
""" Test that moving_average.MovingAverage correctly initializes """
attrs = {"_data": np.ndarray,
"_alpha": float,
"_dtype": str,
"_row_size": int,
"_out": np.ndarray}
instance = EMA(data, amount)
# Verify required attributes exist and are of the correct type
for attr, attr_type in attrs.items():
assert attr in instance.__dict__
assert isinstance(getattr(instance, attr), attr_type)
# Verify we are testing all existing attributes
for key in instance.__dict__:
assert key in attrs
# Verify numeric sanitization
assert not np.any(np.isnan(instance._data))
assert not np.any(np.isinf(instance._data))
# Check alpha clamp logic
expected_alpha = 1. - min(0.999, max(0.001, amount))
assert instance._alpha == expected_alpha
# dtype assignment logic
expected_dtype = "float32" if data.dtype == np.float32 else "float64"
assert instance._dtype == expected_dtype
# ensure row size is positive and output matches shape and dtype
assert instance._row_size > 0
assert instance._out.shape == data.shape
assert instance._out.dtype == expected_dtype
def naive_ewma(data: np.ndarray, alpha: float) -> np.ndarray:
""" A simple ewma implementation to test for correctness """
out = np.empty_like(data, dtype=data.dtype)
out[0] = data[0]
for i in range(1, len(data)):
out[i] = alpha * data[i] + (1 - alpha) * out[i - 1]
return out
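# Worked example of the recurrence above with alpha = 0.5 (illustrative):
#   data = [1, 2, 3]
#   out[0] = 1.0
#   out[1] = 0.5 * 2 + 0.5 * 1.0 = 1.5
#   out[2] = 0.5 * 3 + 0.5 * 1.5 = 2.25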
@pytest.mark.parametrize("alpha", [0.001, 0.01, 0.25, 0.33, 0.5, 0.66, 0.75, 0.90, 0.999])
@pytest.mark.parametrize("dtype", ("float32", "float64"))
def test_ExponentialMovingAverage_matches_naive(alpha: float, dtype: str) -> None:
""" Make sure that we get sane results out for various data sizes against our reference
for various amounts """
rows = max(5, int(np.random.random() * 25000))
data = np.random.rand(rows).astype(dtype)
instance = EMA(data, 1 - alpha)
out = instance()
ref = naive_ewma(data, alpha)
np.testing.assert_allclose(out, ref, rtol=3e-6, atol=3e-6)
@pytest.mark.parametrize("dtype", ("float32", "float64"))
def test_ExponentialMovingAverage_small_data(dtype: str) -> None:
""" Make sure we get sane results out of our small path """
data = np.array([1., 2., 3.], dtype=dtype)
instance = EMA(data, 0.5)
out = instance()
ref = naive_ewma(data, instance._alpha)
np.testing.assert_allclose(out, ref)
@pytest.mark.parametrize("dtype", ("float32", "float64"))
def test_ExponentialMovingAverage_large_data_safe_path(dtype: str) -> None:
""" Make sure we get sane results out of our safe path """
data = np.random.rand(50000).astype(dtype)
instance = EMA(data, 0.1)
# Force safe path
instance._row_size = 10
out = instance()
ref = naive_ewma(data, instance._alpha)
np.testing.assert_allclose(out, ref, rtol=1e-6, atol=1e-6)
@pytest.mark.parametrize("dtype", ("float32", "float64"))
def test_ExponentialMovingAverage_empty_input(dtype: str) -> None:
""" Test that we get no data on an empty input """
data = np.array([], dtype=dtype)
instance = EMA(data, 0.5)
out = instance()
assert out.size == 0
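# Independent cross-check (assumes pandas is available; it is not used by this
# suite): with adjust=False, pandas' ewm follows the same recurrence as
# naive_ewma above:
#
#     import pandas as pd
#     ref = pd.Series(data).ewm(alpha=alpha, adjust=False).mean().to_numpy()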
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/gui/stats/moving_average_test.py",
"license": "GNU General Public License v3.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/model/losses/feature_loss_test.py | #!/usr/bin/env python3
""" Tests for Faceswap Feature Losses. Adapted from Keras tests. """
import pytest
import numpy as np
from keras import device, Variable
# pylint:disable=import-error
from lib.model.losses.feature_loss import LPIPSLoss
from lib.utils import get_backend
_NETS = ("alex", "squeeze", "vgg16")
_IDS = [f"LPIPS_{x}[{get_backend().upper()}]" for x in _NETS]
@pytest.mark.parametrize("net", _NETS, ids=_IDS)
def test_loss_output(net):
""" Basic dtype and value tests for loss functions. """
with device("cpu"):
y_a = Variable(np.random.random((2, 32, 32, 3)))
y_b = Variable(np.random.random((2, 32, 32, 3)))
objective_output = LPIPSLoss(net)(y_a, y_b)
output = objective_output.detach().numpy() # type:ignore
assert output.dtype == "float32" and not np.any(np.isnan(output))
assert output < 0.1 # LPIPS loss is reduced 10x
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/model/losses/feature_loss_test.py",
"license": "GNU General Public License v3.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/model/losses/loss_test.py | #!/usr/bin/env python3
""" Tests for Faceswap Losses.
Adapted from Keras tests.
"""
import pytest
import numpy as np
from keras import device, losses as k_losses, Variable
from lib.model.losses.loss import (FocalFrequencyLoss, GeneralizedLoss, GradientLoss,
LaplacianPyramidLoss, LInfNorm, LossWrapper)
from lib.model.losses.feature_loss import LPIPSLoss
from lib.model.losses.perceptual_loss import DSSIMObjective, GMSDLoss, LDRFLIPLoss, MSSIMLoss
from lib.utils import get_backend
_PARAMS = ((FocalFrequencyLoss, 1.0),
(GeneralizedLoss, 1.0),
(GradientLoss, 200.0),
(LaplacianPyramidLoss, 1.0),
(LInfNorm, 1.0))
_IDS = [f"{x[0].__name__}[{get_backend().upper()}]" for x in _PARAMS]
@pytest.mark.parametrize(["loss_func", "max_target"], _PARAMS, ids=_IDS)
def test_loss_output(loss_func, max_target):
""" Basic dtype and value tests for loss functions. """
with device("cpu"):
y_a = Variable(np.random.random((2, 32, 32, 3)))
y_b = Variable(np.random.random((2, 32, 32, 3)))
objective_output = loss_func()(y_a, y_b)
output = objective_output.detach().numpy()
assert output.dtype == "float32" and not np.any(np.isnan(output))
assert output < max_target
_LWPARAMS = [(FocalFrequencyLoss, ()),
(GeneralizedLoss, ()),
(GradientLoss, ()),
(LaplacianPyramidLoss, ()),
(LInfNorm, ()),
(LPIPSLoss, ("squeeze", )),
(DSSIMObjective, ()),
(GMSDLoss, ()),
(LDRFLIPLoss, ()),
(MSSIMLoss, ()),
(k_losses.LogCosh, ()),
(k_losses.MeanAbsoluteError, ()),
(k_losses.MeanSquaredError, ())]
_LWIDS = [f"{x[0].__name__}[{get_backend().upper()}]" for x in _LWPARAMS]
@pytest.mark.parametrize(["loss_func", "func_args"], _LWPARAMS, ids=_LWIDS)
def test_loss_wrapper(loss_func, func_args):
""" Test penalized loss wrapper works as expected """
with device("cpu"):
p_loss = LossWrapper()
p_loss.add_loss(loss_func(*func_args), 1.0, -1)
p_loss.add_loss(k_losses.MeanSquaredError(), 2.0, 3)
y_a = Variable(np.random.random((2, 32, 32, 4)))
y_b = Variable(np.random.random((2, 32, 32, 3)))
output = p_loss(y_a, y_b)
output = output.detach().numpy() # type:ignore
assert output.dtype == "float32" and not np.any(np.isnan(output))
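# Note on the wrapper wiring above, inferred from this test's call signatures
# rather than from the wrapper's implementation: add_loss(fn, weight, channel)
# registers ``fn`` scaled by ``weight``; a channel of -1 appears to mean "no
# mask", while channel 3 selects the 4th channel of ``y_a`` (hence the
# 4-channel truth input) as a mask for the penalized loss.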
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/model/losses/loss_test.py",
"license": "GNU General Public License v3.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/model/losses/perceptual_loss_test.py | #!/usr/bin/env python3
""" Tests for Faceswap Feature Losses. Adapted from Keras tests. """
import pytest
import numpy as np
from keras import device, Variable
# pylint:disable=import-error
from lib.model.losses.perceptual_loss import DSSIMObjective, GMSDLoss, LDRFLIPLoss, MSSIMLoss
from lib.utils import get_backend
_PARAMS = [DSSIMObjective, GMSDLoss, LDRFLIPLoss, MSSIMLoss]
_IDS = [f"{x.__name__}[{get_backend().upper()}]" for x in _PARAMS]
@pytest.mark.parametrize("loss_func", _PARAMS, ids=_IDS)
def test_loss_output(loss_func):
""" Basic dtype and value tests for loss functions. """
with device("cpu"):
y_a = Variable(np.random.random((2, 32, 32, 3)))
y_b = Variable(np.random.random((2, 32, 32, 3)))
objective_output = loss_func()(y_a, y_b)
output = objective_output.detach().numpy() # type:ignore
assert output.dtype == "float32" and not np.any(np.isnan(output))
assert output < 1.0
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/model/losses/perceptual_loss_test.py",
"license": "GNU General Public License v3.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/system/sysinfo_test.py | #!/usr/bin/env python3
""" Pytest unit tests for :mod:`lib.system.sysinfo` """
import platform
import typing as T
from collections import namedtuple
from io import StringIO
from unittest.mock import MagicMock
import pytest
import pytest_mock
# pylint:disable=import-error
from lib.gpu_stats import GPUInfo
from lib.system.sysinfo import _Configs, _State, _SysInfo, get_sysinfo
from lib.system import Cuda, Packages, ROCm, System
# pylint:disable=protected-access
# _SysInfo
@pytest.fixture(name="sys_info_instance")
def sys_info_fixture() -> _SysInfo:
""" Single :class:`~lib.system.sysinfo._SysInfo` object for tests """
return _SysInfo()
def test_init(sys_info_instance: _SysInfo) -> None:
""" Test :class:`lib.system.sysinfo._SysInfo` __init__ and attributes """
assert isinstance(sys_info_instance, _SysInfo)
attrs = ["_state_file", "_configs", "_system",
"_python", "_packages", "_gpu", "_cuda", "_rocm"]
assert all(a in sys_info_instance.__dict__ for a in attrs)
assert all(a in attrs for a in sys_info_instance.__dict__)
assert isinstance(sys_info_instance._state_file, str)
assert isinstance(sys_info_instance._configs, str)
assert isinstance(sys_info_instance._system, System)
assert isinstance(sys_info_instance._python, dict)
assert sys_info_instance._python == {"implementation": platform.python_implementation(),
"version": platform.python_version()}
assert isinstance(sys_info_instance._packages, Packages)
assert isinstance(sys_info_instance._gpu, GPUInfo)
assert isinstance(sys_info_instance._cuda, Cuda)
assert isinstance(sys_info_instance._rocm, ROCm)
def test_properties(sys_info_instance: _SysInfo) -> None:
""" Test :class:`lib.system.sysinfo._SysInfo` properties """
ints = ["_ram_free", "_ram_total", "_ram_available", "_ram_used"]
strs = ["_fs_command", "_conda_version", "_git_commits", "_cuda_versions",
"_cuda_version", "_cudnn_versions", "_rocm_version", "_rocm_versions"]
for prop in ints:
assert hasattr(sys_info_instance, prop), f"sysinfo missing property '{prop}'"
assert isinstance(getattr(sys_info_instance, prop),
int), f"sysinfo property '{prop}' not int"
for prop in strs:
assert hasattr(sys_info_instance, prop), f"sysinfo missing property '{prop}'"
assert isinstance(getattr(sys_info_instance, prop),
str), f"sysinfo property '{prop}' not str"
def test_get_gpu_info(sys_info_instance: _SysInfo) -> None:
""" Test _get_gpu_info method of :class:`lib.system.sysinfo._SysInfo` returns as expected """
assert hasattr(sys_info_instance, "_get_gpu_info")
gpu_info = sys_info_instance._get_gpu_info()
assert isinstance(gpu_info, GPUInfo)
def test__format_ram(sys_info_instance: _SysInfo, monkeypatch: pytest.MonkeyPatch) -> None:
""" Test the _format_ram method of :class:`lib.system.sysinfo._SysInfo` """
assert hasattr(sys_info_instance, "_format_ram")
svmem = namedtuple("svmem", ["available", "free", "total", "used"])
data = svmem(12345678, 1234567, 123456789, 123456)
monkeypatch.setattr("psutil.virtual_memory", lambda *args, **kwargs: data)
ram_info = sys_info_instance._format_ram()
assert isinstance(ram_info, str)
assert ram_info == "Total: 117MB, Available: 11MB, Used: 0MB, Free: 1MB"
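    # The expected string above follows from integer MB conversion (bytes // 1024 ** 2):
    #   total 123456789 -> 117, available 12345678 -> 11, used 123456 -> 0, free 1234567 -> 1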
def test_full_info(sys_info_instance: _SysInfo) -> None:
""" Test the full_info method of :class:`lib.system.sysinfo._SysInfo` returns as expected """
assert hasattr(sys_info_instance, "full_info")
sys_info = sys_info_instance.full_info()
assert isinstance(sys_info, str)
sections = ["System Information", "Pip Packages", "Configs"]
for section in sections:
assert section in sys_info, f"Section {section} not in full_info"
if sys_info_instance._system.is_conda:
assert "Conda Packages" in sys_info
else:
assert "Conda Packages" not in sys_info
keys = ["backend", "os_platform", "os_machine", "os_release", "py_conda_version",
"py_implementation", "py_version", "py_command", "py_virtual_env", "sys_cores",
"sys_processor", "sys_ram", "encoding", "git_branch", "git_commits",
"gpu_cuda_versions", "gpu_cuda", "gpu_cudnn", "gpu_rocm_versions", "gpu_rocm_version",
"gpu_driver", "gpu_devices", "gpu_vram", "gpu_devices_active"]
for key in keys:
assert f"{key}:" in sys_info, f"'{key}:' not in full_info"
# get_sysinfo
def test_get_sys_info(mocker: pytest_mock.MockerFixture) -> None:
    """ Test that the :func:`~lib.system.sysinfo.get_sysinfo` function executes correctly """
sys_info = get_sysinfo()
assert isinstance(sys_info, str)
full_info = mocker.patch("lib.system.sysinfo._SysInfo.full_info")
get_sysinfo()
assert full_info.called
# _Configs
@pytest.fixture(name="configs_instance")
def configs_fixture():
""" Pytest fixture for :class:`~lib.utils.sysinfo._Configs` """
return _Configs()
def test__configs__init__(configs_instance: _Configs) -> None:
""" Test __init__ and attributes for :class:`~lib.utils.sysinfo._Configs` """
assert hasattr(configs_instance, "config_dir")
assert isinstance(configs_instance.config_dir, str)
assert hasattr(configs_instance, "configs")
assert isinstance(configs_instance.configs, str)
def test__configs__get_configs(configs_instance: _Configs) -> None:
""" Test __init__ and attributes for :class:`~lib.utils.sysinfo._Configs` """
assert hasattr(configs_instance, "_get_configs")
assert isinstance(configs_instance._get_configs(), str)
def test__configs__parse_configs(configs_instance: _Configs,
mocker: pytest_mock.MockerFixture) -> None:
""" Test _parse_configs function for :class:`~lib.utils.sysinfo._Configs` """
assert hasattr(configs_instance, "_parse_configs")
assert isinstance(configs_instance._parse_configs([]), str)
configs_instance._parse_ini = T.cast(MagicMock, mocker.MagicMock()) # type:ignore
configs_instance._parse_json = T.cast(MagicMock, mocker.MagicMock()) # type:ignore
configs_instance._parse_configs(config_files=["test.ini", ".faceswap"])
assert configs_instance._parse_ini.called
assert configs_instance._parse_json.called
def test__configs__parse_ini(configs_instance: _Configs,
monkeypatch: pytest.MonkeyPatch) -> None:
""" Test _parse_ini function for :class:`~lib.utils.sysinfo._Configs` """
assert hasattr(configs_instance, "_parse_ini")
file = ("[test.ini_header]\n"
"# Test Header\n\n"
"param = value")
monkeypatch.setattr("builtins.open", lambda *args, **kwargs: StringIO(file))
converted = configs_instance._parse_ini("test.ini")
assert isinstance(converted, str)
assert converted == ("\n[test.ini_header]\n"
"param: value\n")
def test__configs__parse_json(configs_instance: _Configs,
monkeypatch: pytest.MonkeyPatch) -> None:
""" Test _parse_json function for :class:`~lib.utils.sysinfo._Configs` """
assert hasattr(configs_instance, "_parse_json")
file = '{"test": "param"}'
monkeypatch.setattr("builtins.open", lambda *args, **kwargs: StringIO(file))
converted = configs_instance._parse_json(".file")
assert isinstance(converted, str)
assert converted == ("test: param\n")
def test__configs__format_text(configs_instance: _Configs) -> None:
""" Test _format_text function for :class:`~lib.utils.sysinfo._Configs` """
assert hasattr(configs_instance, "_format_text")
key, val = " test_key ", "test_val "
formatted = configs_instance._format_text(key, val)
assert isinstance(formatted, str)
assert formatted == "test_key: test_val\n"
# _State
@pytest.fixture(name="state_instance")
def state_fixture():
""" Pytest fixture for :class:`~lib.utils.sysinfo._State` """
return _State()
def test__state__init__(state_instance: _State) -> None:
""" Test __init__ and attributes for :class:`~lib.utils.sysinfo._State` """
assert hasattr(state_instance, "_model_dir")
assert state_instance._model_dir is None
assert hasattr(state_instance, "_trainer")
assert state_instance._trainer is None
assert hasattr(state_instance, "state_file")
assert isinstance(state_instance.state_file, str)
def test__state__is_training(state_instance: _State,
monkeypatch: pytest.MonkeyPatch) -> None:
""" Test _is_training function for :class:`~lib.utils.sysinfo._State` """
assert hasattr(state_instance, "_is_training")
assert isinstance(state_instance._is_training, bool)
assert not state_instance._is_training
monkeypatch.setattr("sys.argv", ["faceswap.py", "train"])
assert state_instance._is_training
monkeypatch.setattr("sys.argv", ["faceswap.py", "extract"])
assert not state_instance._is_training
def test__state__get_arg(state_instance: _State,
monkeypatch: pytest.MonkeyPatch) -> None:
""" Test _get_arg function for :class:`~lib.utils.sysinfo._State` """
assert hasattr(state_instance, "_get_arg")
assert state_instance._get_arg("-t", "--test_arg") is None
monkeypatch.setattr("sys.argv", ["test", "command", "-t", "test_option"])
assert state_instance._get_arg("-t", "--test_arg") == "test_option"
def test__state__get_state_file(state_instance: _State,
mocker: pytest_mock.MockerFixture,
monkeypatch: pytest.MonkeyPatch) -> None:
""" Test _get_state_file function for :class:`~lib.utils.sysinfo._State` """
assert hasattr(state_instance, "_get_state_file")
assert isinstance(state_instance._get_state_file(), str)
mock_is_training = mocker.patch("lib.system.sysinfo._State._is_training")
# Not training or missing training arguments
mock_is_training.return_value = False
assert state_instance._get_state_file() == ""
mock_is_training.return_value = False
monkeypatch.setattr(state_instance, "_model_dir", None)
assert state_instance._get_state_file() == ""
monkeypatch.setattr(state_instance, "_model_dir", "test_dir")
monkeypatch.setattr(state_instance, "_trainer", None)
assert state_instance._get_state_file() == ""
monkeypatch.setattr(state_instance, "_trainer", "test_trainer")
# Training but file not found
assert state_instance._get_state_file() == ""
# State file is just a json dump
file = ('{\n'
' "test": "json",\n'
'}')
monkeypatch.setattr("os.path.isfile", lambda *args, **kwargs: True)
monkeypatch.setattr("builtins.open", lambda *args, **kwargs: StringIO(file))
assert state_instance._get_state_file().endswith(file)
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/system/sysinfo_test.py",
"license": "GNU General Public License v3.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/system/system_test.py | #!/usr/bin/env python3
""" Pytest unit tests for :mod:`lib.system.system` """
import ctypes
import locale
import os
import platform
import sys
import pytest
import pytest_mock
# pylint:disable=import-error
import lib.system.system as system_mod
from lib.system.system import _lines_from_command, VALID_PYTHON, Packages, System
# pylint:disable=protected-access
def test_valid_python() -> None:
""" Confirm python version has a min and max and that it is Python 3 """
assert len(VALID_PYTHON) == 2
assert all(len(v) == 2 for v in VALID_PYTHON)
assert all(isinstance(x, int) for v in VALID_PYTHON for x in v)
assert all(v[0] == 3 for v in VALID_PYTHON)
assert VALID_PYTHON[0] <= VALID_PYTHON[1]
def test_lines_from_command(mocker: pytest_mock.MockerFixture) -> None:
""" Confirm lines from command executes as expected """
input_ = ["test", "input"]
subproc_out = " this \nis\n test\noutput \n"
mock_run = mocker.patch("lib.system.system.run")
mock_run.return_value.stdout = subproc_out
result = _lines_from_command(input_)
assert mock_run.called
assert result == subproc_out.splitlines()
# System
@pytest.fixture(name="system_instance")
def system_fixture() -> System:
""" Single :class:`lib.system.System` object for tests """
return System()
def test_system_init(system_instance: System) -> None:
""" Test :class:`lib.system.System` __init__ and attributes """
assert isinstance(system_instance, System)
attrs = ["platform", "system", "machine", "release", "processor", "cpu_count",
"python_implementation", "python_version", "python_architecture", "encoding",
"is_conda", "is_admin", "is_virtual_env"]
assert all(a in system_instance.__dict__ for a in attrs)
assert all(a in attrs for a in system_instance.__dict__)
assert system_instance.platform == platform.platform()
assert system_instance.system == platform.system().lower()
assert system_instance.machine == platform.machine()
assert system_instance.release == platform.release()
assert system_instance.processor == platform.processor()
assert system_instance.cpu_count == os.cpu_count()
assert system_instance.python_implementation == platform.python_implementation()
assert system_instance.python_version == platform.python_version()
assert system_instance.python_architecture == platform.architecture()[0]
assert system_instance.encoding == locale.getpreferredencoding()
assert system_instance.is_conda == ("conda" in sys.version.lower() or
os.path.exists(os.path.join(sys.prefix, "conda-meta")))
assert isinstance(system_instance.is_admin, bool)
assert isinstance(system_instance.is_virtual_env, bool)
def test_system_properties(system_instance: System) -> None:
""" Test :class:`lib.system.System` properties """
assert hasattr(system_instance, "is_linux")
assert isinstance(system_instance.is_linux, bool)
if platform.system().lower() == "linux":
assert system_instance.is_linux
assert not system_instance.is_macos
assert not system_instance.is_windows
assert hasattr(system_instance, "is_macos")
assert isinstance(system_instance.is_macos, bool)
if platform.system().lower() == "darwin":
assert system_instance.is_macos
assert not system_instance.is_linux
assert not system_instance.is_windows
assert hasattr(system_instance, "is_windows")
assert isinstance(system_instance.is_windows, bool)
if platform.system().lower() == "windows":
assert system_instance.is_windows
assert not system_instance.is_linux
assert not system_instance.is_macos
def test_system_get_permissions(system_instance: System) -> None:
""" Test :class:`lib.system.System` _get_permissions method """
assert hasattr(system_instance, "_get_permissions")
is_admin = system_instance._get_permissions()
if platform.system() == "Windows":
assert is_admin == (ctypes.windll.shell32.IsUserAnAdmin() != 0) # type:ignore
else:
assert is_admin == (os.getuid() == 0) # type:ignore # pylint:disable=no-member
def test_system_check_virtual_env(system_instance: System,
monkeypatch: pytest.MonkeyPatch) -> None:
""" Test :class:`lib.system.System` _check_virtual_env method """
system_instance.is_conda = True
monkeypatch.setattr(system_mod.sys, "prefix", "/home/user/miniconda3/envs/testenv")
assert system_instance._check_virtual_env()
monkeypatch.setattr(system_mod.sys, "prefix", "/home/user/miniconda3/bin/")
assert not system_instance._check_virtual_env()
system_instance.is_conda = False
monkeypatch.setattr(system_mod.sys, "base_prefix", "/home/user/venv/")
monkeypatch.setattr(system_mod.sys, "prefix", "/usr/bin/")
assert system_instance._check_virtual_env()
monkeypatch.setattr(system_mod.sys, "base_prefix", "/usr/bin/")
assert not system_instance._check_virtual_env()
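# Detection logic implied by the cases above (assumed from the assertions, not
# the verified implementation):
#   conda:  virtual when "envs" appears in sys.prefix
#           (e.g. .../miniconda3/envs/testenv vs the base .../miniconda3/bin)
#   other:  virtual when sys.prefix != sys.base_prefix, which is the standard
#           venv check from the Python documentation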
def test_system_validate_python(system_instance: System,
monkeypatch: pytest.MonkeyPatch,
mocker: pytest_mock.MockerFixture) -> None:
""" Test :class:`lib.system.System` _validate_python method """
monkeypatch.setattr(system_mod, "VALID_PYTHON", (((3, 11), (3, 13))))
monkeypatch.setattr(system_mod.sys, "version_info", (3, 12, 0))
monkeypatch.setattr("builtins.input", lambda _: "")
system_instance.python_architecture = "64bit"
assert system_instance.validate_python()
assert system_instance.validate_python(max_version=(3, 12))
sys_exit = mocker.patch("lib.system.system.sys.exit")
system_instance.python_architecture = "32bit"
system_instance.validate_python()
assert sys_exit.called
system_instance.python_architecture = "64bit"
system_instance.validate_python(max_version=(3, 11))
assert sys_exit.called
for vers in ((3, 10, 0), (3, 14, 0)):
monkeypatch.setattr(system_mod.sys, "version_info", vers)
system_instance.validate_python()
assert sys_exit.called
@pytest.mark.parametrize("system_name, machine, is_conda, should_exit", [
("other", "x86_64", False, True), # Unsupported OS
("darwin", "arm64", True, False), # Apple Silicon inside conda
("darwin", "arm64", False, True), # Apple Silicon outside conda
("linux", "x86_64", True, False), # Supported
("windows", "x86_64", True, False), # Supported
])
def test_system_validate(system_instance: System,
mocker: pytest_mock.MockerFixture,
system_name,
machine,
is_conda,
should_exit) -> None:
""" Test :class:`lib.system.System` _validate method """
validate_python = mocker.patch("lib.system.System.validate_python")
system_instance.system = system_name
system_instance.machine = machine
system_instance.is_conda = is_conda
sys_exit = mocker.patch("lib.system.system.sys.exit")
system_instance.validate()
if should_exit:
assert sys_exit.called
else:
assert not sys_exit.called
assert validate_python.called
# Packages
@pytest.fixture(name="packages_instance")
def packages_fixture() -> Packages:
""" Single :class:`lib.system.Packages` object for tests """
return Packages()
def test_packages_init(packages_instance: Packages, mocker: pytest_mock.MockerFixture) -> None:
""" Test :class:`lib.system.Packages` __init__ and attributes """
assert isinstance(packages_instance, Packages)
attrs = ["_conda_exe", "_installed_python", "_installed_conda"]
assert all(a in packages_instance.__dict__ for a in attrs)
assert all(a in attrs for a in packages_instance.__dict__)
assert isinstance(packages_instance._conda_exe,
str) or packages_instance._conda_exe is None
assert isinstance(packages_instance._installed_python, dict)
assert isinstance(packages_instance._installed_conda,
list) or packages_instance._installed_conda is None
which = mocker.patch("lib.system.system.which")
Packages()
which.assert_called_once_with("conda")
def test_packages_properties(packages_instance: Packages) -> None:
""" Test :class:`lib.system.Packages` properties """
for prop in ("installed_python", "installed_conda"):
assert hasattr(packages_instance, prop)
assert isinstance(getattr(packages_instance, prop), dict)
pretty = f"{prop}_pretty"
assert hasattr(packages_instance, pretty)
assert isinstance(getattr(packages_instance, pretty), str)
def test_packages_get_installed_python(packages_instance: Packages,
mocker: pytest_mock.MockerFixture,
monkeypatch: pytest.MonkeyPatch) -> None:
""" Test :class:`lib.system.Packages` get_installed_python method """
lines_from_command = mocker.patch("lib.system.system._lines_from_command")
monkeypatch.setattr(system_mod.sys, "executable", "python")
out = packages_instance._get_installed_python()
lines_from_command.assert_called_once_with(["python", "-m", "pip", "freeze", "--local"])
assert isinstance(out, dict)
monkeypatch.setattr(system_mod, "_lines_from_command", lambda _: ["pacKage1==1.0.0",
"PACKAGE2==1.1.0",
"# Ignored",
"malformed=1.2.3",
"package3==0.2.1"])
out = packages_instance._get_installed_python()
assert out == {"package1": "1.0.0", "package2": "1.1.0", "package3": "0.2.1"}
def test_packages_get_installed_conda(packages_instance: Packages,
mocker: pytest_mock.MockerFixture,
monkeypatch: pytest.MonkeyPatch) -> None:
""" Test :class:`lib.system.Packages` get_installed_conda method """
packages_instance._conda_exe = None
packages_instance._installed_conda = None
packages_instance._get_installed_conda()
assert packages_instance._installed_conda is None
packages_instance._conda_exe = "conda"
lines_from_command = mocker.patch("lib.system.system._lines_from_command")
packages_instance._get_installed_conda()
lines_from_command.assert_called_once_with(["conda", "list", "--show-channel-urls"])
monkeypatch.setattr(system_mod, "_lines_from_command", lambda _: [])
packages_instance._get_installed_conda()
assert packages_instance._installed_conda == ["Could not get Conda package list"]
_pkgs = [
"package1 4.15.0 pypi_0 pypi",
"pkg2 2025b h78e105d_0 conda-forge",
"Packag3 3.1.3 pypi_0 defaults"]
monkeypatch.setattr(system_mod, "_lines_from_command", lambda _: _pkgs)
packages_instance._get_installed_conda()
assert packages_instance._installed_conda == _pkgs
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/system/system_test.py",
"license": "GNU General Public License v3.0",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/training/augmentation_test.py | #!/usr/bin/env python3
""" Pytest unit tests for :mod:`lib.training.augmentation` """
import typing as T
import cv2
import numpy as np
import pytest
import pytest_mock
from lib.config import ConfigValueType
from lib.training.augmentation import (ConstantsAugmentation, ConstantsColor, ConstantsTransform,
ConstantsWarp, ImageAugmentation)
from plugins.train.trainer import trainer_config as cfg
# pylint:disable=unused-import
from tests.lib.config.helpers import patch_config # noqa[F401]
# pylint:disable=protected-access,redefined-outer-name
MODULE_PREFIX = "lib.training.augmentation"
# CONSTANTS #
_CLAHE_CONF = (({"color_clahe_chance": 12, "color_clahe_max_size": 2}, 64),
({"color_clahe_chance": 25, "color_clahe_max_size": 4}, 128),
({"color_clahe_chance": 50, "color_clahe_max_size": 6}, 256),
({"color_clahe_chance": 75, "color_clahe_max_size": 8}, 384))
@pytest.mark.parametrize(("config", "size"), _CLAHE_CONF, ids=[x[-1] for x in _CLAHE_CONF])
def test_constants_get_clahe(config: dict[str, T.Any],
size: int,
patch_config) -> None: # noqa[F811]
""" Test ConstantsAugmentation._get_clahe works as expected """
patch_config(cfg, config)
contrast, chance, max_size = ConstantsAugmentation._get_clahe(size)
assert isinstance(contrast, int)
assert isinstance(chance, float)
assert isinstance(max_size, int)
assert contrast == max(2, size // 128)
assert chance == config["color_clahe_chance"] / 100.
assert max_size == config["color_clahe_max_size"]
_LAB_CONF = ({"color_lightness": 30, "color_ab": 8},
{"color_lightness": 8, "color_ab": 25},
{"color_lightness": 63, "color_ab": 12})
@pytest.mark.parametrize(("config"), _LAB_CONF)
def test_constants_get_lab(config: dict[str, T.Any], patch_config) -> None: # noqa[F811]
""" Test ConstantsAugmentation._get_lab works as expected """
patch_config(cfg, config)
lab_adjust = ConstantsAugmentation._get_lab()
assert isinstance(lab_adjust, np.ndarray)
assert lab_adjust.dtype == np.float32
assert lab_adjust.shape == (3, )
assert lab_adjust[0] == config["color_lightness"] / 100.
assert lab_adjust[1] == config["color_ab"] / 100.
assert lab_adjust[2] == config["color_ab"] / 100.
_CLAHE_LAB_CONF = (
{"color_clahe_chance": 50, "color_clahe_max_size": 4.0, "color_lightness": 30, "color_ab": 8},
{"color_clahe_chance": 30, "color_clahe_max_size": 6.0, "color_lightness": 20, "color_ab": 6},
{"color_clahe_chance": 75, "color_clahe_max_size": 8.0, "color_lightness": 10, "color_ab": 12})
@pytest.mark.parametrize(("config"), _CLAHE_LAB_CONF)
def test_constants_get_color(config: dict[str, T.Any],
patch_config, # noqa[F811]
mocker: pytest_mock.MockerFixture) -> None:
""" Test ConstantsAugmentation._get_color works as expected """
patch_config(cfg, config)
clahe_mock = mocker.patch(f"{MODULE_PREFIX}.ConstantsAugmentation._get_clahe",
return_value=(1, 2.0, 3))
lab_mock = mocker.patch(f"{MODULE_PREFIX}.ConstantsAugmentation._get_lab",
return_value=np.array([1.0, 2.0, 3.0], dtype="float32"))
color = ConstantsAugmentation._get_color(256)
clahe_mock.assert_called_once_with(256)
lab_mock.assert_called_once_with()
assert isinstance(color, ConstantsColor)
assert isinstance(color.clahe_base_contrast, int)
assert isinstance(color.clahe_chance, float)
assert isinstance(color.clahe_max_size, int)
assert isinstance(color.lab_adjust, np.ndarray)
assert color.clahe_base_contrast == clahe_mock.return_value[0]
assert color.clahe_chance == clahe_mock.return_value[1]
assert color.clahe_max_size == clahe_mock.return_value[2]
assert np.all(color.lab_adjust == lab_mock.return_value)
_TRANSFORM_CONF = (
({"rotation_range": 25, "zoom_amount": 1, "shift_range": 6, "flip_chance": 10}, 64),
({"rotation_range": 6, "zoom_amount": 2, "shift_range": 5, "flip_chance": 60}, 96),
({"rotation_range": 39, "zoom_amount": 3, "shift_range": 4, "flip_chance": 23}, 128),
({"rotation_range": 12, "zoom_amount": 4, "shift_range": 3, "flip_chance": 52}, 256),
({"rotation_range": 47, "zoom_amount": 5, "shift_range": 2, "flip_chance": 33}, 384),
({"rotation_range": 3, "zoom_amount": 6, "shift_range": 1, "flip_chance": 44}, 512))
@pytest.mark.parametrize(("config", "size"), _TRANSFORM_CONF)
def test_constants_get_transform(config: dict[str, T.Any],
size: int,
patch_config) -> None: # noqa[F811]
""" Test ConstantsAugmentation._get_transform works as expected """
patch_config(cfg, config)
transform = ConstantsAugmentation._get_transform(size)
assert isinstance(transform, ConstantsTransform)
assert isinstance(transform.rotation, int)
assert isinstance(transform.zoom, float)
assert isinstance(transform.shift, float)
assert isinstance(transform.flip, float)
assert transform.rotation == config["rotation_range"]
assert transform.zoom == config["zoom_amount"] / 100.
assert transform.shift == (config["shift_range"] / 100.) * size
assert transform.flip == config["flip_chance"] / 100.
@pytest.mark.parametrize(("size", "batch_size"), ((64, 16), (384, 32)))
def test_constants_get_warp_to_landmarks(size: int, batch_size: int) -> None:
""" Test ConstantsAugmentation._get_warp_to_landmarks works as expected """
anchors, grids = ConstantsAugmentation._get_warp_to_landmarks(size, batch_size)
assert isinstance(anchors, np.ndarray)
assert isinstance(grids, np.ndarray)
assert anchors.dtype == np.int32
assert anchors.shape == (batch_size, 8, 2)
assert anchors.min() == 0
assert anchors.max() == size - 1
assert grids.dtype == np.float32
assert grids.shape == (2, size, size)
assert grids.min() == 0.
assert grids.max() == size - 1
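# The shapes asserted above are consistent with (an assumed implementation):
#   grids   ~ np.mgrid[0:size, 0:size].astype("float32"): shape (2, size, size)
#             with values spanning 0 .. size - 1
#   anchors ~ 8 frame-edge points (4 corners plus 4 edge midpoints) per batch
#             item: shape (batch_size, 8, 2) int32 within [0, size - 1]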
@pytest.mark.parametrize(("size", "batch_size"), ((64, 16), (384, 32)))
def test_constants_get_warp(size: int, batch_size: int, mocker: pytest_mock.MockerFixture) -> None:
""" Test ConstantsAugmentation._get_warp works as expected """
warp_lm_mock = mocker.patch(
f"{MODULE_PREFIX}.ConstantsAugmentation._get_warp_to_landmarks",
return_value=((np.random.random((batch_size, 8, 2)) * 100).astype("int32"),
(np.random.random((2, size, size))).astype("float32")))
warp_pad = int(1.25 * size)
warps = ConstantsAugmentation._get_warp(size, batch_size)
warp_lm_mock.assert_called_once_with(size, batch_size)
assert isinstance(warps, ConstantsWarp)
assert isinstance(warps.maps, np.ndarray)
assert warps.maps.dtype == "float32"
assert warps.maps.shape == (batch_size, 2, 5, 5)
assert warps.maps.min() == 0.
assert warps.maps.mean() == size / 2.
assert warps.maps.max() == size
assert isinstance(warps.pad, tuple)
assert len(warps.pad) == 2
assert all(isinstance(x, int) for x in warps.pad)
assert all(x == warp_pad for x in warps.pad)
assert isinstance(warps.slices, slice)
assert warps.slices.step is None
assert warps.slices.start == warp_pad // 10
assert warps.slices.stop == -warp_pad // 10
assert isinstance(warps.scale, float)
assert warps.scale == 5 / 256 * size
assert isinstance(warps.lm_edge_anchors, np.ndarray)
assert warps.lm_edge_anchors.dtype == warp_lm_mock.return_value[0].dtype
assert warps.lm_edge_anchors.shape == warp_lm_mock.return_value[0].shape
assert np.all(warps.lm_edge_anchors == warp_lm_mock.return_value[0])
assert isinstance(warps.lm_grids, np.ndarray)
assert warps.lm_grids.dtype == warp_lm_mock.return_value[1].dtype
assert warps.lm_grids.shape == warp_lm_mock.return_value[1].shape
assert np.all(warps.lm_grids == warp_lm_mock.return_value[1])
assert isinstance(warps.lm_scale, float)
assert warps.lm_scale == 2 / 256 * size
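# Worked constants for the size=64 case: pad = int(1.25 * 64) = 80, so
# warps.pad == (80, 80) and warps.slices == slice(8, -8); scale = 5 / 256 * 64
# = 1.25 and lm_scale = 2 / 256 * 64 = 0.5, exactly the values the assertions
# above resolve to for the first parametrized case.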
_CONFIG = T.cast(
dict[str, ConfigValueType],
{"color_clahe_chance": 50, "color_clahe_max_size": 4, "color_lightness": 30, "color_ab": 8,
"rotation_range": 10, "zoom_amount": 5, "shift_range": 5, "flip_chance": 50})
@pytest.mark.parametrize(("size", "batch_size"), ((64, 16), (384, 32)))
def test_constants_from_config(size: int,
batch_size: int,
patch_config, # noqa[F811]
mocker: pytest_mock.MockerFixture
) -> None:
""" Test that ConstantsAugmentation.from_config executes correctly """
patch_config(cfg, _CONFIG)
constants = ConstantsAugmentation.from_config(size, batch_size)
assert isinstance(constants, ConstantsAugmentation)
assert isinstance(constants.color, ConstantsColor)
assert isinstance(constants.transform, ConstantsTransform)
assert isinstance(constants.warp, ConstantsWarp)
color_mock = mocker.patch(f"{MODULE_PREFIX}.ConstantsAugmentation._get_color")
transform_mock = mocker.patch(f"{MODULE_PREFIX}.ConstantsAugmentation._get_transform")
warp_mock = mocker.patch(f"{MODULE_PREFIX}.ConstantsAugmentation._get_warp")
ConstantsAugmentation.from_config(size, batch_size)
color_mock.assert_called_once_with(size)
transform_mock.assert_called_once_with(size)
warp_mock.assert_called_once_with(size, batch_size)
# IMAGE AUGMENTATION #
def get_batch(batch_size, size: int) -> np.ndarray:
""" Obtain a batch of random float32 image data for the given batch size and height/width """
return (np.random.random((batch_size, size, size, 3)) * 255).astype("uint8")
def get_instance(batch_size, size) -> ImageAugmentation:
""" Obtain an ImageAugmentation instance for the given batch size and size """
return ImageAugmentation(batch_size, size)
@pytest.mark.parametrize(("size", "batch_size"), ((64, 16), (384, 32)))
def test_image_augmentation_init(size: int,
batch_size: int,
patch_config) -> None: # noqa[F811]
""" Test ImageAugmentation initializes """
patch_config(cfg, _CONFIG)
attrs = {"_processing_size": int,
"_batch_size": int,
"_constants": ConstantsAugmentation}
instance = get_instance(batch_size, size)
assert all(x in instance.__dict__ for x in attrs)
assert all(x in attrs for x in instance.__dict__)
assert isinstance(instance._batch_size, int)
assert isinstance(instance._processing_size, int)
assert isinstance(instance._constants, ConstantsAugmentation)
assert instance._batch_size == batch_size
assert instance._processing_size == size
@pytest.mark.parametrize(("size", "batch_size"), ((64, 16), (384, 32)))
def test_image_augmentation_random_lab(size: int,
batch_size: int,
patch_config, # noqa[F811]
mocker: pytest_mock.MockerFixture) -> None:
""" Test that ImageAugmentation._random_lab executes as expected """
patch_config(cfg, _CONFIG)
batch = get_batch(batch_size, size)
original = batch.copy()
instance = get_instance(batch_size, size)
instance._random_lab(batch)
assert original.shape == batch.shape
assert original.dtype == batch.dtype
assert not np.allclose(original, batch)
randoms_mock = mocker.patch(f"{MODULE_PREFIX}.np.random.uniform")
instance._random_lab(batch)
randoms_mock.assert_called_once()
@pytest.mark.parametrize(("size", "batch_size"), ((64, 16), (384, 32)))
def test_image_augmentation_random_clahe(size: int, # pylint:disable=too-many-locals
batch_size: int,
patch_config, # noqa[F811]
mocker: pytest_mock.MockerFixture) -> None:
""" Test that ImageAugmentation._random_clahe executes as expected """
# Expected output
patch_config(cfg, _CONFIG)
batch = get_batch(batch_size, size)
original = batch.copy()
instance = get_instance(batch_size, size)
instance._random_clahe(batch)
assert original.shape == batch.shape
assert original.dtype == batch.dtype
assert not np.allclose(original, batch)
# Functions called
rand_ret = np.random.rand(batch_size)
rand_mock = mocker.patch(f"{MODULE_PREFIX}.np.random.rand",
return_value=rand_ret)
where_ret = np.where(rand_ret < instance._constants.color.clahe_chance)
where_mock = mocker.patch(f"{MODULE_PREFIX}.np.where",
return_value=where_ret)
randint_ret = np.random.randint(instance._constants.color.clahe_max_size,
size=where_ret[0].shape[0],
dtype="uint8")
randint_mock = mocker.patch(f"{MODULE_PREFIX}.np.random.randint",
return_value=randint_ret)
grid_sizes = (randint_ret *
(instance._constants.color.clahe_base_contrast //
2)) + instance._constants.color.clahe_base_contrast
clahe_calls = [mocker.call(clipLimit=2.0, tileGridSize=(grid, grid)) for grid in grid_sizes]
clahe_mock = mocker.patch(f"{MODULE_PREFIX}.cv2.createCLAHE",
return_value=cv2.createCLAHE(clipLimit=2.0, tileGridSize=(3, 3)))
batch = get_batch(batch_size, size)
instance._random_clahe(batch)
rand_mock.assert_called_once_with(batch_size)
where_mock.assert_called_once()
randint_mock.assert_called_once_with(instance._constants.color.clahe_max_size + 1,
size=where_ret[0].shape[0],
dtype="uint8")
clahe_mock.assert_has_calls(clahe_calls) # type:ignore
@pytest.mark.parametrize(("size", "batch_size"), ((64, 16), (384, 32)))
def test_image_augmentation_color_adjust(size: int,
batch_size: int,
patch_config, # noqa[F811]
mocker: pytest_mock.MockerFixture) -> None:
""" Test that ImageAugmentation._color_adjust executes as expected """
patch_config(cfg, _CONFIG)
batch = get_batch(batch_size, size)
output = get_instance(batch_size, size).color_adjust(batch)
assert output.shape == batch.shape
assert output.dtype == batch.dtype
assert not np.allclose(output, batch)
batch_convert_mock = mocker.patch(f"{MODULE_PREFIX}.batch_convert_color")
lab_mock = mocker.patch(f"{MODULE_PREFIX}.ImageAugmentation._random_lab")
clahe_mock = mocker.patch(f"{MODULE_PREFIX}.ImageAugmentation._random_clahe")
batch = get_batch(batch_size, size)
get_instance(batch_size, size).color_adjust(batch)
assert batch_convert_mock.call_count == 2
lab_mock.assert_called_once()
clahe_mock.assert_called_once()
@pytest.mark.parametrize(("size", "batch_size"), ((64, 16), (384, 32)))
def test_image_augmentation_transform(size: int,
batch_size: int,
patch_config, # noqa[F811]
mocker: pytest_mock.MockerFixture) -> None:
""" Test that ImageAugmentation.transform executes as expected """
patch_config(cfg, _CONFIG)
batch = get_batch(batch_size, size)
instance = get_instance(batch_size, size)
original = batch.copy()
instance.transform(batch)
assert original.shape == batch.shape
assert original.dtype == batch.dtype
assert not np.allclose(original, batch)
rand_ret = [np.random.uniform(-10, 10, size=batch_size).astype("float32"),
np.random.uniform(.95, 1.05, size=batch_size).astype("float32"),
np.random.uniform(-9.2, 9.2, size=(batch_size, 2)).astype("float32")]
rand_calls = [mocker.call(-instance._constants.transform.rotation,
instance._constants.transform.rotation,
size=batch_size),
mocker.call(1 - instance._constants.transform.zoom,
1 + instance._constants.transform.zoom,
size=batch_size),
mocker.call(-instance._constants.transform.shift,
instance._constants.transform.shift,
size=(batch_size, 2))]
rand_mock = mocker.patch(f"{MODULE_PREFIX}.np.random.uniform",
side_effect=rand_ret)
rotmat_mock = mocker.patch(
f"{MODULE_PREFIX}.cv2.getRotationMatrix2D",
return_value=np.array([[1.0, 0.0, -2.0], [-1.0, 1.0, 5.0]]).astype("float32"))
affine_mock = mocker.patch(f"{MODULE_PREFIX}.cv2.warpAffine")
batch = get_batch(batch_size, size)
get_instance(batch_size, size).transform(batch)
rand_mock.assert_has_calls(rand_calls) # type:ignore
assert rotmat_mock.call_count == batch_size
assert affine_mock.call_count == batch_size
@pytest.mark.parametrize(("size", "batch_size"), ((64, 16), (384, 32)))
def test_image_augmentation_random_flip(size: int,
batch_size: int,
patch_config, # noqa[F811]
mocker: pytest_mock.MockerFixture) -> None:
""" Test that ImageAugmentation.flip_chance executes as expected """
patch_config(cfg, _CONFIG)
batch = get_batch(batch_size, size)
original = batch.copy()
get_instance(batch_size, size).random_flip(batch)
assert original.shape == batch.shape
assert original.dtype == batch.dtype
assert not np.allclose(original, batch)
rand_ret = np.random.rand(batch_size)
rand_mock = mocker.patch(f"{MODULE_PREFIX}.np.random.rand", return_value=rand_ret)
where_mock = mocker.patch(f"{MODULE_PREFIX}.np.where")
batch = get_batch(batch_size, size)
get_instance(batch_size, size).random_flip(batch)
rand_mock.assert_called_once_with(batch_size)
where_mock.assert_called_once()
@pytest.mark.parametrize(("size", "batch_size"), ((64, 16), (384, 32)))
def test_image_augmentation_random_warp(size: int,
batch_size: int,
mocker: pytest_mock.MockerFixture) -> None:
""" Test that ImageAugmentation._random_warp executes as expected """
batch = get_batch(batch_size, size)
instance = get_instance(batch_size, size)
output = instance._random_warp(batch)
assert output.shape == batch.shape
assert output.dtype == batch.dtype
assert not np.allclose(output, batch)
rand_ret = np.random.normal(size=(batch_size, 2, 5, 5), scale=0.02).astype("float32")
rand_mock = mocker.patch(f"{MODULE_PREFIX}.np.random.normal", return_value=rand_ret)
eval_ret = np.ones_like(rand_ret)
eval_mock = mocker.patch(f"{MODULE_PREFIX}.ne.evaluate", return_value=eval_ret)
resize_ret = np.ones((size, size)).astype("float32")
resize_mock = mocker.patch(f"{MODULE_PREFIX}.cv2.resize", return_value=resize_ret)
remap_mock = mocker.patch(f"{MODULE_PREFIX}.cv2.remap")
instance._random_warp(batch)
rand_mock.assert_called_once_with(size=(batch_size, 2, 5, 5),
scale=instance._constants.warp.scale)
eval_mock.assert_called_once()
assert resize_mock.call_count == batch_size * 2
assert remap_mock.call_count == batch_size
@pytest.mark.parametrize(("size", "batch_size"), ((64, 16), (384, 32)))
def test_image_augmentation_random_warp_landmarks(size: int,
batch_size: int,
mocker: pytest_mock.MockerFixture) -> None:
""" Test that ImageAugmentation._random_warp_landmarks executes as expected """
src_points = np.random.random(size=(batch_size, 68, 2)).astype("float32") * size
dst_points = np.random.random(size=(batch_size, 68, 2)).astype("float32") * size
batch = get_batch(batch_size, size)
instance = get_instance(batch_size, size)
output = instance._random_warp_landmarks(batch, src_points, dst_points)
assert output.shape == batch.shape
assert output.dtype == batch.dtype
assert not np.allclose(output, batch)
rand_ret = np.random.normal(size=dst_points.shape, scale=0.01)
rand_mock = mocker.patch(f"{MODULE_PREFIX}.np.random.normal", return_value=rand_ret)
hull_ret = [cv2.convexHull(np.concatenate([src[17:], dst[17:]], axis=0))
for src, dst in zip(src_points.astype("int32"),
(dst_points + rand_ret).astype("int32"))]
hull_mock = mocker.patch(f"{MODULE_PREFIX}.cv2.convexHull", side_effect=hull_ret)
remap_mock = mocker.patch(f"{MODULE_PREFIX}.cv2.remap")
instance._random_warp_landmarks(batch, src_points, dst_points)
rand_mock.assert_called_once_with(size=(dst_points.shape),
scale=instance._constants.warp.lm_scale)
assert hull_mock.call_count == batch_size
assert remap_mock.call_count == batch_size
@pytest.mark.parametrize(("size", "batch_size", "to_landmarks"),
((64, 16, True), (384, 32, False)))
def test_image_augmentation_warp(size: int,
batch_size: int,
to_landmarks: bool,
mocker: pytest_mock.MockerFixture) -> None:
""" Test that ImageAugmentation.warp executes as expected """
kwargs = {}
if to_landmarks:
kwargs["batch_src_points"] = np.random.random(
size=(batch_size, 68, 2)).astype("float32") * size
kwargs["batch_dst_points"] = np.random.random(
size=(batch_size, 68, 2)).astype("float32") * size
batch = get_batch(batch_size, size)
output = get_instance(batch_size, size).warp(batch, to_landmarks, **kwargs)
assert output.shape == batch.shape
assert output.dtype == batch.dtype
assert not np.allclose(output, batch)
if to_landmarks:
with pytest.raises(AssertionError):
get_instance(batch_size, size).warp(batch,
to_landmarks,
batch_src_points=kwargs["batch_src_points"],
batch_dst_points=None)
with pytest.raises(AssertionError):
get_instance(batch_size, size).warp(batch,
to_landmarks,
batch_src_points=None,
batch_dst_points=kwargs["batch_dst_points"])
warp_mock = mocker.patch(f"{MODULE_PREFIX}.ImageAugmentation._random_warp")
warp_lm_mock = mocker.patch(f"{MODULE_PREFIX}.ImageAugmentation._random_warp_landmarks")
get_instance(batch_size, size).warp(batch, to_landmarks, **kwargs)
if to_landmarks:
warp_mock.assert_not_called()
warp_lm_mock.assert_called_once()
else:
warp_mock.assert_called_once()
warp_lm_mock.assert_not_called()
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/training/augmentation_test.py",
"license": "GNU General Public License v3.0",
"lines": 417,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/training/cache_test.py | #!/usr/bin/env python3
""" Pytest unit tests for :mod:`lib.training.cache` """
import os
import typing as T
from threading import Lock
import numpy as np
import pytest
import pytest_mock
from lib.align.constants import LandmarkType
from lib.training import cache as cache_mod
from lib.utils import FaceswapError
from plugins.train import train_config as cfg
from tests.lib.config.helpers import patch_config  # pylint:disable=unused-import # noqa[F401]
# pylint:disable=protected-access,invalid-name,redefined-outer-name
# ## HELPERS ###
MODULE_PREFIX = "lib.training.cache"
_DUMMY_IMAGE_LIST = ["/path/to/img1.png", "~/img2.png", "img3.png"]
def _get_config(centering="face", vertical_offset=0):
""" Return a fresh valid config """
return {"centering": centering,
"vertical_offset": vertical_offset}
STANDARD_CACHE_ARGS = (_DUMMY_IMAGE_LIST, 256, 1.0)
STANDARD_MASK_ARGS = (256, 1.0, "face")
# ## MASK PROCESSING ###
def get_mask_config(penalized_mask_loss=True,
learn_mask=True,
mask_type="extended",
mask_dilation=1.0,
mask_kernel=3,
mask_threshold=4,
mask_eye_multiplier=2,
mask_mouth_multiplier=3):
""" Generate the mask config dictionary with the given arguments """
return {"penalized_mask_loss": penalized_mask_loss,
"learn_mask": learn_mask,
"mask_type": mask_type,
"mask_dilation": mask_dilation,
"mask_blur_kernel": mask_kernel,
"mask_threshold": mask_threshold,
"eye_multiplier": mask_eye_multiplier,
"mouth_multiplier": mask_mouth_multiplier}
_MASK_CONFIG_PARAMS = (
(get_mask_config(True, True, "extended", 1.0, 3, 4, 2, 3), "pass-penalize|learn"),
(get_mask_config(True, False, "components", 0.0, 5, 4, 1, 2), "pass-penalize"),
(get_mask_config(False, True, "custom", -2.0, 6, 1, 3, 1), "pass-learn"),
(get_mask_config(True, True, None, 1.0, 6, 1, 3, 2), "pass-mask-disable1"),
(get_mask_config(False, False, "extended", 1.0, 6, 1, 3, 2), "pass-mask-disable2"),
(get_mask_config(True, True, "extended", 1.0, 1, 3, 1, 1), "pass-multiplier-disable"),
(get_mask_config("Error", True, "extended", 1.0, 1, 3, 2, 3), "fail-penalize"),
(get_mask_config(True, 1.4, "extended", 1.0, 1, 3, 2, 3), "fail-learn"),
(get_mask_config(True, True, 999, 1.0, 1, 3, 2, 3), "fail-type"),
(get_mask_config(True, True, "extended", 23, 1, 3, 2, 3), "fail-dilation"),
(get_mask_config(True, True, "extended", 1.0, 1.2, 3, 2, 3), "fail-kernel"),
(get_mask_config(True, True, "extended", 1.0, 1, "fail", 2, 3), "fail-threshold"),
(get_mask_config(True, True, "extended", 1.0, 1, 3, 3.9, 3), "fail-eye-multi"),
(get_mask_config(True, True, "extended", 1.0, 1, 3, 2, "fail"), "fail-mouth-multi"))
_MASK_CONFIG_IDS = [x[-1] for x in _MASK_CONFIG_PARAMS]
@pytest.mark.parametrize(("config", "status"), _MASK_CONFIG_PARAMS, ids=_MASK_CONFIG_IDS)
def test_MaskConfig(config: dict[str, T.Any],
status: str,
patch_config) -> None: # noqa[F811]
""" Test that cache._MaskConfig dataclass initializes from config """
patch_config(cfg.Loss, config)
retval = cache_mod._MaskConfig()
if status.startswith("pass-mask-disable"):
assert not retval.mask_enabled
else:
assert retval.mask_enabled
if status == "pass-multiplier-disable" or not config["penalized_mask_loss"]:
assert not retval.multiplier_enabled
else:
assert retval.multiplier_enabled
_MASK_INIT_PARAMS = ((64, 0.5, "face", "pass"),
(128, 0.75, "head", "pass"),
(384, 1.0, "legacy", "pass"),
(69.42, 0.75, "head", "fail-size"),
(128, "fail", "head", "fail-coverage"),
(128, 0.75, "fail", "fail-centering"))
_MASK_INIT_IDS = [x[-1] for x in _MASK_INIT_PARAMS]
@pytest.mark.parametrize(("size", "coverage", "centering", "status"),
_MASK_INIT_PARAMS, ids=_MASK_INIT_IDS)
def test_MaskProcessing_init(size,
coverage,
centering,
status: str,
mocker: pytest_mock.MockerFixture) -> None:
""" Test cache._MaskProcessing correctly initializes """
mock_maskconfig = mocker.MagicMock()
mocker.patch(f"{MODULE_PREFIX}._MaskConfig", new=mock_maskconfig)
if not status == "pass":
with pytest.raises(AssertionError):
cache_mod._MaskProcessing(size, coverage, centering)
return
instance = cache_mod._MaskProcessing(size, coverage, centering)
attrs = {"_size": int,
"_coverage": float,
"_centering": str,
"_config": mocker.MagicMock} # Our mocked _MaskConfig
for attr, dtype in attrs.items():
assert attr in instance.__dict__
assert isinstance(instance.__dict__[attr], dtype)
assert all(x in attrs for x in instance.__dict__)
assert instance._size == size
assert instance._coverage == coverage
assert instance._centering == centering
mock_maskconfig.assert_called_once()
def test_MaskProcessing_check_mask_exists(mocker: pytest_mock.MockerFixture) -> None:
""" Test cache._MaskProcessing._check_mask_exists functions as expected """
mock_det_face = mocker.MagicMock()
mock_det_face.mask = ["extended", "components"]
instance = cache_mod._MaskProcessing(*STANDARD_MASK_ARGS) # type:ignore[arg-type]
instance._check_mask_exists("", mock_det_face)
mock_det_face.mask = []
with pytest.raises(FaceswapError):
instance._check_mask_exists("", mock_det_face)
@pytest.mark.parametrize(("dilation", "kernel", "threshold"),
((1.0, 3, 4), (-2.5, 5, 2), (3.3, 7, 9)))
def test_MaskProcessing_preprocess(dilation: float,
kernel: int,
threshold: int,
mocker: pytest_mock.MockerFixture,
patch_config) -> None: # noqa[F811]
""" Test cache._MaskProcessing._preprocess functions as expected """
mock_mask = mocker.MagicMock()
mock_det_face = mocker.MagicMock()
mock_det_face.mask = {"extended": mock_mask}
patch_config(cfg.Loss, get_mask_config(mask_dilation=dilation,
mask_kernel=kernel,
mask_threshold=threshold))
instance = cache_mod._MaskProcessing(*STANDARD_MASK_ARGS) # type:ignore[arg-type]
instance._preprocess(mock_det_face, "extended")
mock_mask.set_dilation.assert_called_once_with(dilation)
mock_mask.set_blur_and_threshold.assert_called_once_with(blur_kernel=kernel,
threshold=threshold)
@pytest.mark.parametrize(
("mask_centering", "train_centering", "coverage", "y_offset", "size", "mask_size"),
(("face", "legacy", 0.75, 0.0, 256, 64),
("legacy", "head", 0.66, -0.25, 128, 128),
("head", "face", 1.0, 0.33, 64, 256)))
def test_MaskProcessing_crop_and_resize(mask_centering: str, # pylint:disable=too-many-locals
train_centering: T.Literal["legacy", "face", "head"],
coverage: float,
y_offset: float,
size: int,
mask_size: int,
mocker: pytest_mock.MockerFixture) -> None:
""" Test cache._MaskProcessing._crop_and_resize functions as expected """
mock_pose = mocker.MagicMock()
mock_pose.offset = {"face": "face_centering",
"legacy": "legacy_centering",
"head": "head_centering"}
mock_det_face = mocker.MagicMock()
mock_det_face.aligned.pose = mock_pose
mock_det_face.aligned.y_offset = y_offset
mock_face_mask = mocker.MagicMock()
    mock_face_mask.__getitem__ = mock_face_mask
mock_face_mask.shape = (mask_size, mask_size)
mock_mask = mocker.MagicMock()
mock_mask.stored_centering = mask_centering
mock_mask.stored_size = mask_size
mock_mask.mask = mock_face_mask
mock_cv2_resize_result = mocker.MagicMock()
mock_cv2_resize_item = mocker.MagicMock()
mock_cv2_resize = mocker.patch(f"{MODULE_PREFIX}.cv2.resize",
return_value=mock_cv2_resize_result)
mock_cv2_resize_result.__getitem__.return_value = mock_cv2_resize_item
mock_cv2_cubic = mocker.patch(f"{MODULE_PREFIX}.cv2.INTER_CUBIC")
mock_cv2_area = mocker.patch(f"{MODULE_PREFIX}.cv2.INTER_AREA")
instance = cache_mod._MaskProcessing(size, coverage, train_centering)
retval = instance._crop_and_resize(mock_det_face, mock_mask)
mock_mask.set_sub_crop.assert_called_once_with(mock_pose.offset[mask_centering],
mock_pose.offset[train_centering],
train_centering,
coverage,
y_offset)
if mask_size == size:
assert retval is mock_face_mask
mock_cv2_resize.assert_not_called()
return
assert retval is mock_cv2_resize_item
interp_used = mock_cv2_cubic if mask_size < size else mock_cv2_area
mock_cv2_resize.assert_called_once_with(mock_face_mask,
(size, size),
interpolation=interp_used)
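# The interpolation expectation follows the usual OpenCV guidance: INTER_AREA
# when shrinking (mask_size > size), INTER_CUBIC when enlarging
# (mask_size < size), and no resize at all when the sizes already match.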
@pytest.mark.parametrize("mask_type", (None, "extended", "components"))
def test_MaskProcessing_get_face_mask(mask_type: str | None,
mocker: pytest_mock.MockerFixture,
patch_config) -> None: # noqa[F811]
""" Test cache._MaskProcessing._get_face_mask functions as expected """
patch_config(cfg, _get_config())
patch_config(cfg.Loss, get_mask_config(mask_type=mask_type))
instance = cache_mod._MaskProcessing(*STANDARD_MASK_ARGS) # type:ignore[arg-type]
assert instance._config.mask_type == mask_type # sanity check
instance._check_mask_exists = mocker.MagicMock() # type:ignore[method-assign]
preprocess_return = "test_preprocess_return"
instance._preprocess = mocker.MagicMock( # type:ignore[method-assign]
return_value="test_preprocess_return")
crop_and_resize_return = mocker.MagicMock()
crop_and_resize_return.shape = (256, 256, 1)
instance._crop_and_resize = mocker.MagicMock( # type:ignore[method-assign]
return_value=crop_and_resize_return)
filename = "test_filename"
detected_face = "test_detected_face"
if mask_type is None: # Mask disabled
assert not instance._config.mask_enabled
retval1 = instance._get_face_mask(filename, detected_face) # type:ignore[arg-type]
assert retval1 is None
instance._check_mask_exists.assert_not_called() # type:ignore[attr-defined]
instance._preprocess.assert_not_called() # type:ignore[attr-defined]
instance._crop_and_resize.assert_not_called() # type:ignore[attr-defined]
else: # Mask enabled
assert instance._config.mask_enabled
retval2 = instance._get_face_mask(filename, detected_face) # type:ignore[arg-type]
assert retval2 is crop_and_resize_return
instance._check_mask_exists.assert_called_once_with( # type:ignore[attr-defined]
filename, detected_face)
instance._preprocess.assert_called_once_with( # type:ignore[attr-defined]
detected_face, instance._config.mask_type)
instance._crop_and_resize.assert_called_once_with( # type:ignore[attr-defined]
detected_face, preprocess_return)
@pytest.mark.parametrize(("eye_multiplier", "mouth_multiplier", "size", "enabled"),
((0, 0, 64, False),
(1, 1, 64, False),
(1, 2, 64, True),
(2, 1, 96, True),
(2, 3, 128, True),
(3, 1, 256, True)))
def test_MaskProcessing_get_localized_mask(eye_multiplier: int,
mouth_multiplier: int,
size: int,
enabled: bool,
mocker: pytest_mock.MockerFixture,
patch_config) -> None: # noqa[F811]
""" Test cache._MaskProcessing._get_localized_mask functions as expected """
args = STANDARD_MASK_ARGS[1:]
patch_config(cfg.Loss, get_mask_config(mask_eye_multiplier=eye_multiplier,
mask_mouth_multiplier=mouth_multiplier))
instance = cache_mod._MaskProcessing(size, *args) # type:ignore[arg-type]
filename = "filename"
detected_face = mocker.MagicMock()
landmark_mask_return_value = mocker.MagicMock()
detected_face.get_landmark_mask = mocker.MagicMock(return_value=landmark_mask_return_value)
for area in ("mouth", "eye"):
retval = instance._get_localized_mask(filename, detected_face, area)
if not enabled:
assert retval is None
detected_face.get_landmark_mask.assert_not_called()
else:
assert retval is landmark_mask_return_value
if enabled:
detected_face.get_landmark_mask.assert_called_with(area, size // 16, 2.5)
if enabled:
assert detected_face.get_landmark_mask.call_count == 2
def test_MaskProcessing_call(mocker: pytest_mock.MockerFixture) -> None:
""" Test cache._MaskProcessing.__call__ functions as expected """
instance = cache_mod._MaskProcessing(*STANDARD_MASK_ARGS) # type:ignore[arg-type]
face_return = "face_mask"
area_return = "area_mask"
instance._get_face_mask = mocker.MagicMock( # type:ignore[method-assign]
return_value=face_return) # type:ignore[method-assign]
instance._get_localized_mask = mocker.MagicMock( # type:ignore[method-assign]
return_value=area_return) # type:ignore[method-assign]
filename = "test_filename"
detected_face = mocker.MagicMock()
detected_face.store_training_masks = mocker.MagicMock()
instance(filename, detected_face)
instance._get_face_mask.assert_called_once_with( # type:ignore[attr-defined]
filename, detected_face)
expected_localized_calls = [mocker.call(filename, detected_face, "eye"),
mocker.call(filename, detected_face, "mouth")]
instance._get_localized_mask.assert_has_calls( # type:ignore[attr-defined]
expected_localized_calls, any_order=False) # pyright:ignore[reportArgumentType]
assert instance._get_localized_mask.call_count == 2 # type:ignore[attr-defined]
detected_face.store_training_masks.assert_called_once_with(
[face_return, area_return, area_return],
delete_masks=True)
# ## CACHE PROCESSING ###
@pytest.fixture
def face_cache_reset_scenario(mocker: pytest_mock.MockerFixture,
request: pytest.FixtureRequest):
""" Build a scenario for cache._check_reset.
request.param = {"caches": dict(Literal["a", "b"], bool],
"side": Literal["a", "b"]}
If the key "a" or "b" exist in the caches dict, then that cache exists in the mocked
cache._FACE_CACHES with a mock representing the return value of the cache.Cache.check_reset()
value as given
The mocked Cache item for the currently testing side is returned, or a default mocked item if
the given side is not meant to be in the _FACE_CACHES dict
"""
cache_dict = {}
for side, val in request.param["caches"].items():
check_mock = mocker.MagicMock()
check_mock.check_reset.return_value = val
cache_dict[side] = check_mock
mocker.patch(f"{MODULE_PREFIX}._FACE_CACHES", new=cache_dict)
return cache_dict.get(request.param["side"], mocker.MagicMock())
_RESET_PARAMS = [({"side": side, "caches": caches}, expected, f"{name}-{side}")
for side in ("a", "b")
for caches, expected, name in [
({}, False, "no-cache"),
({"a": False}, False, "a-exists"),
({"b": False}, False, "b-exists"),
({"a": True, "b": False}, side == "b", "a-reset"),
({"a": False, "b": True}, side == "a", "b-reset"),
({"a": True, "b": True}, True, "both-reset"),
({"a": False, "b": False}, False, "no-reset")]]
_RESET_IDS = [x[-1] for x in _RESET_PARAMS]
_RESET_PARAMS = [x[:-1] for x in _RESET_PARAMS] # type:ignore[misc]
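# Semantics implied by the table above (an assumption about _check_reset drawn
# from the expected values, not from the implementation): a reset flagged on
# the opposite side's cache reports True for this side, so side "a" only sees
# a reset raised by cache "b" and vice versa; a side never reacts to its own
# reset flag here.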
@pytest.mark.parametrize(("face_cache_reset_scenario", "expected"),
_RESET_PARAMS,
ids=_RESET_IDS,
indirect=["face_cache_reset_scenario"])
def test_check_reset(face_cache_reset_scenario, expected): # pylint:disable=redefined-outer-name
""" Test that cache._check_reset functions as expected """
this_cache = face_cache_reset_scenario
assert cache_mod._check_reset(this_cache) == expected
@pytest.mark.parametrize(
("filenames", "size", "coverage_ratio", "centering"),
[(_DUMMY_IMAGE_LIST, 256, 1.0, "face"),
(_DUMMY_IMAGE_LIST[:-1], 96, .75, "head"),
(_DUMMY_IMAGE_LIST[2:], 384, .66, "legacy")])
def test_Cache_init(filenames, size, coverage_ratio, centering, patch_config): # noqa[F811]
""" Test that cache.Cache correctly initializes """
attrs = {"_lock": type(Lock()),
"_cache_info": dict,
"_config": cache_mod._CacheConfig,
"_partially_loaded": list,
"_image_count": int,
"_cache": dict,
"_aligned_landmarks": dict,
"_extract_version": float,
"_mask_prepare": cache_mod._MaskProcessing}
patch_config(cfg, _get_config(centering=centering))
instance = cache_mod.Cache(filenames, size, coverage_ratio)
for attr, attr_type in attrs.items():
assert attr in instance.__dict__
assert isinstance(getattr(instance, attr), attr_type)
for key in instance.__dict__:
assert key in attrs
assert set(instance._cache_info) == {"cache_full", "has_reset"}
assert all(x is False for x in instance._cache_info.values())
assert not instance._partially_loaded
assert not instance._cache
assert instance._image_count == len(filenames)
assert not instance._aligned_landmarks
assert instance._extract_version == 0.0
assert instance._config.size == size
assert instance._config.centering == centering
assert instance._config.coverage == coverage_ratio
def test_Cache_cache_full(mocker: pytest_mock.MockerFixture):
""" Test that cache.Cache.cache_full property behaves correctly """
instance = cache_mod.Cache(*STANDARD_CACHE_ARGS)
instance._lock = mocker.MagicMock()
is_full1 = instance.cache_full
assert not is_full1
instance._lock.__enter__.assert_called_once() # type:ignore[attr-defined]
instance._lock.__exit__.assert_called_once() # type:ignore[attr-defined]
instance._cache_info["cache_full"] = True
is_full2 = instance.cache_full
assert is_full2
# lock not called when cache is full
instance._lock.__enter__.assert_called_once() # type:ignore[attr-defined]
instance._lock.__exit__.assert_called_once() # type:ignore[attr-defined]
def test_Cache_aligned_landmarks(mocker: pytest_mock.MockerFixture):
""" Test that cache.Cache.aligned_landmarks property behaves correcly """
instance = cache_mod.Cache(*STANDARD_CACHE_ARGS)
instance._lock = mocker.MagicMock()
for fname in _DUMMY_IMAGE_LIST:
mock_face = mocker.MagicMock()
mock_face.aligned.landmarks = f"landmarks_for_{fname}"
instance._cache[fname] = mock_face
retval1 = instance.aligned_landmarks
assert len(_DUMMY_IMAGE_LIST) == len(retval1)
assert retval1 == {fname: f"landmarks_for_{fname}" for fname in _DUMMY_IMAGE_LIST}
instance._lock.__enter__.assert_called_once() # type:ignore[attr-defined]
instance._lock.__exit__.assert_called_once() # type:ignore[attr-defined]
retval2 = instance.aligned_landmarks
    assert len(_DUMMY_IMAGE_LIST) == len(retval2)
assert retval2 == {fname: f"landmarks_for_{fname}" for fname in _DUMMY_IMAGE_LIST}
# lock not called after first call has populated
instance._lock.__enter__.assert_called_once() # type:ignore[attr-defined]
instance._lock.__exit__.assert_called_once() # type:ignore[attr-defined]
@pytest.mark.parametrize("size", (64, 96, 128, 256, 384))
def test_Cache_size(size):
""" Test that cache.Cache.size property returns correctly """
instance = cache_mod.Cache(_DUMMY_IMAGE_LIST, size, 1.0)
assert instance.size == size
def test_Cache_check_reset():
""" Test that cache.Cache.check_reset behaves correctly """
instance = cache_mod.Cache(*STANDARD_CACHE_ARGS)
retval1 = instance.check_reset()
assert not retval1
assert not instance._cache_info["has_reset"]
instance._cache_info["has_reset"] = True
retval2 = instance.check_reset()
assert retval2
assert not instance._cache_info["has_reset"]
@pytest.mark.parametrize("filenames",
(_DUMMY_IMAGE_LIST, _DUMMY_IMAGE_LIST[:-1], _DUMMY_IMAGE_LIST[2:]))
def test_Cache_get_items(filenames: list[str]) -> None:
""" Test that cache.Cache.get_items returns correctly """
instance = cache_mod.Cache(filenames, 256, 1.0)
instance._cache = {os.path.basename(f): f"faces_for_{f}" # type:ignore[misc]
for f in filenames}
retval = instance.get_items(filenames)
assert retval == [f"faces_for_{f}" for f in filenames]
@pytest.mark.parametrize("set_flag", (True, False), ids=("set-flag", "no-set-flag"))
def test_Cache_reset_cache(set_flag: bool,
mocker: pytest_mock.MockerFixture,
patch_config) -> None: # noqa[F811]
""" Test that cache.Cache._reset_cache functions correctly """
patch_config(cfg, _get_config(centering="head"))
mock_warn = mocker.MagicMock()
mocker.patch(f"{MODULE_PREFIX}.logger.warning", mock_warn)
instance = cache_mod.Cache(*STANDARD_CACHE_ARGS)
instance._cache = {"test": "cache"} # type:ignore[dict-item]
instance._cache_info["cache_full"] = True
assert instance._config.centering != "legacy"
assert instance._cache
assert instance._cache_info["cache_full"]
instance._reset_cache(set_flag)
assert instance._config.centering == "legacy"
assert not instance._cache
assert instance._cache_info["cache_full"] is False
if set_flag:
mock_warn.assert_called_once()
@pytest.mark.parametrize("png_meta",
({"source": {"alignments_version": 1.0}},
{"source": {"alignments_version": 2.0}},
{"source": {"alignments_version": 2.2}}),
ids=("v1.0", "v2.0", "v2.2"))
def test_Cache_validate_version(png_meta, mocker):
""" Test that cache.Cache._validate_version executes correctly """
instance = cache_mod.Cache(*STANDARD_CACHE_ARGS)
instance._reset_cache = mocker.MagicMock()
fname = "test_filename.png"
version = png_meta["source"]["alignments_version"]
if version == 1.0:
for centering in ("legacy", "face"):
instance._extract_version = 0.0
instance._config.centering = centering
instance._validate_version(png_meta, fname)
if centering == "legacy":
instance._reset_cache.assert_not_called()
else:
instance._reset_cache.assert_called_once_with(True)
assert instance._extract_version == version
else:
instance._validate_version(png_meta, fname)
instance._reset_cache.assert_not_called()
assert instance._extract_version == version
instance._extract_version = 1.0 # Legacy alignments have been seen
if version > 1.0: # Newer alignments inbound
with pytest.raises(FaceswapError):
instance._validate_version(png_meta, fname)
else:
instance._validate_version(png_meta, fname)
instance._extract_version = 2.0 # Newer alignments have been seen
if version < 2.0: # Legacy alignments inbound
with pytest.raises(FaceswapError):
instance._validate_version(png_meta, fname)
return # Exit early on 1.0 because cannot pass any more tests
instance._validate_version(png_meta, fname)
if version > 2.0:
assert instance._extract_version == 2.0 # Defaulted to lowest version
instance._extract_version = 2.5
instance._validate_version(png_meta, fname)
assert instance._extract_version == version # Defaulted to lowest version
_DET_FACE_PARAMS = ((64, 0.5, 0, 1.0),
(96, 0.75, 1, 1.0),
(256, 0.66, 2, 2.0),
(384, 1.0, 3.0, 2.2))
_DET_FACE_IDS = [f"size:{x[0]}|coverage:{x[1]}|y-offset:{x[2]}|extract-vers:{x[3]}"
for x in _DET_FACE_PARAMS]
@pytest.mark.parametrize(("size", "coverage", "y_offset", "extract_version"),
_DET_FACE_PARAMS,
ids=_DET_FACE_IDS)
def test_Cache_load_detected_face(size: int,
coverage: float,
y_offset: int | float,
extract_version: float,
mocker: pytest_mock.MockerFixture,
patch_config) -> None: # noqa[F811]
""" Test that cache.Cache._load_detected_faces executes correctly """
patch_config(cfg, _get_config(vertical_offset=y_offset))
instance = cache_mod.Cache(_DUMMY_IMAGE_LIST, size, coverage)
instance._extract_version = extract_version
alignments = {} # type:ignore[var-annotated]
mock_det_face = mocker.MagicMock()
mock_det_face.from_png_meta = mocker.MagicMock()
mock_det_face.load_aligned = mocker.MagicMock()
mocker.patch(f"{MODULE_PREFIX}.DetectedFace", return_value=mock_det_face)
retval = instance._load_detected_face("", alignments) # type:ignore[arg-type]
assert retval is mock_det_face
mock_det_face.from_png_meta.assert_called_once_with(alignments)
mock_det_face.load_aligned.assert_called_once_with(None,
size=instance._config.size,
centering=instance._config.centering,
coverage_ratio=instance._config.coverage,
y_offset=y_offset / 100.,
is_aligned=True,
is_legacy=extract_version == 1.0)
@pytest.mark.parametrize("partially_loaded", (True, False), ids=("partial", "full"))
def test_Cache_populate_cache(partially_loaded: bool,
mocker: pytest_mock.MockerFixture) -> None:
""" Test that cache.Cache._populate_cache executes correctly """
already_cached = ["/path/to/img4.png", "/path/img5.png"]
needs_cache = _DUMMY_IMAGE_LIST
filenames = _DUMMY_IMAGE_LIST + already_cached
metadata = [{"alignments": f"{f}_alignments"} for f in filenames]
instance = cache_mod.Cache(*STANDARD_CACHE_ARGS)
instance._validate_version = mocker.MagicMock() # type:ignore[method-assign]
instance._mask_prepare = mocker.MagicMock()
instance._cache = {os.path.basename(f): "existing" # type:ignore[misc]
for f in filenames if f not in needs_cache}
mock_detected_faces = {f: mocker.MagicMock() for f in needs_cache}
if partially_loaded:
instance._cache.update({os.path.basename(f): mock_detected_faces[f] for f in needs_cache})
instance._partially_loaded = [os.path.basename(f) for f in filenames] # Add our partials
else:
instance._load_detected_face = mocker.MagicMock( # type:ignore[method-assign]
side_effect=[mock_detected_faces[f] for f in needs_cache])
# Call the function
instance._populate_cache(needs_cache, metadata, filenames) # type:ignore[arg-type]
expected_validate = [mocker.call(metadata[idx], f) for idx, f in enumerate(needs_cache)]
instance._validate_version.assert_has_calls(expected_validate, # type:ignore[attr-defined]
any_order=False)
assert instance._validate_version.call_count == len(needs_cache) # type:ignore[attr-defined]
expected_mask_prepare = [mocker.call(f, mock_detected_faces[f]) for f in needs_cache]
instance._mask_prepare.assert_has_calls(expected_mask_prepare, # type:ignore[attr-defined]
any_order=False)
assert instance._mask_prepare.call_count == len(needs_cache) # type:ignore[attr-defined]
assert len(instance._cache) == len(filenames)
for filename in filenames:
key = os.path.basename(filename)
assert key in instance._cache
if filename in needs_cache: # item got added/updated
assert instance._cache[key] == mock_detected_faces[filename]
else: # item pre-existed
assert instance._cache[key] == "existing"
if partially_loaded:
assert instance._partially_loaded == [os.path.basename(f) for f in filenames
if f not in needs_cache]
@pytest.mark.parametrize("scenario", ("read-error", "size-error", "success"))
def test_Cache_get_batch_with_metadata(scenario: str, mocker: pytest_mock.MockerFixture) -> None:
""" Test that cache.Cache._get_batch_with_metadata executes correctly """
instance = cache_mod.Cache(*STANDARD_CACHE_ARGS)
filenames = ["list", "of", "test", "filenames"]
mock_read_image_batch = mocker.MagicMock()
if scenario == "read-error":
mock_read_image_batch.side_effect = ValueError("inhomogeneous")
else:
mock_return = (mocker.MagicMock(), {"test": "meta"})
if scenario == "size-error":
mock_return[0].shape = (len(filenames), )
else:
mock_return[0].shape = (len(filenames), 64, 64, 3)
mock_read_image_batch.return_value = mock_return
mocker.patch(f"{MODULE_PREFIX}.read_image_batch", new=mock_read_image_batch)
if scenario != "success":
with pytest.raises(FaceswapError):
instance._get_batch_with_metadata(filenames)
mock_read_image_batch.assert_called_once_with(filenames, with_metadata=True)
return
retval = instance._get_batch_with_metadata(filenames)
mock_read_image_batch.assert_called_once_with(filenames, with_metadata=True)
assert retval == mock_return # pyright:ignore[reportPossiblyUnboundVariable]
@pytest.mark.parametrize("scenario", ("full", "not-full", "partial"))
def test_Cache_update_cache_full(scenario: str, mocker: pytest_mock.MockerFixture) -> None:
""" Test that cache.Cache._update_cache_full executes correctly """
mock_verbose = mocker.patch(f"{MODULE_PREFIX}.logger.verbose")
filenames = ["test", "file", "names"]
instance = cache_mod.Cache(*STANDARD_CACHE_ARGS)
instance._image_count = 10
assert instance._cache_info["cache_full"] is False
assert not instance._cache
assert not instance._partially_loaded
if scenario == "full":
instance._cache = {i: i for i in range(10)} # type:ignore[misc]
if scenario == "patial":
instance._cache = {i: i for i in range(10)} # type:ignore[misc]
instance._partially_loaded = filenames.copy()
instance._update_cache_full(filenames)
if scenario == "full":
assert instance._cache_info["cache_full"] is True
mock_verbose.assert_called_once()
else:
assert instance._cache_info["cache_full"] is False
mock_verbose.assert_not_called()
@pytest.mark.parametrize("scenario", ("full", "partial", "empty", "needs-reset"))
def test_Cache_cache_metadata(scenario: str, mocker: pytest_mock.MockerFixture) -> None:
""" Test that cache.Cache.cache_metadata executes correctly """
mock_check_reset = mocker.patch(f"{MODULE_PREFIX}._check_reset")
mock_check_reset.return_value = scenario == "needs-reset"
mock_return_batch = mocker.MagicMock()
mock_read_image_batch = mocker.patch(f"{MODULE_PREFIX}.read_image_batch",
return_value=mock_return_batch)
instance = cache_mod.Cache(*STANDARD_CACHE_ARGS)
filenames = _DUMMY_IMAGE_LIST.copy()
if scenario in ("full", "partial"):
instance._cache = {os.path.basename(f): f for f in filenames} # type:ignore[misc]
if scenario == "partial":
instance._partially_loaded = [os.path.basename(f) for f in filenames]
instance._lock = mocker.MagicMock()
instance._reset_cache = mocker.MagicMock() # type:ignore[method-assign]
returned_meta = {"test": "meta"}
instance._get_batch_with_metadata = mocker.MagicMock( # type:ignore[method-assign]
return_value=(mock_return_batch, returned_meta))
instance._populate_cache = mocker.MagicMock() # type:ignore[method-assign]
instance._update_cache_full = mocker.MagicMock() # type:ignore[method-assign]
retval = instance.cache_metadata(filenames) # Call
instance._lock.__enter__.assert_called_once() # type:ignore[attr-defined]
instance._lock.__exit__.assert_called_once() # type:ignore[attr-defined]
mock_check_reset.assert_called_once_with(instance)
if scenario == "needs-reset":
instance._reset_cache.assert_called_once_with(False) # type:ignore[attr-defined]
else:
instance._reset_cache.assert_not_called() # type:ignore[attr-defined]
if scenario == "full":
mock_read_image_batch.assert_called_once_with(filenames)
instance._get_batch_with_metadata.assert_not_called() # type:ignore[attr-defined]
instance._populate_cache.assert_not_called() # type:ignore[attr-defined]
instance._update_cache_full.assert_not_called() # type:ignore[attr-defined]
else:
mock_read_image_batch.assert_not_called()
instance._get_batch_with_metadata.assert_called_once_with( # type:ignore[attr-defined]
filenames)
instance._populate_cache.assert_called_once_with( # type:ignore[attr-defined]
filenames, returned_meta, filenames)
instance._update_cache_full.assert_called_once_with(filenames) # type:ignore[attr-defined]
assert retval is mock_return_batch
@pytest.mark.parametrize("scenario", ("fail-meta", "fail-landmarks", "success"))
def test_Cache_pre_fill(scenario: str, mocker: pytest_mock.MockerFixture) -> None:
""" Test that cache.Cache.prefill executes correctly """
filenames = _DUMMY_IMAGE_LIST.copy()
mock_read_image_batch = mocker.patch(f"{MODULE_PREFIX}.read_image_meta_batch")
side_effect_read_image_batch = [(f, {}) for f in filenames] # type:ignore[var-annotated]
if scenario != "fail-meta": # Set successful return data
for effect in side_effect_read_image_batch:
effect[1]["itxt"] = {"alignments": [1, 2, 3]}
mock_read_image_batch.side_effect = [side_effect_read_image_batch]
instance = cache_mod.Cache(*STANDARD_CACHE_ARGS)
instance._lock = mocker.MagicMock()
instance._validate_version = mocker.MagicMock() # type:ignore[method-assign]
mock_detected_faces = [mocker.MagicMock() for _ in filenames]
for m in mock_detected_faces:
m.aligned.landmark_type = (LandmarkType.LM_2D_68 if scenario == "success" else "fail")
instance._load_detected_face = mocker.MagicMock( # type:ignore[method-assign]
side_effect=mock_detected_faces)
if scenario in ("fail-meta", "fail-landmarks"):
with pytest.raises(FaceswapError):
instance.pre_fill(filenames, "a")
instance._lock.__enter__.assert_called_once() # type:ignore[attr-defined]
instance._lock.__exit__.assert_called_once() # type:ignore[attr-defined]
mock_read_image_batch.assert_called_once_with(filenames)
if scenario == "fail-meta":
instance._validate_version.assert_not_called() # type:ignore[attr-defined]
instance._load_detected_face.assert_not_called() # type:ignore[attr-defined]
else:
meta = side_effect_read_image_batch[0][1]["itxt"]
instance._validate_version.assert_called_once_with( # type:ignore[attr-defined]
meta, filenames[0])
instance._load_detected_face.assert_called_once_with( # type:ignore[attr-defined]
filenames[0], meta["alignments"])
return
# success
instance.pre_fill(filenames, "a")
instance._lock.__enter__.assert_called_once() # type:ignore[attr-defined]
instance._lock.__exit__.assert_called_once() # type:ignore[attr-defined]
mock_read_image_batch.assert_called_once_with(filenames)
fname_calls = [x[0] for x in side_effect_read_image_batch]
meta_calls = [x[1]["itxt"] for x in side_effect_read_image_batch]
call_validate = [mocker.call(l, f) for f, l in zip(fname_calls, meta_calls)]
call_det_face = [mocker.call(f, l["alignments"]) for f, l in zip(fname_calls, meta_calls)]
instance._validate_version.assert_has_calls( # type:ignore[attr-defined]
call_validate, any_order=False) # type:ignore[attr-defined]
assert instance._validate_version.call_count == len(filenames) # type:ignore[attr-defined]
instance._load_detected_face.assert_has_calls( # type:ignore[attr-defined]
call_det_face, any_order=False) # type:ignore[attr-defined]
assert instance._load_detected_face.call_count == len(filenames) # type:ignore[attr-defined]
assert instance._cache == {os.path.basename(f): d for f, d in zip(filenames,
mock_detected_faces)}
assert instance._partially_loaded == [os.path.basename(f) for f in filenames]
_PARAMS_GET = (("a", _DUMMY_IMAGE_LIST, 256, 1.),
("b", _DUMMY_IMAGE_LIST, 256, 1.),
("c", _DUMMY_IMAGE_LIST, 256, 1.),
("a", None, 256, 1,),
("a", _DUMMY_IMAGE_LIST, None, 1.),
("a", _DUMMY_IMAGE_LIST, 256, None))
_IDS_GET = ("pass-a", "pass-b", "fail-side", "fail-no-filenames",
"fail-no-size", "fail-no-coverage")
@pytest.mark.parametrize(("side", "filenames", "size", "coverage_ratio", "status"),
(x + (y,) for x, y in zip(_PARAMS_GET, _IDS_GET)),
ids=_IDS_GET)
def test_get_cache_initial(side: str,
filenames: list[str],
size: int,
coverage_ratio: float,
status: str,
mocker: pytest_mock.MockerFixture) -> None:
""" Test cache.get_cache function when the cache does not yet exist """
mocker.patch(f"{MODULE_PREFIX}._FACE_CACHES", new={})
patched_cache = mocker.patch(f"{MODULE_PREFIX}.Cache")
if status.startswith("fail"):
with pytest.raises(AssertionError):
cache_mod.get_cache(side, filenames, size, coverage_ratio) # type:ignore[arg-type]
patched_cache.assert_not_called()
return
retval = cache_mod.get_cache(side, filenames, size, coverage_ratio) # type:ignore[arg-type]
assert side in cache_mod._FACE_CACHES
patched_cache.assert_called_once_with(filenames, size, coverage_ratio)
assert cache_mod._FACE_CACHES[side] is patched_cache.return_value
assert retval is patched_cache.return_value
retval2 = cache_mod.get_cache(side, filenames, size, coverage_ratio) # type:ignore[arg-type]
patched_cache.assert_called_once() # Not called again
assert retval2 is retval
_IDS_GET2 = ("pass-a", "pass-b", "fail-side", "pass-no-filenames",
"pass-no-size", "pass-no-coverage")
@pytest.mark.parametrize(("side", "filenames", "size", "coverage_ratio", "status"),
(x + (y,) for x, y in zip(_PARAMS_GET, _IDS_GET2)),
ids=_IDS_GET2)
def test_get_cache_exists(side: str,
filenames: list[str],
size: int,
coverage_ratio: float,
status: str,
mocker: pytest_mock.MockerFixture) -> None:
""" Test cache.get_cache function when the cache exists """
mocker.patch(f"{MODULE_PREFIX}._FACE_CACHES", new={"a": mocker.MagicMock(),
"b": mocker.MagicMock()})
patched_cache = mocker.patch(f"{MODULE_PREFIX}.Cache")
if status.startswith("fail"):
with pytest.raises(AssertionError):
cache_mod.get_cache(side, filenames, size, coverage_ratio) # type:ignore[arg-type]
patched_cache.assert_not_called()
return
retval = cache_mod.get_cache(side, filenames, size, coverage_ratio) # type:ignore[arg-type]
patched_cache.assert_not_called()
assert retval is cache_mod._FACE_CACHES[side]
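# For reference, a minimal sketch of the memoised factory behaviour asserted by
# the two tests above. This is a hypothetical illustration, not the faceswap
# implementation: the arguments are only required (and validated) when a side's
# cache is first built; later calls for that side return the stored instance.
_SKETCH_CACHES: dict = {}
def _get_cache_sketch(side, filenames, size, coverage_ratio):
    """ Hypothetical illustration of the cache.get_cache contract """
    if side not in _SKETCH_CACHES:
        assert filenames is not None and size is not None and coverage_ratio is not None
        _SKETCH_CACHES[side] = cache_mod.Cache(filenames, size, coverage_ratio)
    return _SKETCH_CACHES[side]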
# ## Ring Buffer ## #
_RING_BUFFER_PARAMS = ((2, (384, 384, 3), 2, "uint8"),
(16, (128, 128, 3), 5, "float32"),
(32, (64, 64, 3), 4, "int32"))
_RING_BUFFER_IDS = [f"bs{x[0]}|{x[1][0]}px|buffer-size{x[2]}|dtype-{x[3]}"
for x in _RING_BUFFER_PARAMS]
@pytest.mark.parametrize(("batch_size", "image_shape", "buffer_size", "dtype"),
((2, (384, 384, 3), 2, "uint8"),
(16, (128, 128, 3), 5, "float32"),
(32, (64, 64, 3), 4, "int32")),
ids=_RING_BUFFER_IDS)
def test_RingBuffer_init(batch_size, image_shape, buffer_size, dtype):
""" test cache.RingBuffer initializes correctly """
attrs = {"_max_index": int, "_index": int, "_buffer": list}
instance = cache_mod.RingBuffer(batch_size, image_shape, buffer_size, dtype)
for attr, attr_type in attrs.items():
assert attr in instance.__dict__
assert isinstance(getattr(instance, attr), attr_type)
for key in instance.__dict__:
assert key in attrs
assert instance._max_index == buffer_size - 1
assert instance._index == 0
assert len(instance._buffer) == buffer_size
assert all(isinstance(b, np.ndarray) for b in instance._buffer)
assert all(b.shape == (batch_size, *image_shape) for b in instance._buffer)
assert all(b.dtype == dtype for b in instance._buffer)
@pytest.mark.parametrize(("batch_size", "image_shape", "buffer_size", "dtype"),
((2, (384, 384, 3), 2, "uint8"),
(16, (128, 128, 3), 5, "float32"),
(32, (64, 64, 3), 4, "int32")),
ids=_RING_BUFFER_IDS)
def test_RingBuffer_call(batch_size, image_shape, buffer_size, dtype):
""" Test calling cache.RingBuffer works correctly """
instance = cache_mod.RingBuffer(batch_size, image_shape, buffer_size, dtype)
for i in range(buffer_size * 3):
retval = instance()
assert isinstance(retval, np.ndarray)
assert retval.shape == (batch_size, *image_shape)
assert retval.dtype == dtype
if i % buffer_size == buffer_size - 1:
assert instance._index == 0
else:
assert instance._index == i % buffer_size + 1
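# For reference, a minimal sketch of the ring-buffer contract asserted above.
# This is a hypothetical illustration, not the faceswap implementation:
# `buffer_size` arrays are allocated once and handed out cyclically, so the
# caller reuses memory rather than allocating a fresh batch array per call.
class _RingBufferSketch:
    """ Hypothetical minimal ring buffer matching the tested behaviour """
    def __init__(self, batch_size, image_shape, buffer_size, dtype="uint8"):
        self._max_index = buffer_size - 1
        self._index = 0
        self._buffer = [np.empty((batch_size, *image_shape), dtype=dtype)
                        for _ in range(buffer_size)]
    def __call__(self):
        retval = self._buffer[self._index]  # hand out the current slot
        # Advance, wrapping back to the first slot after the last one
        self._index = 0 if self._index == self._max_index else self._index + 1
        return retval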
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/training/cache_test.py",
"license": "GNU General Public License v3.0",
"lines": 782,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/training/lr_finder_test.py | #!/usr/bin/env python3
""" Unit tests for Learning Rate Finder. """
import pytest
import pytest_mock
import numpy as np
from lib.training.lr_finder import LearningRateFinder
from plugins.train import train_config as cfg
# pylint:disable=unused-import
from tests.lib.config.helpers import patch_config # noqa:[F401]
# pylint:disable=protected-access,invalid-name,redefined-outer-name
@pytest.fixture
def _trainer_mock(patch_config, mocker: pytest_mock.MockFixture): # noqa:[F811]
""" Generate a mocked model and feeder object and patch user config items """
def _apply_patch(iters=1000, mode="default", strength="default"):
patch_config(cfg, {"lr_finder_iterations": iters})
patch_config(cfg, {"lr_finder_mode": mode})
patch_config(cfg, {"lr_finder_strength": strength})
trainer = mocker.MagicMock()
model = mocker.MagicMock()
model.name = "TestModel"
optimizer = mocker.MagicMock()
trainer._plugin.model = model
trainer._plugin.model.model.optimizer = optimizer
return trainer, model, optimizer
return _apply_patch
_STRENGTH_LOOKUP = {"default": 10, "aggressive": 5, "extreme": 2.5}
_LR_CONF = ((20, "graph_and_set", "default"),
(500, "set", "aggressive"),
(1000, "graph_and_exit", "extreme"))
_LR_CONF_PARAMS = ("iters", "mode", "strength")
_LR_CMDS = ((4, 0.98), (8, 0.66), (2, 0.33))
_LR_CMDS_PARAMS = ("stop_factor", "beta")
_LR_CMDS_IDS = [f"stop:{x[0]}|beta:{x[1]}" for x in _LR_CMDS]
@pytest.mark.parametrize(_LR_CONF_PARAMS, _LR_CONF)
@pytest.mark.parametrize(_LR_CMDS_PARAMS, _LR_CMDS, ids=_LR_CMDS_IDS)
def test_LearningRateFinder_init(iters, mode, strength, stop_factor, beta, _trainer_mock):
""" Test lib.train.LearingRateFinder.__init__ """
trainer, model, optimizer = _trainer_mock(iters, mode, strength)
lrf = LearningRateFinder(trainer, stop_factor=stop_factor, beta=beta)
assert lrf._trainer is trainer
assert lrf._model is model
assert lrf._optimizer is optimizer
assert lrf._start_lr == 1e-10
assert lrf._stop_factor == stop_factor
assert lrf._beta == beta
_BATCH_END = ((1, 0.01, 1e-5, 0.5),
(27, 0.01, 1e-5, 1e-6),
(42, 0.001, 1e-5, 0.002),)
_BATCH_END_PARAMS = ("iteration", "loss", "learning_rate", "best")
_BATCH_END_IDS = [f"iter:{x[0]}|loss:{x[1]}|lr:{x[2]}" for x in _BATCH_END]
@pytest.mark.parametrize(_LR_CMDS_PARAMS, _LR_CMDS, ids=_LR_CMDS_IDS)
@pytest.mark.parametrize(_BATCH_END_PARAMS, _BATCH_END, ids=_BATCH_END_IDS)
def test_LearningRateFinder_on_batch_end(iteration,
loss,
learning_rate,
best,
stop_factor,
beta,
_trainer_mock,
mocker):
""" Test lib.train.LearingRateFinder._on_batch_end """
trainer, model, optimizer = _trainer_mock()
lrf = LearningRateFinder(trainer, stop_factor=stop_factor, beta=beta)
optimizer.learning_rate.assign = mocker.MagicMock()
optimizer.learning_rate.numpy = mocker.MagicMock(return_value=learning_rate)
initial_avg = lrf._loss["avg"]
lrf._loss["best"] = best
lrf._on_batch_end(iteration, loss)
assert lrf._metrics["learning_rates"][-1] == learning_rate
assert lrf._loss["avg"] == (lrf._beta * initial_avg) + ((1 - lrf._beta) * loss)
assert lrf._metrics["losses"][-1] == lrf._loss["avg"] / (1 - (lrf._beta ** iteration))
if iteration > 1 and lrf._metrics["losses"][-1] > lrf._stop_factor * lrf._loss["best"]:
assert model.model.stop_training is True
optimizer.learning_rate.assign.assert_not_called()
return
if iteration == 1:
assert lrf._loss["best"] == lrf._metrics["losses"][-1]
assert model.model.stop_training is not True
optimizer.learning_rate.assign.assert_called_with(
learning_rate * lrf._lr_multiplier)
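# The assertions above encode a bias-corrected exponential moving average:
# avg_i = beta * avg_{i-1} + (1 - beta) * loss_i, reported as
# avg_i / (1 - beta ** i) so early iterations are not biased towards zero.
# A hypothetical standalone sketch of that smoothing (illustrative only):
def _smoothed_loss_sketch(losses, beta=0.98):
    """ Hypothetical illustration of the smoothing asserted in the test above """
    avg = 0.0
    smoothed = []
    for iteration, loss in enumerate(losses, start=1):
        avg = (beta * avg) + ((1 - beta) * loss)
        smoothed.append(avg / (1 - beta ** iteration))  # bias correction
    return smoothed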
@pytest.mark.parametrize(_LR_CONF_PARAMS, _LR_CONF)
def test_LearningRateFinder_train(iters, # pylint:disable=too-many-locals
mode,
strength,
_trainer_mock,
mocker):
""" Test lib.train.LearingRateFinder._train """
trainer, _, _ = _trainer_mock(iters, mode, strength)
mock_loss_return = np.random.rand(2).tolist()
trainer.train_one_batch = mocker.MagicMock(return_value=mock_loss_return)
lrf = LearningRateFinder(trainer)
lrf._on_batch_end = mocker.MagicMock()
lrf._update_description = mocker.MagicMock()
lrf._train()
trainer.train_one_batch.assert_called()
assert trainer.train_one_batch.call_count == iters
train_call_args = [mocker.call(x + 1, mock_loss_return[0]) for x in range(iters)]
assert lrf._on_batch_end.call_args_list == train_call_args
lrf._update_description.assert_called()
assert lrf._update_description.call_count == iters
# NaN break
mock_loss_return = (np.nan, np.nan)
trainer.train_one_batch = mocker.MagicMock(return_value=mock_loss_return)
lrf._train()
assert trainer.train_one_batch.call_count == 1 # Called once
    assert lrf._update_description.call_count == iters  # Not called again
    assert lrf._on_batch_end.call_count == iters  # Not called again
def test_LearningRateFinder_rebuild_optimizer(_trainer_mock):
""" Test lib.train.LearingRateFinder._rebuild_optimizer """
trainer, _, _ = _trainer_mock()
lrf = LearningRateFinder(trainer)
class Dummy:
""" Dummy Optimizer"""
name = "test"
def get_config(self):
"""Dummy get_config"""
return {}
opt = Dummy()
new_opt = lrf._rebuild_optimizer(opt)
assert isinstance(new_opt, Dummy) and opt is not new_opt
@pytest.mark.parametrize(_LR_CONF_PARAMS, _LR_CONF)
@pytest.mark.parametrize("new_lr", (1e-4, 3.5e-5, 9.3e-6))
def test_LearningRateFinder_reset_model(iters, mode, strength, new_lr, _trainer_mock, mocker):
""" Test lib.train.LearingRateFinder._reset_model """
trainer, model, optimizer = _trainer_mock(iters, mode, strength)
model.state.add_lr_finder = mocker.MagicMock()
model.state.save = mocker.MagicMock()
model.model.load_weights = mocker.MagicMock()
old_optimizer = optimizer
new_optimizer = mocker.MagicMock()
def compile_side_effect(*args, **kwargs): # pylint:disable=unused-argument
""" Side effect for model.compile"""
model.model.optimizer = new_optimizer
model.model.compile.side_effect = compile_side_effect
lrf = LearningRateFinder(trainer)
lrf._rebuild_optimizer = mocker.MagicMock()
lrf._reset_model(1e-5, new_lr)
model.state.add_lr_finder.assert_called_with(new_lr)
model.state.save.assert_called_once()
if mode == "graph_and_exit":
lrf._rebuild_optimizer.assert_not_called()
model.model.compile.assert_not_called()
model.model.load_weights.assert_not_called()
assert model.model.optimizer is old_optimizer
new_optimizer.learning_rate.assign.assert_not_called()
else:
lrf._rebuild_optimizer.assert_called_once_with(old_optimizer)
model.model.load_weights.assert_called_once()
model.model.compile.assert_called_once()
assert model.model.optimizer is new_optimizer
new_optimizer.learning_rate.assign.assert_called_once_with(new_lr)
_LR_FIND = (
(True, [0.100, 0.050, 0.025], 0.025, [1e-5, 1e-4, 1e-3], "model_exist"),
(False, [0.100, 0.050, 0.025], 0.025, [1e-5, 1e-4, 1e-3], "no_model"),
(True, [0.100, 0.050, 0.025], 0.025, [1e-5, 1e-4, 1e-10], "low_lr"),
)
_LR_PARAMS_FIND = ("exists", "losses", "best", "learning_rates")
@pytest.mark.parametrize(_LR_PARAMS_FIND,
[x[:-1] for x in _LR_FIND],
ids=[x[-1] for x in _LR_FIND])
@pytest.mark.parametrize(_LR_CONF_PARAMS, _LR_CONF)
@pytest.mark.parametrize(_LR_CMDS_PARAMS, _LR_CMDS[0:1])
def test_LearningRateFinder_find(iters, # pylint:disable=too-many-arguments,too-many-positional-arguments # noqa[E501]
mode,
strength,
stop_factor,
beta,
exists,
losses,
best,
learning_rates,
_trainer_mock,
mocker):
""" Test lib.train.LearingRateFinder.find """
# pylint:disable=too-many-locals
trainer, model, optimizer = _trainer_mock(iters, mode, strength)
model.io.model_exists = exists
model.io.save = mocker.MagicMock()
original_lr = float(np.random.rand())
optimizer.learning_rate.numpy = mocker.MagicMock(return_value=original_lr)
optimizer.learning_rate.assign = mocker.MagicMock()
mocker.patch("shutil.rmtree")
lrf = LearningRateFinder(trainer, stop_factor=stop_factor, beta=beta)
train_mock = mocker.MagicMock()
plot_mock = mocker.MagicMock()
reset_mock = mocker.MagicMock()
lrf._train = train_mock
lrf._plot_loss = plot_mock
lrf._reset_model = reset_mock
lrf._metrics = {"losses": losses, "learning_rates": learning_rates}
lrf._loss = {"best": best}
result = lrf.find()
if exists:
        model.io.save.assert_not_called()
else:
model.io.save.assert_called_once()
optimizer.learning_rate.assign.assert_called_with(lrf._start_lr)
train_mock.assert_called_once()
new_lr = learning_rates[losses.index(best)] / _STRENGTH_LOOKUP[strength]
if new_lr < 1e-9:
plot_mock.assert_not_called()
reset_mock.assert_not_called()
assert not result
return
plot_mock.assert_called_once()
reset_mock.assert_called_once_with(original_lr, new_lr)
assert result
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/training/lr_finder_test.py",
"license": "GNU General Public License v3.0",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/training/lr_warmup_test.py | #!/usr/bin/env python3
""" Pytest unit tests for :mod:`lib.training.lr_warmup` """
import pytest
import pytest_mock
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import SGD
from lib.training import LearningRateWarmup
# pylint:disable=protected-access,redefined-outer-name
@pytest.fixture
def model_fixture():
""" Model fixture for testing LR Warmup """
inp = Input((4, 4, 3))
var_x = Dense(8)(inp)
model = Model(inputs=inp, outputs=var_x)
model.compile(optimizer=SGD(), loss="mse")
return model
_LR_STEPS = [(1e-5, 100),
(3.4e-6, 250),
(9e-4, 599),
(6e-5, 1000)]
_LR_STEPS_IDS = [f"lr:{x[0]}|steps:{x[1]}" for x in _LR_STEPS]
@pytest.mark.parametrize(("target_lr", "steps"), _LR_STEPS, ids=_LR_STEPS_IDS)
def test_init(model_fixture: Model, target_lr: float, steps: int) -> None:
""" Test class initializes correctly """
instance = LearningRateWarmup(model_fixture, target_lr, steps)
attrs = ["_model", "_target_lr", "_steps", "_current_lr", "_current_step", "_reporting_points"]
assert all(a in instance.__dict__ for a in attrs)
assert all(a in attrs for a in instance.__dict__)
assert instance._current_lr == 0.0
assert instance._current_step == 0
assert isinstance(instance._model, Model)
assert instance._target_lr == target_lr
assert instance._steps == steps
assert len(instance._reporting_points) == 11
assert all(isinstance(x, int) for x in instance._reporting_points)
assert instance._reporting_points == [int(steps * i / 10) for i in range(11)]
_NOTATION = [(1e-5, "1.0e-05"),
(3.45489e-6, "3.5e-06"),
(0.0004, "4.0e-04"),
(0.1234, "1.2e-01")]
@pytest.mark.parametrize(("value", "expected"), _NOTATION, ids=[x[1] for x in _NOTATION])
def test_format_notation(value: float, expected: str) -> None:
""" Test floats format to string correctly """
result = LearningRateWarmup._format_notation(value)
assert result == expected
_LR_STEPS_CURRENT = [(1e-5, 100, 79),
(3.4e-6, 250, 250),
(9e-4, 599, 0),
(6e-5, 1000, 12)]
_LR_STEPS_CURRENT_IDS = [f"lr:{x[0]}|steps:{x[1]}|current_step:{x[2]}" for x in _LR_STEPS_CURRENT]
@pytest.mark.parametrize(("target_lr", "steps", "current_step"),
_LR_STEPS_CURRENT,
ids=_LR_STEPS_CURRENT_IDS)
def test_set_current_learning_rate(model_fixture: Model,
target_lr: float,
steps: int,
current_step: int) -> None:
""" Test that learning rate is set correctly """
instance = LearningRateWarmup(model_fixture, target_lr, steps)
instance._current_step = current_step
instance._set_learning_rate()
assert instance._current_lr == instance._current_step / instance._steps * instance._target_lr
assert instance._model.optimizer.learning_rate.value.cpu().numpy() == instance._current_lr
_STEPS_CURRENT = [(1000, 1, "start"),
(250, 250, "end"),
(500, 69, "unreported"),
(1000, 200, "reported")]
_STEPS_CURRENT_ID = [f"steps:{x[0]}|current_step:{x[1]}|action:{x[2]}" for x in _STEPS_CURRENT]
@pytest.mark.parametrize(("steps", "current_step", "action"),
_STEPS_CURRENT,
ids=_STEPS_CURRENT_ID)
def test_output_status(model_fixture: Model,
steps: int,
current_step: int,
action: str,
mocker: pytest_mock.MockerFixture) -> None:
""" Test that information is output correctly """
mock_logger = mocker.patch("lib.training.lr_warmup.logger.info")
mock_print = mocker.patch("builtins.print")
instance = LearningRateWarmup(model_fixture, 5e-5, steps)
instance._current_step = current_step
instance._format_notation = mocker.MagicMock() # type:ignore[method-assign]
instance._output_status()
if action == "unreported":
assert current_step not in instance._reporting_points
mock_logger.assert_not_called()
instance._format_notation.assert_not_called() # type:ignore[attr-defined]
mock_print.assert_not_called()
return
mock_logger.assert_called_once()
log_message: str = mock_logger.call_args.args[0]
assert log_message.startswith("[Learning Rate Warmup] ")
instance._format_notation.assert_called() # type:ignore[attr-defined]
notation_args = [
x.args for x in instance._format_notation.call_args_list] # type:ignore[attr-defined]
assert all(len(a) == 1 for a in notation_args)
assert all(isinstance(a[0], float) for a in notation_args)
if action == "start":
mock_print.assert_not_called()
assert all(x in log_message for x in ("Start: ", "Target: ", "Steps: "))
assert instance._format_notation.call_count == 2 # type:ignore[attr-defined]
return
if action == "end":
mock_print.assert_called()
assert "Final Learning Rate: " in log_message
instance._format_notation.assert_called_once() # type:ignore[attr-defined]
return
if action == "reported":
mock_print.assert_called()
assert current_step in instance._reporting_points
assert all(x in log_message for x in ("Step: ", "Current: ", "Target: "))
assert instance._format_notation.call_count == 2 # type:ignore[attr-defined]
_STEPS_CURRENT_CALL = [(0, 500, "disabled"),
(1000, 500, "progress"),
(1000, 1000, "completed"),
(1000, 1111, "completed2")]
_STEPS_CURRENT_CALL_ID = [f"steps:{x[0]}|current_step:{x[1]}|action:{x[2]}"
for x in _STEPS_CURRENT_CALL]
@pytest.mark.parametrize(("steps", "current_step", "action"),
_STEPS_CURRENT_CALL,
ids=_STEPS_CURRENT_CALL_ID)
def test__call__(model_fixture: Model,
steps: int,
current_step: int,
action: str,
mocker: pytest_mock.MockerFixture) -> None:
""" Test calling the instance works correctly """
instance = LearningRateWarmup(model_fixture, 5e-5, steps)
instance._current_step = current_step
instance._set_learning_rate = mocker.MagicMock() # type:ignore[method-assign]
instance._output_status = mocker.MagicMock() # type:ignore[method-assign]
instance()
if action in ("disabled", "completed", "completed2"):
assert instance._current_step == current_step
instance._set_learning_rate.assert_not_called() # type:ignore[attr-defined]
instance._output_status.assert_not_called() # type:ignore[attr-defined]
else:
assert instance._current_step == current_step + 1
instance._set_learning_rate.assert_called_once() # type:ignore[attr-defined]
instance._output_status.assert_called_once() # type:ignore[attr-defined]
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/training/lr_warmup_test.py",
"license": "GNU General Public License v3.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/lib/training/tensorboard_test.py | #!/usr/bin/env python3
""" Unit test for :mod:`lib.training.tensorboard` """
import os
import pytest
from keras import layers, Sequential
import numpy as np
from tensorboard.compat.proto import event_pb2
from torch.utils.tensorboard import SummaryWriter
from lib.training import tensorboard as mod_tb
# pylint:disable=protected-access,invalid-name
@pytest.fixture()
def _gen_events_file(tmpdir):
log_dir = tmpdir.mkdir("logs")
def _apply(keys=["test1"], # pylint:disable=dangerous-default-value
values=[0.42],
global_steps=[4]):
writer = SummaryWriter(log_dir)
for key, val, step in zip(keys, values, global_steps):
writer.add_scalar(key, val, global_step=step)
writer.flush()
return os.path.join(log_dir, os.listdir(log_dir)[0])
return _apply
@pytest.mark.parametrize("entries", ({"loss1": np.random.rand()},
{f"test{i}": np.random.rand() for i in range(4)},
{f"another_test{i}": np.random.rand() for i in range(10)}))
@pytest.mark.parametrize("batch", [1, 42, 69, 1024, 143432])
@pytest.mark.parametrize("is_live", (True, False), ids=("live", "not_live"))
def test_RecordIterator(entries, batch, is_live, _gen_events_file):
""" Test that our :class:`lib.training.tensorboard.RecordIterator` returns expected results """
keys = list(entries)
vals = list(entries.values())
batches = [batch + i for i in range(len(keys))]
file = _gen_events_file(keys, vals, batches)
iterator = mod_tb.RecordIterator(file, is_live=is_live)
results = list(event_pb2.Event.FromString(v) for v in iterator)
valid = [r for r in results if r.summary.value]
assert len(valid) == len(keys)
for entry, key, val, btc in zip(valid, keys, vals, batches):
assert len(entry.summary.value) == 1
assert entry.step == btc
assert entry.summary.value[0].tag == key
assert np.isclose(entry.summary.value[0].simple_value, val)
if is_live:
assert iterator._is_live is True
assert os.path.getsize(file) == iterator._position # At end of file
else:
assert iterator._is_live is False
assert iterator._position == 0
@pytest.fixture()
def _get_ttb_instance(tmpdir):
log_dir = tmpdir.mkdir("logs")
def _apply(write_graph=False, update_freq="batch"):
instance = mod_tb.TorchTensorBoard(log_dir=log_dir,
write_graph=write_graph,
update_freq=update_freq)
return log_dir, instance
return _apply
def _get_logs(temp_path):
train_logs = os.path.join(temp_path, "train")
log_files = os.listdir(train_logs)
assert len(log_files) == 1
records = [event_pb2.Event.FromString(record)
for record in mod_tb.RecordIterator(os.path.join(train_logs, log_files[0]))]
return records
@pytest.mark.parametrize("write_graph", (True, False), ids=("write_graph", "no_write_graph"))
def test_TorchTensorBoard_set_model(write_graph, _get_ttb_instance):
""" Test that :class:`lib.training.tensorboard.set_model` functions """
log_dir, instance = _get_ttb_instance(write_graph=write_graph)
model = Sequential()
model.add(layers.Input(shape=(8, )))
model.add(layers.Dense(4))
model.add(layers.Dense(4))
assert not os.path.exists(os.path.join(log_dir, "train"))
instance.set_model(model)
instance.on_save()
logs = [x for x in _get_logs(os.path.join(log_dir))
if x.summary.value]
if not write_graph:
assert not logs
return
# Only a single logged entry
assert len(logs) == 1 and len(logs[0].summary.value) == 1
# Should be our Keras model summary
assert logs[0].summary.value[0].tag == "keras/text_summary"
def test_TorchTensorBoard_on_train_begin(_get_ttb_instance):
""" Test that :class:`lib.training.tensorboard.on_train_begin` functions """
_, instance = _get_ttb_instance()
instance.on_train_begin()
assert instance._global_train_batch == 0
assert instance._previous_epoch_iterations == 0
@pytest.mark.parametrize("batch", (1, 3, 57, 124))
@pytest.mark.parametrize("logs", ({"loss_a": 2.45, "loss_b": 1.56},
{"loss_c": 0.54, "loss_d": 0.51},
{"loss_c": 0.69, "loss_d": 0.42, "loss_g": 2.69}))
def test_TorchTensorBoard_on_train_batch_end(batch, logs, _get_ttb_instance):
""" Test that :class:`lib.training.tensorboard.on_train_batch_end` functions """
log_dir, instance = _get_ttb_instance()
assert not os.path.exists(os.path.join(log_dir, "train"))
instance.on_train_batch_end(batch, logs)
instance.on_save()
tb_logs = [x for x in _get_logs(os.path.join(log_dir))
if x.summary.value]
assert len(tb_logs) == len(logs)
for (k, v), out in zip(logs.items(), tb_logs):
assert len(out.summary.value) == 1
assert out.summary.value[0].tag == f"batch_{k}"
assert np.isclose(out.summary.value[0].simple_value, v)
assert out.step == batch
def test_TorchTensorBoard_on_save(_get_ttb_instance, mocker):
""" Test that :class:`lib.training.tensorboard.on_save` functions """
# Implicitly checked in other tests, so just make sure it calls flush on the writer
_, instance = _get_ttb_instance()
instance._train_writer.flush = mocker.MagicMock()
instance.on_save()
instance._train_writer.flush.assert_called_once()
def test_TorchTensorBoard_on_train_end(_get_ttb_instance, mocker):
""" Test that :class:`lib.training.tensorboard.on_train_end` functions """
# Saving is already implicitly checked in other tests, so just make sure it calls flush and
# close on the train writer
_, instance = _get_ttb_instance()
instance._train_writer.flush = mocker.MagicMock()
instance._train_writer.close = mocker.MagicMock()
instance.on_train_end()
instance._train_writer.flush.assert_called_once()
instance._train_writer.close.assert_called_once()
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/lib/training/tensorboard_test.py",
"license": "GNU General Public License v3.0",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/plugins/train/trainer/test_distributed.py | #!/usr/bin/env python3
""" Pytest unit tests for :mod:`plugins.train.trainer.distributed` Trainer plug in """
# pylint:disable=protected-access, invalid-name
import numpy as np
import pytest
import pytest_mock
import torch
from plugins.train.trainer import distributed as mod_distributed
from plugins.train.trainer import original as mod_original
from plugins.train.trainer import _base as mod_base
_MODULE_PREFIX = "plugins.train.trainer.distributed"
@pytest.mark.parametrize("batch_size", (4, 8, 16, 32, 64))
@pytest.mark.parametrize("outputs", (1, 2, 4))
def test_WrappedModel(batch_size, outputs, mocker):
""" Test that the wrapped model calls preds and loss """
model = mocker.MagicMock()
instance = mod_distributed.WrappedModel(model)
assert instance._keras_model is model
loss_return = [torch.from_numpy((np.random.random((1, )))) for _ in range(outputs * 2)]
model.loss = [mocker.MagicMock(return_value=ret) for ret in loss_return]
test_dims = (batch_size, 16, 16, 3)
inp_a = torch.from_numpy(np.random.random(test_dims))
inp_b = torch.from_numpy(np.random.random(test_dims))
targets = [torch.from_numpy(np.random.random(test_dims))
for _ in range(outputs * 2)]
preds = [*torch.from_numpy(np.random.random((outputs * 2, *test_dims)))]
model.return_value = preds
# Call forwards
result = instance.forward(inp_a, inp_b, *targets)
# Confirm model was called once forward with correct args
model.assert_called_once()
model_args, model_kwargs = model.call_args
assert model_kwargs == {"training": True}
assert len(model_args) == 1
assert len(model_args[0]) == 2
for real, expected in zip(model_args[0], [inp_a, inp_b]):
assert np.allclose(real.numpy(), expected.numpy())
# Confirm ZeroGrad called
model.zero_grad.assert_called_once()
# Confirm loss functions correctly called
expected_targets = targets[0::2] + targets[1::2]
for target, pred, loss in zip(expected_targets, preds, model.loss):
loss.assert_called_once()
loss_args, loss_kwargs = loss.call_args
assert not loss_kwargs
assert len(loss_args) == 2
for actual, expected in zip(loss_args, [target, pred]):
assert np.allclose(actual.numpy(), expected.numpy())
# Check that the result comes out as we put it in
for expected, actual in zip(loss_return, result.squeeze()):
assert np.isclose(expected.numpy(), actual.numpy())
@pytest.fixture
def _trainer_mocked(mocker: pytest_mock.MockFixture): # noqa:[F811]
""" Generate a mocked model and feeder object and patch torch GPU count """
def _apply_patch(gpus=2, batch_size=8):
patched_cuda_device = mocker.patch(f"{_MODULE_PREFIX}.torch.cuda.device_count")
patched_cuda_device.return_value = gpus
patched_parallel = mocker.patch(f"{_MODULE_PREFIX}.torch.nn.DataParallel")
patched_parallel.return_value = mocker.MagicMock()
model = mocker.MagicMock()
instance = mod_distributed.Trainer(model, batch_size)
return instance, patched_parallel
return _apply_patch
@pytest.mark.parametrize("gpu_count", (2, 3, 5, 8))
@pytest.mark.parametrize("batch_size", (4, 8, 16, 32, 64))
def test_Trainer(gpu_count, batch_size, _trainer_mocked):
""" Test that original trainer creates correctly """
instance, patched_parallel = _trainer_mocked(gpus=gpu_count, batch_size=batch_size)
assert isinstance(instance, mod_base.TrainerBase)
assert isinstance(instance, mod_original.Trainer)
# Confirms that _validate_batch_size executed correctly
assert instance.batch_size == max(gpu_count, batch_size)
assert hasattr(instance, "train_batch")
# Confirms that _set_distributed executed correctly
assert instance._distributed_model is patched_parallel.return_value
@pytest.mark.parametrize("gpu_count", (2, 3, 5, 8), ids=[f"gpus:{x}" for x in (2, 3, 5, 8)])
@pytest.mark.parametrize("outputs", (1, 2, 4))
@pytest.mark.parametrize("batch_size", (4, 8, 16, 32, 64))
def test_Trainer_forward(gpu_count, batch_size, outputs, _trainer_mocked, mocker):
""" Test that original trainer _forward calls the correct model methods """
instance, _ = _trainer_mocked(gpus=gpu_count, batch_size=batch_size)
test_dims = (2, batch_size, 16, 16, 3)
inputs = torch.from_numpy(np.random.random(test_dims))
targets = [torch.from_numpy(np.random.random(test_dims)) for _ in range(outputs)]
loss_return = torch.rand((gpu_count * 2 * outputs))
instance._distributed_model = mocker.MagicMock(return_value=loss_return)
# Call the forward pass
result = instance._forward(inputs, targets).cpu().numpy()
# Make sure multi-outs are enabled
if outputs > 1:
assert instance._is_multi_out is True
else:
assert instance._is_multi_out is False
# Make sure that our wrapped distributed model was called in the correct order
instance._distributed_model.assert_called_once()
call_args, call_kwargs = instance._distributed_model.call_args
assert not call_kwargs
assert len(call_args) == len(inputs) + (len(targets) * 2)
expected_tgts = [t[i].cpu().numpy() for t in targets for i in range(2)]
for expected, actual in zip([*inputs, *expected_tgts], call_args):
assert np.allclose(expected, actual)
# Make sure loss gets grouped, summed and scaled correctly
expected = loss_return.cpu().numpy()
expected = expected.reshape((gpu_count, 2, -1)).sum(axis=0).flatten() / gpu_count
assert np.allclose(result, expected)
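# The expected value above illustrates how per-replica losses are folded back
# into one loss per output: the flat vector gathered from all GPUs is grouped
# as (gpus, sides, outputs), summed over the GPU axis and divided by the GPU
# count. A hypothetical numpy sketch of that reduction (illustrative only):
def _aggregate_replica_losses_sketch(flat_losses, gpu_count):
    """ Hypothetical illustration of the reduction asserted in the test above """
    grouped = flat_losses.reshape((gpu_count, 2, -1))  # (gpus, a/b side, outs)
    return grouped.sum(axis=0).flatten() / gpu_count  # mean over replicas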
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/plugins/train/trainer/test_distributed.py",
"license": "GNU General Public License v3.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:tests/plugins/train/trainer/test_original.py | #!/usr/bin/env python3
""" Pytest unit tests for :mod:`plugins.train.trainer.original` Trainer plug in """
# pylint:disable=protected-access,invalid-name
import numpy as np
import pytest
import pytest_mock
import torch
from plugins.train.trainer import original as mod_original
from plugins.train.trainer import _base as mod_base
@pytest.fixture
def _trainer_mocked(mocker: pytest_mock.MockFixture): # noqa:[F811]
""" Generate a mocked model and feeder object and patch user config items """
def _apply_patch(batch_size=8):
model = mocker.MagicMock()
instance = mod_original.Trainer(model, batch_size)
return instance
return _apply_patch
@pytest.mark.parametrize("batch_size", (4, 8, 16, 32, 64))
def test_Trainer(batch_size, _trainer_mocked):
""" Test that original trainer creates correctly """
instance = _trainer_mocked(batch_size=batch_size)
assert isinstance(instance, mod_base.TrainerBase)
assert instance.batch_size == batch_size
assert hasattr(instance, "train_batch")
def test_Trainer_train_batch(_trainer_mocked, mocker):
""" Test that original trainer calls the forward and backwards methods """
instance = _trainer_mocked()
loss_return = float(np.random.rand())
instance._forward = mocker.MagicMock(return_value=loss_return)
instance._backwards_and_apply = mocker.MagicMock()
ret_val = instance.train_batch("TEST_INPUT", "TEST_TARGET")
assert ret_val == loss_return
instance._forward.assert_called_once_with("TEST_INPUT", "TEST_TARGET")
instance._backwards_and_apply.assert_called_once_with(loss_return)
@pytest.mark.parametrize("outputs", (1, 2, 4))
@pytest.mark.parametrize("batch_size", (4, 8, 16, 32, 64))
def test_Trainer_forward(batch_size, # pylint:disable=too-many-locals
outputs,
_trainer_mocked,
mocker):
""" Test that original trainer _forward calls the correct model methods """
instance = _trainer_mocked(batch_size=batch_size)
loss_returns = [torch.from_numpy(np.random.random((1, ))) for _ in range(outputs * 2)]
mock_preds = [torch.from_numpy(np.random.random((batch_size, 16, 16, 3)))
for _ in range(outputs * 2)]
instance.model.model.return_value = mock_preds
instance.model.model.zero_grad = mocker.MagicMock()
instance.model.model.loss = [mocker.MagicMock(return_value=ret) for ret in loss_returns]
inputs = torch.from_numpy(np.random.random((2, batch_size, 16, 16, 3)))
targets = [torch.from_numpy(np.random.random((2, batch_size, 16, 16, 3)))
for _ in range(outputs)]
# Call forwards
result = instance._forward(inputs, targets)
# Output comes from loss functions
    assert all(np.allclose(e.numpy(), a.numpy()) for e, a in zip(result, loss_returns))
# Model was zero'd
instance.model.model.zero_grad.assert_called_once()
# model forward pass called with inputs split
train_call = instance.model.model
call_args, call_kwargs = train_call.call_args
assert call_kwargs == {"training": True}
expected_inputs = [a.numpy() for a in inputs]
actual_inputs = [a.numpy() for a in call_args[0]]
    assert all(np.allclose(e, a) for e, a in zip(expected_inputs, actual_inputs))
# losses called with targets split
loss_calls = instance.model.model.loss
expected_targets = [t[i].numpy() for i in range(2) for t in targets]
expected_preds = [p.numpy() for p in mock_preds]
for loss_call, pred, target in zip(loss_calls, expected_preds, expected_targets):
loss_call.assert_called_once()
call_args, call_kwargs = loss_call.call_args
assert not call_kwargs
assert len(call_args) == 2
actual_target = call_args[0].numpy()
actual_pred = call_args[1].numpy()
assert np.allclose(pred, actual_pred)
assert np.allclose(target, actual_target)
def test_Trainer_backwards_and_apply(_trainer_mocked, mocker):
""" Test that original trainer _backwards_and_apply calls the correct model methods """
instance = _trainer_mocked()
mock_loss = mocker.MagicMock()
instance.model.model.optimizer.scale_loss = mocker.MagicMock(return_value=mock_loss)
    instance.model.model.optimizer.apply = mocker.MagicMock()
all_loss = np.random.rand()
instance._backwards_and_apply(all_loss)
scale_mock = instance.model.model.optimizer.scale_loss
scale_mock.assert_called_once()
assert not scale_mock.call_args[1]
assert len(scale_mock.call_args[0]) == 1
assert np.isclose(all_loss, scale_mock.call_args[0][0].cpu().numpy())
mock_loss.backward.assert_called_once()
instance.model.model.optimizer.apply.assert_called_once()
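# The assertions above mirror a loss-scaling optimiser step: the summed loss
# is scaled (e.g. for mixed precision), backward() populates gradients on the
# scaled tensor, then apply() steps the weights. A hypothetical sketch of the
# flow being asserted (names follow the mocks above, not a documented API):
def _backwards_and_apply_sketch(optimizer, all_loss):
    """ Hypothetical illustration of the scale -> backward -> apply flow """
    scaled_loss = optimizer.scale_loss(all_loss)  # scale before backprop
    scaled_loss.backward()  # accumulate gradients through the graph
    optimizer.apply()  # unscale gradients and update the weights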
| {
"repo_id": "deepfakes/faceswap",
"file_path": "tests/plugins/train/trainer/test_original.py",
"license": "GNU General Public License v3.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepfakes/faceswap:lib/training/lr_warmup.py | #!/usr/bin/env python3
""" Handles Learning Rate Warmup when training a model """
from __future__ import annotations
import logging
import typing as T
from lib.utils import get_module_objects
if T.TYPE_CHECKING:
from keras import models
logger = logging.getLogger(__name__)
class LearningRateWarmup():
""" Handles the updating of the model's learning rate during Learning Rate Warmup
Parameters
----------
model : :class:`keras.models.Model`
The keras model that is to be trained
target_learning_rate : float
The final learning rate at the end of warmup
steps : int
The number of iterations to warmup the learning rate for
"""
def __init__(self, model: models.Model, target_learning_rate: float, steps: int) -> None:
self._model = model
self._target_lr = target_learning_rate
self._steps = steps
self._current_lr = 0.0
self._current_step = 0
self._reporting_points = [int(self._steps * i / 10) for i in range(11)]
logger.debug("Initialized %s", self)
def __repr__(self) -> str:
""" Pretty string representation for logging """
call_args = ", ".join(f"{k}={v}" for k, v in {"model": self._model,
"target_learning_rate": self._target_lr,
"steps": self._steps}.items())
current_params = ", ".join(f"{k[1:]}: {v}" for k, v in self.__dict__.items()
if k not in ("_model", "_target_lr", "_steps"))
return f"{self.__class__.__name__}({call_args}) [{current_params}]"
@classmethod
def _format_notation(cls, value: float) -> str:
""" Format a float to scientific notation at 1 decimal place
Parameters
----------
value : float
The value to format
Returns
-------
str
The formatted float in scientific notation at 1 decimal place
"""
return f"{value:.1e}"
def _set_learning_rate(self) -> None:
""" Set the learning rate for the current step """
self._current_lr = self._current_step / self._steps * self._target_lr
self._model.optimizer.learning_rate.assign(self._current_lr)
logger.debug("Learning rate set to %s for step %s/%s",
self._current_lr, self._current_step, self._steps)
def _output_status(self) -> None:
""" Output the progress of Learning Rate Warmup at set intervals """
if self._current_step == 1:
logger.info("[Learning Rate Warmup] Start: %s, Target: %s, Steps: %s",
self._format_notation(self._current_lr),
self._format_notation(self._target_lr), self._steps)
return
if self._current_step == self._steps:
print()
logger.info("[Learning Rate Warmup] Final Learning Rate: %s",
self._format_notation(self._target_lr))
return
if self._current_step in self._reporting_points:
print()
progress = int(round(100 / (len(self._reporting_points) - 1) *
self._reporting_points.index(self._current_step), 0))
logger.info("[Learning Rate Warmup] Step: %s/%s (%s), Current: %s, Target: %s",
self._current_step,
self._steps,
f"{progress}%",
self._format_notation(self._current_lr),
self._format_notation(self._target_lr))
def __call__(self) -> None:
""" If a learning rate update is required, update the model's learning rate, otherwise
do nothing """
if self._steps == 0 or self._current_step >= self._steps:
return
self._current_step += 1
self._set_learning_rate()
self._output_status()
__all__ = get_module_objects(__name__)
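# A hypothetical usage sketch (illustrative only): the instance is called once
# per training iteration. It linearly ramps the optimizer's learning rate from
# zero to the target over `steps` iterations and is a no-op afterwards:
#
#     warmup = LearningRateWarmup(model, target_learning_rate=5e-5, steps=1000)
#     for _ in range(total_iterations):
#         warmup()  # updates model.optimizer.learning_rate while warming up
#         ...       # run the training step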
| {
"repo_id": "deepfakes/faceswap",
"file_path": "lib/training/lr_warmup.py",
"license": "GNU General Public License v3.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepseek-ai/DeepSeek-OCR:DeepSeek-OCR-master/DeepSeek-OCR-hf/run_dpsk_ocr.py | from transformers import AutoModel, AutoTokenizer
import torch
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
model_name = 'deepseek-ai/DeepSeek-OCR'
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(model_name, _attn_implementation='flash_attention_2', trust_remote_code=True, use_safetensors=True)
model = model.eval().cuda().to(torch.bfloat16)
# prompt = "<image>\nFree OCR. "
prompt = "<image>\n<|grounding|>Convert the document to markdown. "
image_file = 'your_image.jpg'
output_path = 'your/output/dir'
# infer(self, tokenizer, prompt='', image_file='', output_path = ' ', base_size = 1024, image_size = 640, crop_mode = True, test_compress = False, save_results = False):
# Tiny: base_size = 512, image_size = 512, crop_mode = False
# Small: base_size = 640, image_size = 640, crop_mode = False
# Base: base_size = 1024, image_size = 1024, crop_mode = False
# Large: base_size = 1280, image_size = 1280, crop_mode = False
# Gundam: base_size = 1024, image_size = 640, crop_mode = True
res = model.infer(tokenizer, prompt=prompt, image_file=image_file, output_path = output_path, base_size = 1024, image_size = 640, crop_mode=True, save_results = True, test_compress = True)
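# A hypothetical convenience mapping of the documented presets to their
# (base_size, image_size, crop_mode) values. Illustrative only; the names
# below are not part of the DeepSeek-OCR API:
MODES = {'tiny': (512, 512, False),
         'small': (640, 640, False),
         'base': (1024, 1024, False),
         'large': (1280, 1280, False),
         'gundam': (1024, 640, True)}
# e.g. base_size, image_size, crop_mode = MODES['gundam']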
| {
"repo_id": "deepseek-ai/DeepSeek-OCR",
"file_path": "DeepSeek-OCR-master/DeepSeek-OCR-hf/run_dpsk_ocr.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
deepseek-ai/DeepSeek-OCR:DeepSeek-OCR-master/DeepSeek-OCR-vllm/config.py | # TODO: change modes
# Tiny: base_size = 512, image_size = 512, crop_mode = False
# Small: base_size = 640, image_size = 640, crop_mode = False
# Base: base_size = 1024, image_size = 1024, crop_mode = False
# Large: base_size = 1280, image_size = 1280, crop_mode = False
# Gundam: base_size = 1024, image_size = 640, crop_mode = True
BASE_SIZE = 1024
IMAGE_SIZE = 640
CROP_MODE = True
MIN_CROPS = 2
MAX_CROPS = 6  # max:9; If your GPU memory is small, it is recommended to set it to 6.
MAX_CONCURRENCY = 100 # If you have limited GPU memory, lower the concurrency count.
NUM_WORKERS = 64 # image pre-process (resize/padding) workers
PRINT_NUM_VIS_TOKENS = False
SKIP_REPEAT = True
MODEL_PATH = 'deepseek-ai/DeepSeek-OCR' # change to your model path
# TODO: change INPUT_PATH
# .pdf: run_dpsk_ocr_pdf.py;
# .jpg, .png, .jpeg: run_dpsk_ocr_image.py;
# Omnidocbench images path: run_dpsk_ocr_eval_batch.py
INPUT_PATH = ''
OUTPUT_PATH = ''
PROMPT = '<image>\n<|grounding|>Convert the document to markdown.'
# PROMPT = '<image>\nFree OCR.'
# TODO commonly used prompts
# document: <image>\n<|grounding|>Convert the document to markdown.
# other image: <image>\n<|grounding|>OCR this image.
# without layouts: <image>\nFree OCR.
# figures in document: <image>\nParse the figure.
# general: <image>\nDescribe this image in detail.
# rec: <image>\nLocate <|ref|>xxxx<|/ref|> in the image.
# '先天下之忧而忧' (Chinese example text: "worry before the world worries")
# .......
from transformers import AutoTokenizer
TOKENIZER = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
| {
"repo_id": "deepseek-ai/DeepSeek-OCR",
"file_path": "DeepSeek-OCR-master/DeepSeek-OCR-vllm/config.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
deepseek-ai/DeepSeek-OCR:DeepSeek-OCR-master/DeepSeek-OCR-vllm/deepencoder/build_linear.py | import torch.nn as nn
import torch
import torch.nn.functional as F
import copy
class MlpProjector(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
if cfg.projector_type == "identity":
modules = nn.Identity()
elif cfg.projector_type == "linear":
modules = nn.Linear(cfg.input_dim, cfg.n_embed)
elif cfg.projector_type == "mlp_gelu":
mlp_depth = cfg.get("depth", 1)
modules = [nn.Linear(cfg.input_dim, cfg.n_embed)]
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(cfg.n_embed, cfg.n_embed))
modules = nn.Sequential(*modules)
elif cfg.projector_type == "normlayer_downsample_mlp_gelu":
mlp_depth = cfg.get("depth", 1)
mlp_ratio = cfg.get("mlp_ratio", 1)
modules = [
nn.LayerNorm(cfg.input_dim * cfg.downsample_ratio * cfg.downsample_ratio),
nn.Linear(cfg.input_dim * cfg.downsample_ratio * cfg.downsample_ratio, cfg.n_embed * mlp_ratio)
]
for _ in range(1, mlp_depth - 1):
modules.append(nn.GELU())
modules.append(nn.Linear(cfg.n_embed * mlp_ratio, cfg.n_embed * mlp_ratio))
modules.append(nn.GELU())
modules.append(nn.Linear(cfg.n_embed * mlp_ratio, cfg.n_embed))
modules = nn.Sequential(*modules)
elif cfg.projector_type == "downsample_mlp_gelu":
mlp_depth = cfg.get("depth", 1)
mlp_ratio = cfg.get("mlp_ratio", 1)
modules = [nn.Linear(cfg.input_dim * cfg.downsample_ratio * cfg.downsample_ratio, cfg.n_embed * mlp_ratio)]
for _ in range(1, mlp_depth - 1):
modules.append(nn.GELU())
modules.append(nn.Linear(cfg.n_embed * mlp_ratio, cfg.n_embed * mlp_ratio))
modules.append(nn.GELU())
modules.append(nn.Linear(cfg.n_embed * mlp_ratio, cfg.n_embed))
modules = nn.Sequential(*modules)
elif cfg.projector_type == "low_high_hybrid_split_mlp_gelu":
mlp_depth = cfg.get("depth", 1)
self.high_up_proj = nn.Linear(cfg.input_dim, cfg.n_embed // 2)
self.low_up_proj = nn.Linear(cfg.input_dim, cfg.n_embed // 2)
modules = []
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(cfg.n_embed, cfg.n_embed))
modules = nn.Sequential(*modules)
elif cfg.projector_type == "hybrid_split_feature_mlp_gelu":
mlp_depth = cfg.get("depth", 1)
channel_div = cfg.get("channel_div", 0.5)
self.high_up_proj = nn.Linear(cfg.input_dim[0], int(cfg.n_embed * channel_div))
self.low_up_proj = nn.Linear(cfg.input_dim[1], cfg.n_embed - int(cfg.n_embed * channel_div))
modules = []
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(cfg.n_embed, cfg.n_embed))
modules = nn.Sequential(*modules)
elif cfg.projector_type == "low_high_split_mlp_gelu":
mlp_depth = cfg.get("depth", 1)
modules = []
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(cfg.n_embed // 2, cfg.n_embed // 2))
modules = nn.Sequential(*modules)
self.high_layers = nn.Sequential(*modules)
self.low_layers = copy.deepcopy(modules)
else:
raise ValueError(f"Unknown projector type: {cfg.projector_type}")
if cfg.get("token_pooling", False):
self.token_pooling_layer = nn.Linear(cfg.input_dim * 4, cfg.input_dim)
if cfg.get("conv_fusion_high_low_features", False):
self.fusion_layer = nn.Linear(cfg.input_dim, cfg.input_dim)
self.layers = modules
def forward(self, x):
if self.cfg.get("token_pooling", False):
batch_size, wxh, channels = x.shape
w = h = int(wxh**0.5)
x = x.view(batch_size, w, h, channels)
x = x.permute(0, 3, 1, 2)
patches = x.unfold(2, 2, 2).unfold(3, 2, 2)
batch_size, channels, h_patches, w_patches, _, _ = patches.size()
            # Concatenate each 2x2 patch along the channel dimension
patches = patches.contiguous().view(batch_size, channels, h_patches * w_patches, -1)
            # Pass the pooled patches through the linear layer
patches = patches.permute(0, 2, 1, 3).contiguous()
patches = patches.view(batch_size, h_patches * w_patches, channels * 4)
x = self.token_pooling_layer(patches)
if self.cfg.get("conv_fusion_high_low_features", False):
x = self.fusion_layer(x[:, 0]) + x[:, 1]
if self.cfg.projector_type == 'low_high_hybrid_split_mlp_gelu':
high_x, low_x = x[0], x[1]
high_x = self.high_up_proj(high_x)
low_x = self.low_up_proj(low_x)
x = torch.concat([high_x, low_x], dim=-1)
if self.cfg.projector_type == 'hybrid_split_feature_mlp_gelu':
high_x = x[...,:self.cfg.input_dim[0]]
low_x = x[...,self.cfg.input_dim[0]:]
high_x = self.high_up_proj(high_x)
low_x = self.low_up_proj(low_x)
x = torch.concat([high_x, low_x], dim=-1)
if self.cfg.projector_type == 'low_high_split_mlp_gelu':
high_x, low_x = x[0], x[1]
high_x = self.high_layers(high_x)
low_x = self.low_layers(low_x)
x = torch.concat([high_x, low_x], dim=-1)
return x
if self.cfg.projector_type == 'downsample_mlp_gelu' or self.cfg.projector_type == 'normlayer_downsample_mlp_gelu':
bs, hw, input_dim = x.shape
h = w = int((hw) ** 0.5)
"""compute padding"""
if h % self.cfg.downsample_ratio:
pad = self.cfg.downsample_ratio - h % self.cfg.downsample_ratio
else:
pad = 0
x = x.reshape(bs, h, w, input_dim)
if pad > 0:
x = F.pad(x, (0, 0, 0, pad, 0, pad), "constant", 0)
"""4 to 1 concat"""
x = x.permute(0, 3, 1, 2) # B, C, H, W
x = F.unfold(x, kernel_size=self.cfg.downsample_ratio, stride=self.cfg.downsample_ratio, padding=0) # B, C*4, HW // 4
x = x.permute(0, 2, 1)
return self.layers(x)
@staticmethod
def get_flops_per_sample(cfg):
if cfg.projector_type == "linear":
fwd = 2 * cfg.input_dim * cfg.n_embed
elif "mlp_gelu" in cfg.projector_type :
mlp_depth = cfg.get("depth", 1)
downsample_ratio = cfg.get("downsample_ratio", 1)
input_dim = sum(cfg.input_dim) if isinstance(cfg.input_dim, list) else cfg.input_dim
input_dim = input_dim * downsample_ratio * downsample_ratio
fwd = 2 * input_dim * cfg.n_embed + (mlp_depth - 1) * 2 * cfg.n_embed * cfg.n_embed
else:
fwd = 0
return fwd * 3
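# A hypothetical standalone sketch of the "4 to 1 concat" downsample performed
# in forward() above (illustrative only): with downsample_ratio=2, F.unfold
# gathers each non-overlapping 2x2 block of tokens into one vector of 4*C
# channels, quartering the token count before the MLP.
def _downsample_tokens_sketch(x, ratio=2):
    """ Illustrative only: x is (B, HW, C) with HW a square number """
    bs, hw, channels = x.shape
    side = int(hw ** 0.5)
    grid = x.reshape(bs, side, side, channels).permute(0, 3, 1, 2)  # B,C,H,W
    merged = F.unfold(grid, kernel_size=ratio, stride=ratio)  # B, C*r*r, HW/r^2
    return merged.permute(0, 2, 1)  # B, HW/r^2, C*r*r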
| {
"repo_id": "deepseek-ai/DeepSeek-OCR",
"file_path": "DeepSeek-OCR-master/DeepSeek-OCR-vllm/deepencoder/build_linear.py",
"license": "MIT License",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepseek-ai/DeepSeek-OCR:DeepSeek-OCR-master/DeepSeek-OCR-vllm/deepencoder/clip_sdpa.py | from contextlib import nullcontext
import math
from typing import Optional, Tuple
# from megatron.model import LayerNorm
from easydict import EasyDict as adict
import torch
from torch.nn import functional as F
from torch import nn
from flash_attn import flash_attn_qkvpacked_func, flash_attn_func
# from optimus import flash_attn_func
# from megatron.core import tensor_parallel
# from megatron.core import parallel_state as mpu
# from megatron.core.utils import make_viewless_tensor, divide
# from megatron.model.fused_rms_norm import RMSNorm
# from megatron.model.transformer import (
# FlashSelfAttention,
# NoopTransformerLayer,
# _cfg_to_kwargs,
# )
# from megatron.model.enums import AttnMaskType, AttnType
# from megatron.model.fused_softmax import FusedScaleMaskSoftmax
# from megatron.model.utils import attention_mask_func
# from megatron.model.module import MegatronModule
# try:
# from einops import rearrange
# except ImportError:
# rearrange = None
# from flash_attn import flash_attn_varlen_func as flash_attn_unpadded_func
# try:
# # flash attention 2.x
# from flash_attn import flash_attn_varlen_func as flash_attn_unpadded_func
# except ImportError:
# try:
# # flash attention 1.x
# from flash_attn.flash_attn_interface import flash_attn_unpadded_func
# except ImportError:
# flash_attn_unpadded_func = None
# try:
# from flash_attn.flash_attn_interface import flash_attn_unpadded_relative_attention_bias_func
# except ImportError:
# flash_attn_unpadded_relative_attention_bias_func = None
# try:
# from flash_attn.flash_attn_interface import mask_flash_attn_unpadded_func
# except ImportError:
# mask_flash_attn_unpadded_func = None
class LayerNormfp32(torch.nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
def get_abs_pos(abs_pos, tgt_size):
# abs_pos: L, C
# tgt_size: M
# return: M, C
dim = abs_pos.size(-1)
abs_pos_new = abs_pos.squeeze(0)
cls_token, old_pos_embed = abs_pos_new[:1], abs_pos_new[1:]
src_size = int(math.sqrt(abs_pos_new.shape[0] - 1))
tgt_size = int(math.sqrt(tgt_size))
dtype = abs_pos.dtype
if src_size != tgt_size:
old_pos_embed = old_pos_embed.view(1, src_size, src_size, dim).permute(0, 3, 1,
2).contiguous()
old_pos_embed = old_pos_embed.to(torch.float32)
new_pos_embed = F.interpolate(
old_pos_embed,
size=(tgt_size, tgt_size),
mode='bicubic',
antialias=True,
align_corners=False,
).to(dtype)
new_pos_embed = new_pos_embed.permute(0, 2, 3, 1)
new_pos_embed = new_pos_embed.view(tgt_size * tgt_size, dim)
vision_pos_embed = torch.cat([cls_token, new_pos_embed], dim=0)
vision_pos_embed = vision_pos_embed.view(1, tgt_size * tgt_size + 1, dim)
return vision_pos_embed
else:
return abs_pos
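# A hypothetical usage sketch of get_abs_pos (illustrative only): a position
# table for a CLS token plus a 16x16 grid, interpolated to match a 32x32 patch
# grid. Note the target size is passed *including* the CLS token, as done in
# CLIPVisionEmbeddings.forward below:
#
#     pos = torch.randn(1, 1 + 16 * 16, 1024)   # CLS token + 16x16 grid
#     resized = get_abs_pos(pos, 1 + 32 * 32)   # -> (1, 1 + 32*32, 1024)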
@torch.jit.script
def quick_gelu(x):
return x * torch.sigmoid(1.702 * x)
class CLIPVisionEmbeddings(nn.Module):
def __init__(self, hidden_size=1024, image_size=224, patch_size=14, num_channels=3):
super().__init__()
self.embed_dim = hidden_size
self.image_size = image_size
self.patch_size = patch_size
self.class_embedding = torch.nn.Parameter(torch.randn(self.embed_dim))
self.patch_embedding = torch.nn.Conv2d(
in_channels=num_channels,
out_channels=self.embed_dim,
kernel_size=self.patch_size,
stride=self.patch_size,
bias=False,
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = torch.nn.Embedding(self.num_positions, self.embed_dim)
self.register_buffer(
"position_ids", torch.arange(self.num_positions).expand((1, -1))
)
def forward(self, pixel_values, patch_embeds):
batch_size = pixel_values.shape[0]
        if patch_embeds is None:
            # Compute patch embeddings when the caller does not supply them
            patch_embeds = self.patch_embedding(pixel_values)  # [*, width, grid, grid]
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
# x = torch.cat([cls_token, x], dim=1)
embeddings = embeddings + get_abs_pos(self.position_embedding(self.position_ids), embeddings.size(1))
# embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
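# Shape sketch (an illustrative check, not part of the model path): with the
# default 224px/14px setup the output is [B, 1 + 16*16, 1024]. Precomputed
# patch_embeds (e.g. from SAM) may be passed instead of re-embedding pixels.
def _demo_clip_embeddings():
    emb = CLIPVisionEmbeddings()
    x = torch.zeros(2, 3, 224, 224)
    y = emb(x, None)                  # None -> conv patch embedding is used
    assert y.shape == (2, 16 * 16 + 1, 1024)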
class NoTPFeedForward(nn.Module):
def __init__(
self,
cfg,
dim: int,
hidden_dim: int,
):
super().__init__()
self.fc1 = torch.nn.Linear(dim, hidden_dim, bias=True)
self.fc2 = torch.nn.Linear(hidden_dim, dim, bias=True)
def forward(self, x):
output = self.fc2(quick_gelu(self.fc1(x)))
return output
# flash-attn is optional for this module; NoTPAttention falls back to PyTorch SDPA when it is absent.
try:
from flash_attn import flash_attn_qkvpacked_func
except ImportError:
flash_attn_qkvpacked_func = None
class NoTPAttention(torch.nn.Module):
def __init__(self, cfg):
super().__init__()
self.num_heads = cfg.num_attention_heads
self.n_local_heads = cfg.num_attention_heads
self.head_dim = cfg.hidden_size // cfg.num_attention_heads
self.max_seq_len = cfg.seq_length
self.use_flash_attention = cfg.use_flash_attn
self.qkv_proj = torch.nn.Linear(cfg.hidden_size, cfg.hidden_size * 3, bias=True)
self.out_proj = torch.nn.Linear(cfg.hidden_size, cfg.hidden_size, bias=True)
# self.core_attention = CoreAttention(cfg, AttnType.self_attn)
self.attn_drop = cfg.attention_dropout
def forward(
self,
x: torch.Tensor,
):
bsz, seqlen, _ = x.shape
xqkv = self.qkv_proj(x)
xqkv = xqkv.view(bsz, seqlen, 3, self.num_heads, self.head_dim)
if self.use_flash_attention:
output = flash_attn_qkvpacked_func(xqkv)
output = output.view(bsz, seqlen, -1)
else:
# output = flash_attn_qkvpacked_func(xqkv)
xq, xk, xv = torch.split(xqkv, 1, dim=2)
xq = xq.squeeze(2)
xk = xk.squeeze(2)
xv = xv.squeeze(2)
# (B, num_heads, S, head_dim)
xq = xq.permute(0, 2, 1, 3)
xk = xk.permute(0, 2, 1, 3)
xv = xv.permute(0, 2, 1, 3)
# with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
output = torch.nn.functional.scaled_dot_product_attention(xq, xk, xv, attn_mask=None)
output = output.permute(0, 2, 1, 3).reshape(bsz, seqlen, -1)
output = self.out_proj(output)
return output
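# A minimal sketch of the SDPA path (use_flash_attn=False); the config object
# is a stand-in supplying only the fields read in __init__/forward:
def _demo_notp_attention():
    from types import SimpleNamespace
    cfg = SimpleNamespace(num_attention_heads=16, hidden_size=1024,
                          seq_length=257, use_flash_attn=False,
                          attention_dropout=0.0)
    attn = NoTPAttention(cfg)
    x = torch.randn(2, 257, 1024)
    assert attn(x).shape == (2, 257, 1024)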
class NoTPTransformerBlock(nn.Module):
def __init__(self, cfg, layer_id: int, multiple_of=256):
super().__init__()
self.n_heads = cfg.num_attention_heads
self.dim = cfg.hidden_size
self.head_dim = cfg.hidden_size // cfg.num_attention_heads
self.self_attn = NoTPAttention(cfg)
self.mlp = NoTPFeedForward(
cfg, dim=cfg.hidden_size, hidden_dim=cfg.ffn_hidden_size
)
self.layer_id = layer_id
self.layer_norm1 = torch.nn.LayerNorm(
cfg.hidden_size, eps=cfg.layernorm_epsilon
)
self.layer_norm2 = torch.nn.LayerNorm(
cfg.hidden_size, eps=cfg.layernorm_epsilon
)
def forward(self, x: torch.Tensor):
h = x + self.self_attn(self.layer_norm1(x))
out = h + self.mlp(self.layer_norm2(h))
return out
class NoTPTransformer(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
# self.recompute_list = self.cfg.get("recompute_list", [])
self.num_layers = cfg.num_layers # _get_num_layers(cfg)
self.layers = torch.nn.ModuleList()
for layer_id in range(self.num_layers):
self.layers.append(
NoTPTransformerBlock(
cfg,
layer_id + 1,
)
)
def forward(
self,
hidden_states,
):
for lid, layer in enumerate(self.layers):
# if lid in self.recompute_list:
# def custom(layer_id):
# def custom_forward(*args, **kwargs):
# x_ = self.layers[layer_id](*args, **kwargs)
# return x_
# return custom_forward
# assert hidden_states.requires_grad == True, logger.warning(
# "When using recalculation, the input must have grad fn"
# )
# hidden_states = tensor_parallel.checkpoint(
# custom(lid),
# False,
# hidden_states.contiguous()
# )
# else:
hidden_states = layer(hidden_states)
return hidden_states
# from megatron.core.tensor_parallel.layers import non_tensor_paralleled, local_dp_reduce, local_dp_scatter
class VitModel(nn.Module):
def __init__(
self,
cfg,
freeze_embed=False,
freeze_pre_norm=False
) -> None:
super().__init__()
self.embeddings = CLIPVisionEmbeddings(hidden_size=cfg.hidden_size, image_size=cfg.image_size, patch_size=cfg.patch_size)
if freeze_embed:
for name, param in self.embeddings.named_parameters():
param.requires_grad = False
self.transformer = NoTPTransformer(cfg=cfg)
# 'pre_layrnorm' (sic) is kept below: the attribute name appears in checkpoint state_dict keys
if cfg.get("fp32norm", False):
logger.info("Load fp32 layernorm for ViT.")
self.pre_layrnorm = LayerNormfp32(
cfg.hidden_size,
eps=cfg.get("pre_layernorm_epsilon", 1e-5),
)
else:
self.pre_layrnorm = torch.nn.LayerNorm(
cfg.hidden_size,
eps=cfg.get("pre_layernorm_epsilon", 1e-5),
)
# self.pre_layrnorm = RMSNorm(
# cfg.hidden_size,
# eps=cfg.get("pre_layernorm_epsilon", 1e-5),
# sequence_parallel=False,
# use_fp32=True,
# use_optimus=True,
# )
if freeze_pre_norm:
for name, param in self.pre_layrnorm.named_parameters():
param.requires_grad = False
for p in self.parameters():
p.micro_dp = True
def set_input_tensor(self, input_tensor):
# NOTE: NoTPTransformer does not define set_input_tensor; this hook exists
# only for pipeline-parallel API compatibility and is unused in inference.
if not isinstance(input_tensor, list):
input_tensor = [input_tensor]
self.transformer.set_input_tensor(input_tensor[0])
def __str__(self) -> str:
return "open_clip"
def forward(
self,
x,
patch_embeds
):
x = self.embeddings(x, patch_embeds)
hidden_states = self.pre_layrnorm(x)
# hidden_states, dis = local_dp_scatter(hidden_states)
output = self.transformer(hidden_states)
# output = local_dp_reduce(output, dis)
return output
vit_model_cfg = adict(
num_layers=24,
hidden_size=1024,
num_attention_heads=16,
ffn_hidden_size=4096,
seq_length=256,
max_position_embeddings=256,
use_flash_attn=False,
understand_projector_stride=2,
hidden_dropout = 0.0,
attention_dropout = 0.0,
no_persist_layer_norm = False,
layernorm_epsilon = 1e-5,
pre_layernorm_epsilon = 1e-5,
image_size = 224,
patch_size = 14,
recompute_list = []
)
def build_clip_l():
return VitModel(
cfg=vit_model_cfg,
freeze_embed=False,
freeze_pre_norm=False,
)
if __name__ == '__main__':
from mmgpt.model.vision_encoder.sam_b import build_sam_vit_b
# vit_model_cfg is already defined at module level above; reuse it here.
sam_model = build_sam_vit_b()
vision_model = VitModel(
cfg=vit_model_cfg,
freeze_embed=False,
freeze_pre_norm=False,
)
# model = VitModel(1344)
# x = torch.zeros(2, 3, 224, 224)
x = torch.zeros(2, 3, 1024, 1024)
with torch.no_grad():
# y = vision_model(x)
patch_embed = sam_model(x)
print(patch_embed.shape)
y = vision_model(x, patch_embed)
print(y.shape)
image_feature = torch.add(y[:, 1:], patch_embed.flatten(2).permute(0, 2, 1))
print(image_feature.shape)
| {
"repo_id": "deepseek-ai/DeepSeek-OCR",
"file_path": "DeepSeek-OCR-master/DeepSeek-OCR-vllm/deepencoder/clip_sdpa.py",
"license": "MIT License",
"lines": 402,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepseek-ai/DeepSeek-OCR:DeepSeek-OCR-master/DeepSeek-OCR-vllm/deepencoder/sam_vary_sdpa.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Type
from functools import partial
from flash_attn import flash_attn_qkvpacked_func
# from .common import LayerNorm2d, MLPBlock
# from mmgpt.model.vision_encoder.flash_4 import _attention_rel_h_rel_w
def get_abs_pos(abs_pos, tgt_size):
dtype = abs_pos.dtype
src_size = abs_pos.size(1)
if src_size != tgt_size:
old_pos_embed = abs_pos.permute(0, 3, 1, 2)
old_pos_embed = old_pos_embed.to(torch.float32)
new_pos_embed = F.interpolate(
old_pos_embed,
size=(tgt_size, tgt_size),
mode='bicubic',
antialias=True,
align_corners=False,
).to(dtype)
new_pos_embed = new_pos_embed.permute(0, 2, 3, 1)
return new_pos_embed
else:
return abs_pos
class MLPBlock(nn.Module):
def __init__(
self,
embedding_dim: int,
mlp_dim: int,
act: Type[nn.Module] = nn.GELU,
) -> None:
super().__init__()
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
self.act = act()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.lin2(self.act(self.lin1(x)))
# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
class LayerNorm2d(nn.Module):
def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x: torch.Tensor) -> torch.Tensor:
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
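# Sketch (illustrative only): unlike nn.LayerNorm, which normalizes trailing
# dims, LayerNorm2d normalizes over the channel dim of an NCHW tensor.
def _demo_layernorm2d():
    ln = LayerNorm2d(num_channels=256)
    y = ln(torch.randn(2, 256, 8, 8))
    assert y.shape == (2, 256, 8, 8)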
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
LayerNorm2d(out_chans),
nn.Conv2d(
out_chans,
out_chans,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(out_chans),
)
self.net_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False)
self.net_3 = nn.Conv2d(512, 1024, kernel_size=3, stride=2, padding=1, bias=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
if self.pos_embed is not None:
# x = x + self.pos_embed
x = x + get_abs_pos(self.pos_embed, x.size(1))
for blk in self.blocks:
x = blk(x)
neck_output = self.neck(x.permute(0, 3, 1, 2))
conv2_output = self.net_2(neck_output)
# print(f"conv2_output shape: {conv2_output.shape}")
conv3_output = self.net_3(conv2_output)
return conv3_output
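# Shape sketch (defaults: 1024px input, 16px patches -> a 64x64 token grid;
# the neck plus the two strided convs yield a 16x16 map with 1024 channels).
# Running all 12 global-attention blocks is slow on CPU; this is only a
# sanity check, not part of the model path.
def _demo_image_encoder():
    enc = ImageEncoderViT()
    with torch.no_grad():
        y = enc(torch.zeros(1, 3, 1024, 1024))
    assert y.shape == (1, 1024, 16, 16)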
class Block(nn.Module):
"""Transformer blocks with support of window attention and residual propagation blocks"""
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.0,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
input_size: Optional[Tuple[int, int]] = None,
) -> None:
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks. If it equals 0, then
use global attention.
input_size (tuple(int, int) or None): Input resolution for calculating the relative
positional parameter size.
"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
input_size=input_size if window_size == 0 else (window_size, window_size),
)
self.norm2 = norm_layer(dim)
self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
self.window_size = window_size
def forward(self, x: torch.Tensor) -> torch.Tensor:
shortcut = x
x = self.norm1(x)
# Window partition
if self.window_size > 0:
H, W = x.shape[1], x.shape[2]
x, pad_hw = window_partition(x, self.window_size)
x = self.attn(x)
# Reverse window partition
if self.window_size > 0:
x = window_unpartition(x, self.window_size, pad_hw, (H, W))
x = shortcut + x
x = x + self.mlp(self.norm2(x))
return x
class Attention(nn.Module):
"""Multi-head Attention block with relative position embeddings."""
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
input_size: Optional[Tuple[int, int]] = None,
) -> None:
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
input_size (tuple(int, int) or None): Input resolution for calculating the relative
positional parameter size.
"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.use_rel_pos = use_rel_pos
if self.use_rel_pos:
assert (
input_size is not None
), "Input size must be provided if using relative positional encoding."
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, H, W, _ = x.shape
# qkv with shape (3, B, nHead, H * W, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
# q, k, v with shape (B * nHead, H * W, C)
q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
rel_h, rel_w = None, None
if self.use_rel_pos:
rel_h, rel_w = add_decomposed_rel_pos(q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
q = q.view(B, self.num_heads, H * W, -1)
k = k.view(B, self.num_heads, H * W, -1)
v = v.view(B, self.num_heads, H * W, -1)
if self.use_rel_pos:
rel_h = rel_h.view(B, self.num_heads, rel_h.size(1), rel_h.size(2), rel_h.size(3))
rel_w = rel_w.view(B, self.num_heads, rel_w.size(1), rel_w.size(2), rel_w.size(3))
attn_bias = (rel_h + rel_w).view(B, self.num_heads, rel_h.size(2), rel_h.size(3) * rel_w.size(4))
x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias)
# x = _attention_rel_h_rel_w(q, k, v, rel_h, rel_w)
else:
x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
# qkv = torch.stack([q, k, v], dim=1).transpose(1, 3).reshape(B, H * W, 3, self.num_heads, -1)
# x = flash_attn_qkvpacked_func(qkv, dropout_p=0.0, causal=False).transpose(1, 2)
x = x.view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
x = self.proj(x)
return x
def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
"""
Partition into non-overlapping windows with padding if needed.
Args:
x (tensor): input tokens with [B, H, W, C].
window_size (int): window size.
Returns:
windows: windows after partition with [B * num_windows, window_size, window_size, C].
(Hp, Wp): padded height and width before partition
"""
B, H, W, C = x.shape
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
if pad_h > 0 or pad_w > 0:
x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
Hp, Wp = H + pad_h, W + pad_w
x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows, (Hp, Wp)
def window_unpartition(
windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
) -> torch.Tensor:
"""
Window unpartition into original sequences and removing padding.
Args:
windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
window_size (int): window size.
pad_hw (Tuple): padded height and width (Hp, Wp).
hw (Tuple): original height and width (H, W) before padding.
Returns:
x: unpartitioned sequences with [B, H, W, C].
"""
Hp, Wp = pad_hw
H, W = hw
B = windows.shape[0] // (Hp * Wp // window_size // window_size)
x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
if Hp > H or Wp > W:
x = x[:, :H, :W, :].contiguous()
return x
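# Round-trip sketch: partitioning into windows and unpartitioning restores the
# input exactly (padding added when H, W are not divisible by the window size
# is cropped away again).
def _demo_window_roundtrip():
    x = torch.randn(1, 20, 20, 8)              # padded to 28x28 for window 14
    windows, pad_hw = window_partition(x, 14)
    assert windows.shape == (4, 14, 14, 8)     # (28/14)^2 windows
    y = window_unpartition(windows, 14, pad_hw, (20, 20))
    assert torch.equal(x, y)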
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
"""
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int): size of query q.
k_size (int): size of key k.
rel_pos (Tensor): relative position embeddings (L, C).
Returns:
Extracted positional embeddings according to relative positions.
"""
max_rel_dist = int(2 * max(q_size, k_size) - 1)
# Interpolate rel pos if needed.
if rel_pos.shape[0] != max_rel_dist:
# Interpolate rel pos.
dtype = rel_pos.dtype
rel_pos = rel_pos.to(torch.float32)
rel_pos_resized = F.interpolate(
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
size=max_rel_dist,
mode="linear",
).to(dtype)
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
else:
rel_pos_resized = rel_pos
# Scale the coords with short length if shapes for q and k are different.
q_coords = torch.arange(q_size, device=rel_pos.device)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size, device=rel_pos.device)[None, :] * max(q_size / k_size, 1.0)
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
def add_decomposed_rel_pos(
q: torch.Tensor,
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
q_size: Tuple[int, int],
k_size: Tuple[int, int],
) -> torch.Tensor:
"""
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
Args:
q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
Returns:
rel_h, rel_w (Tuple[Tensor, Tensor]): decomposed relative-position terms for the height and width axes, to be added to the attention logits.
"""
q_h, q_w = q_size
k_h, k_w = k_size
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
Rw = get_rel_pos(q_w, k_w, rel_pos_w)
B, _, dim = q.shape
r_q = q.reshape(B, q_h, q_w, dim)
rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
rel_h = rel_h.unsqueeze(-1)
rel_w = rel_w.unsqueeze(-2)
rel_h = rel_h.reshape(B, q_h * q_w, k_h, 1)
rel_w = rel_w.reshape(B, q_h * q_w, 1, k_w)
return rel_h, rel_w
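# Shape sketch for the decomposed relative-position terms (14x14 query and key
# grids, head_dim 64): rel_h broadcasts over key columns, rel_w over key rows.
def _demo_decomposed_rel_pos():
    q = torch.randn(2, 14 * 14, 64)
    Rh = torch.zeros(2 * 14 - 1, 64)
    Rw = torch.zeros(2 * 14 - 1, 64)
    rel_h, rel_w = add_decomposed_rel_pos(q, Rh, Rw, (14, 14), (14, 14))
    assert rel_h.shape == (2, 14 * 14, 14, 1)
    assert rel_w.shape == (2, 14 * 14, 1, 14)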
class PatchEmbed(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(
self,
kernel_size: Tuple[int, int] = (16, 16),
stride: Tuple[int, int] = (16, 16),
padding: Tuple[int, int] = (0, 0),
in_chans: int = 3,
embed_dim: int = 768,
) -> None:
"""
Args:
kernel_size (Tuple): kernel size of the projection layer.
stride (Tuple): stride of the projection layer.
padding (Tuple): padding size of the projection layer.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
"""
super().__init__()
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.proj(x)
# B C H W -> B H W C
x = x.permute(0, 2, 3, 1)
return x
def build_sam_vit_b(checkpoint=None):
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
def _build_sam(
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
image_encoder=ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
)
if checkpoint is not None:
state_dict = torch.load(checkpoint)
# keep only the 'vision_tower_high' weights, dropping the 30-character key prefix
image_encoder.load_state_dict({k[30:]: v for k, v in state_dict.items() if 'vision_tower_high' in k}, strict=True)
return image_encoder | {
"repo_id": "deepseek-ai/DeepSeek-OCR",
"file_path": "DeepSeek-OCR-master/DeepSeek-OCR-vllm/deepencoder/sam_vary_sdpa.py",
"license": "MIT License",
"lines": 448,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepseek-ai/DeepSeek-OCR:DeepSeek-OCR-master/DeepSeek-OCR-vllm/deepseek_ocr.py |
"""Inference-only Deepseek-OCR model compatible with HuggingFace weights."""
import math
from collections.abc import Iterable, Mapping, Sequence
from typing import List, Literal, Optional, Set, Tuple, TypedDict, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from transformers import BatchFeature
from vllm.config import VllmConfig
from vllm.model_executor import SamplingMetadata
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.model_loader.utils import set_default_torch_dtype
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (MultiModalDataDict, MultiModalFieldConfig,
MultiModalKwargs, NestedTensors)
from vllm.multimodal.parse import (ImageEmbeddingItems, ImageProcessorItems,
ImageSize, MultiModalDataItems)
from vllm.multimodal.processing import (BaseMultiModalProcessor,
BaseProcessingInfo, PromptReplacement,
PromptUpdate)
from vllm.multimodal.profiling import BaseDummyInputsBuilder
from vllm.sequence import IntermediateTensors
from vllm.transformers_utils.configs.deepseek_vl2 import (DeepseekVLV2Config,
MlpProjectorConfig,
VisionEncoderConfig)
from process.image_process import (
DeepseekOCRProcessor, count_tiles)
from vllm.transformers_utils.tokenizer import cached_tokenizer_from_config
# from vllm.utils import is_list_of
from vllm.model_executor.models.interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsPP
from vllm.model_executor.models.utils import (AutoWeightsLoader, WeightsMapper, flatten_bn,
init_vllm_registered_model, maybe_prefix,
merge_multimodal_embeddings)
from deepencoder.sam_vary_sdpa import build_sam_vit_b
from deepencoder.clip_sdpa import build_clip_l
from deepencoder.build_linear import MlpProjector
from addict import Dict
from config import IMAGE_SIZE, BASE_SIZE, CROP_MODE, PRINT_NUM_VIS_TOKENS, PROMPT
# The image token id may vary across tokenizers
_IMAGE_TOKEN = "<image>"
class DeepseekOCRProcessingInfo(BaseProcessingInfo):
def get_hf_config(self):
return self.ctx.get_hf_config(DeepseekVLV2Config)
def get_hf_processor(self, **kwargs: object):
return self.ctx.get_hf_processor(DeepseekOCRProcessor, **kwargs)
def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
return {"image": None}
def get_num_image_tokens(self,
*,
image_width: int,
image_height: int,
cropping: bool = True) -> int:
image_size = IMAGE_SIZE
base_size = BASE_SIZE
patch_size = 16
downsample_ratio = 4
if CROP_MODE:
if image_width <= 640 and image_height <= 640:
crop_ratio = [1, 1]
else:
# images_crop_raw, crop_ratio = hf_processor.dynamic_preprocess(image)
# find the closest aspect ratio to the target
crop_ratio = count_tiles(image_width, image_height, image_size=IMAGE_SIZE)
num_width_tiles, num_height_tiles = crop_ratio
else:
num_width_tiles = num_height_tiles = 1
h = w = math.ceil((base_size // patch_size) / downsample_ratio)
h2 = w2 = math.ceil((image_size // patch_size) / downsample_ratio)
global_views_tokens = h * (w + 1)
if num_width_tiles > 1 or num_height_tiles > 1:
local_views_tokens = (num_height_tiles * h2) * (num_width_tiles * w2 + 1)
else:
local_views_tokens = 0
return global_views_tokens + local_views_tokens + 1
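# Worked example (a sketch assuming BASE_SIZE=1024, IMAGE_SIZE=640 and a 2x1
# tiling): h = w = ceil((1024/16)/4) = 16 gives 16*(16+1) = 272 global tokens;
# h2 = w2 = ceil((640/16)/4) = 10 gives (1*10)*(2*10+1) = 210 local tokens;
# with the trailing separator token the total is 272 + 210 + 1 = 483.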
def get_image_size_with_most_features(self) -> ImageSize:
if IMAGE_SIZE == 1024 and BASE_SIZE == 1280:
return ImageSize(width=1024*2, height=1024*2)
return ImageSize(width=640*2, height=640*2)
class DeepseekOCRDummyInputsBuilder(
BaseDummyInputsBuilder[DeepseekOCRProcessingInfo]):
def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
num_images = mm_counts.get("image", 0)
processor = self.info.get_hf_processor()
image_token = processor.image_token
return image_token * num_images
def get_dummy_mm_data(
self,
seq_len: int,
mm_counts: Mapping[str, int],
) -> MultiModalDataDict:
num_images = mm_counts.get("image", 0)
max_image_size = self.info.get_image_size_with_most_features()
if '<image>' in PROMPT:
return {
"image":
DeepseekOCRProcessor().tokenize_with_images(images = self._get_dummy_images(width=max_image_size.width,
height=max_image_size.height,
num_images=num_images), bos=True, eos=True, cropping=CROP_MODE)
}
else:
return {
"image": []
}
class DeepseekOCRMultiModalProcessor(
BaseMultiModalProcessor[DeepseekOCRProcessingInfo]):
def _call_hf_processor(
self,
prompt: str,
mm_data: Mapping[str, object],
mm_kwargs: Mapping[str, object],
) -> BatchFeature:
# print(mm_data)
if mm_data:
processed_outputs = self.info.ctx.call_hf_processor(
self.info.get_hf_processor(**mm_kwargs),
dict(prompt=prompt, **mm_data),
mm_kwargs,
)
else:
tokenizer = self.info.get_tokenizer()
processed_outputs = tokenizer(prompt,
add_special_tokens=True,
return_tensors="pt")
return processed_outputs
def _get_mm_fields_config(
self,
hf_inputs: BatchFeature,
hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]:
return dict(
pixel_values=MultiModalFieldConfig.batched("image"),
images_spatial_crop=MultiModalFieldConfig.batched("image"),
# image_embeds=MultiModalFieldConfig.batched("image2"),
images_crop=MultiModalFieldConfig.batched("image"),
)
def _get_prompt_updates(
self,
mm_items: MultiModalDataItems,
hf_processor_mm_kwargs: Mapping[str, object],
out_mm_kwargs: MultiModalKwargs,
) -> Sequence[PromptUpdate]:
hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
image_token_id = hf_processor.image_token_id
assert isinstance(image_token_id, int)
def get_replacement_deepseek_vl2(item_idx: int):
images = mm_items.get_items(
"image", (ImageEmbeddingItems, ImageProcessorItems))
if isinstance(images, ImageEmbeddingItems):
num_image_tokens = images.get_feature_size(item_idx)
else:
# image_shapes is the last element returned by tokenize_with_images;
# its first entry holds the first image's (width, height)
width = images[0][-1][0][0]
height = images[0][-1][0][1]
num_image_tokens = self.info.get_num_image_tokens(
image_width=width,
image_height=height,
# flag = True,
cropping=CROP_MODE,
)
return [image_token_id] * num_image_tokens
return [
PromptReplacement(
modality="image",
target=[image_token_id],
replacement=get_replacement_deepseek_vl2,
)
]
def _cached_apply_hf_processor(
self,
prompt: Union[str, list[int]],
mm_data_items: MultiModalDataItems,
hf_processor_mm_kwargs: Mapping[str, object],
) -> tuple[list[int], MultiModalKwargs, bool]:
# The processor logic is different for len(images) <= 2 vs > 2
# Since the processing cache assumes that the processor output is
# invariant of how many images are passed per prompt, we only
# perform caching for the most common case
if mm_data_items.get_count("image", strict=False) > 2:
# This code path corresponds to the cache being disabled
return self._apply_hf_processor_main(
prompt=prompt,
mm_items=mm_data_items,
hf_processor_mm_kwargs=hf_processor_mm_kwargs,
enable_hf_prompt_update=True,
)
return super()._cached_apply_hf_processor(
prompt=prompt,
mm_data_items=mm_data_items,
hf_processor_mm_kwargs=hf_processor_mm_kwargs,
)
@MULTIMODAL_REGISTRY.register_processor(
DeepseekOCRMultiModalProcessor,
info=DeepseekOCRProcessingInfo,
dummy_inputs=DeepseekOCRDummyInputsBuilder)
class DeepseekOCRForCausalLM(nn.Module, SupportsMultiModal, SupportsPP):
hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={
"language.": "language_model.",
})
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
super().__init__()
config: DeepseekVLV2Config = vllm_config.model_config.hf_config
quant_config = vllm_config.quant_config
multimodal_config = vllm_config.model_config.multimodal_config
# config.model_type ='deepseek_vl_v2'
self.config = config
self.multimodal_config = multimodal_config
self.vision_config = config.vision_config
self.projector_config = config.projector_config
self.text_config = config.text_config
model_config = vllm_config.model_config
tokenizer = cached_tokenizer_from_config(model_config)
self.image_token_id = tokenizer.vocab[_IMAGE_TOKEN]
self.sam_model = build_sam_vit_b()
self.vision_model = build_clip_l()
n_embed = 1280
self.projector = MlpProjector(Dict(projector_type="linear", input_dim=2048, n_embed=n_embed))
self.tile_tag = config.tile_tag
self.global_view_pos = config.global_view_pos
# self.sam_model = torch.compile(self.sam_model, mode="reduce-overhead")
# self.vision_model = torch.compile(self.vision_model, mode="reduce-overhead")
# self.projector = torch.compile(self.projector, mode="max-autotune")
# special token for image token sequence format
embed_std = 1 / torch.sqrt(torch.tensor(n_embed, dtype=torch.float32))
if self.tile_tag == "2D":
# <|view_separator|>, <|\n|>
self.image_newline = nn.Parameter(torch.randn(n_embed) * embed_std)
self.view_seperator = nn.Parameter(torch.randn(n_embed) * embed_std)
else:
raise ValueError(
f"Only 2D tile_tag is supported currently, got: {self.tile_tag}"
)
if self.text_config.topk_method == "noaux_tc":
architectures = ["DeepseekV3ForCausalLM"]
elif not self.text_config.use_mla:
architectures = ["DeepseekForCausalLM"]
else:
architectures = ["DeepseekV2ForCausalLM"]
self.language_model = init_vllm_registered_model(
vllm_config=vllm_config,
hf_config=self.text_config,
prefix=maybe_prefix(prefix, "language"),
architectures=architectures,
)
self.make_empty_intermediate_tensors = (
self.language_model.make_empty_intermediate_tensors)
def _parse_and_validate_image_input(
self, **kwargs: object):
pixel_values = kwargs.pop("pixel_values", None)
images_spatial_crop = kwargs.pop("images_spatial_crop", None)
images_crop = kwargs.pop("images_crop", None)
if pixel_values is None or torch.sum(pixel_values).item() == 0:
return None
if not isinstance(pixel_values, (torch.Tensor, list)):
raise ValueError("Incorrect type of pixel values. "
f"Got type: {type(pixel_values)}")
if not isinstance(images_spatial_crop, (torch.Tensor, list)):
raise ValueError("Incorrect type of image sizes. "
f"Got type: {type(images_spatial_crop)}")
if not isinstance(images_crop, (torch.Tensor, list)):
raise ValueError("Incorrect type of image crop. "
f"Got type: {type(images_crop)}")
return [pixel_values, images_crop, images_spatial_crop]
def _pixel_values_to_embedding(
self,
pixel_values: torch.Tensor,
images_crop: torch.Tensor,
images_spatial_crop: torch.Tensor,
) -> NestedTensors:
# Pixel_values (global view): [n_image, batch_size, 3, height, width]
# images_spatial_crop: [n_image, batch_size, [num_tiles_w, num_tiles_h]]
# images_crop (local view): [n_image, batch_size, num_pathes, 3, h, w]
# split the pixel and image_crop, all batch_size = 1
images_in_this_batch = []
with torch.no_grad():
for jdx in range(images_spatial_crop.size(0)):
# with torch.set_grad_enabled(False):
patches = images_crop[jdx][0].to(torch.bfloat16) # batch_size = 1
image_ori = pixel_values[jdx]
crop_shape = images_spatial_crop[jdx][0]
if torch.sum(patches).item() != 0: # if all values = 0, no crop
# P, C, H, W = patches.shape
# crop_flag = 1
local_features_1 = self.sam_model(patches)
#TODO del patches
# torch.compiler.cudagraph_mark_step_begin()
local_features_2 = self.vision_model(patches, local_features_1)
local_features = torch.cat((local_features_2[:, 1:], local_features_1.flatten(2).permute(0, 2, 1)), dim=-1)
local_features = self.projector(local_features)
global_features_1 = self.sam_model(image_ori)
global_features_2 = self.vision_model(image_ori, global_features_1)
global_features = torch.cat((global_features_2[:, 1:], global_features_1.flatten(2).permute(0, 2, 1)), dim=-1)
global_features = self.projector(global_features)
if PRINT_NUM_VIS_TOKENS:
print('=====================')
print('BASE: ', global_features.shape)
print('PATCHES: ', local_features.shape)
print('=====================')
_, hw, n_dim = global_features.shape
h = w = int(hw ** 0.5)
_2, hw2, n_dim2 = local_features.shape
h2 = w2 = int(hw2 ** 0.5)
width_crop_num, height_crop_num = crop_shape[0], crop_shape[1]
global_features = global_features.view(h, w, n_dim)
global_features = torch.cat(
[global_features, self.image_newline[None, None, :].expand(h, 1, n_dim)], dim=1
)
global_features = global_features.view(-1, n_dim)
local_features = local_features.view(height_crop_num, width_crop_num, h2, w2, n_dim2).permute(0, 2, 1, 3, 4).reshape(height_crop_num*h2, width_crop_num*w2, n_dim2)
local_features = torch.cat(
[local_features, self.image_newline[None, None, :].expand(height_crop_num * h2, 1, n_dim2)], dim=1
)
local_features = local_features.view(-1, n_dim2)
global_local_features = torch.cat([local_features, global_features, self.view_seperator[None, :]], dim=0)
else:
global_features_1 = self.sam_model(image_ori)
global_features_2 = self.vision_model(image_ori, global_features_1)
global_features = torch.cat((global_features_2[:, 1:], global_features_1.flatten(2).permute(0, 2, 1)), dim=-1)
global_features = self.projector(global_features)
if PRINT_NUM_VIS_TOKENS:
print('=====================')
print('BASE: ', global_features.shape)
print('NO PATCHES')
print('=====================')
_, hw, n_dim = global_features.shape
h = w = int(hw ** 0.5)
global_features = global_features.view(h, w, n_dim)
global_features = torch.cat(
[global_features, self.image_newline[None, None, :].expand(h, 1, n_dim)], dim=1
)
global_features = global_features.view(-1, n_dim)
global_local_features = torch.cat([global_features, self.view_seperator[None, :]], dim=0)
images_in_this_batch.append(global_local_features)
return images_in_this_batch
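# Shape sketch for one 640px tile flowing through the two encoders above:
# sam_model -> (P, 1024, 10, 10); vision_model consumes those patches and
# returns (P, 101, 1024); dropping the cls token and concatenating with the
# flattened SAM map gives (P, 100, 2048), which the projector maps to
# (P, 100, n_embed).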
def _process_image_input(
self, image_input) -> torch.Tensor:
# image_input: [pixel_values, images_crop, images_spatial_crop]
pixel_values = image_input[0].to(torch.bfloat16)
images_crop = image_input[1]
images_spatial_crop = image_input[2].to(dtype=torch.long)
vision_features = self._pixel_values_to_embedding(
pixel_values=pixel_values, images_crop=images_crop, images_spatial_crop=images_spatial_crop)
return vision_features
def get_language_model(self) -> torch.nn.Module:
return self.language_model
def get_multimodal_embeddings(
self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
image_input = self._parse_and_validate_image_input(**kwargs)
if image_input is None:
return None
vision_embeddings = self._process_image_input(image_input)
return vision_embeddings
def get_input_embeddings(
self,
input_ids: torch.Tensor,
multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
) -> torch.Tensor:
inputs_embeds = self.language_model.get_input_embeddings(input_ids)
if multimodal_embeddings is not None:
inputs_embeds = merge_multimodal_embeddings(
input_ids, inputs_embeds, multimodal_embeddings,
self.image_token_id)
return inputs_embeds
def forward(self,
input_ids: torch.Tensor,
positions: torch.Tensor,
intermediate_tensors: Optional[IntermediateTensors] = None,
inputs_embeds: Optional[torch.Tensor] = None,
**kwargs: object):
if intermediate_tensors is not None:
inputs_embeds = None
# NOTE: In v1, inputs_embeds is always generated at model runner, this
# condition is for v0 compatibility
elif inputs_embeds is None:
vision_embeddings = self.get_multimodal_embeddings(**kwargs)
inputs_embeds = self.get_input_embeddings(input_ids,
vision_embeddings)
input_ids = None
hidden_states = self.language_model(input_ids,
positions,
intermediate_tensors,
inputs_embeds=inputs_embeds)
return hidden_states
def compute_logits(
self,
hidden_states: torch.Tensor,
sampling_metadata: SamplingMetadata,
) -> Optional[torch.Tensor]:
return self.language_model.compute_logits(hidden_states,
sampling_metadata)
def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]:
processed_weights = []
for name, tensor in weights:
if 'sam_model' in name or 'vision_model' in name or 'projector' in name or 'image_newline' in name or 'view_seperator' in name:
new_name = name.replace('model.', '', 1)
else:
new_name = 'language.' + name
processed_weights.append((new_name, tensor))
loader = AutoWeightsLoader(self)
autoloaded_weights = loader.load_weights(processed_weights, mapper=self.hf_to_vllm_mapper)
return autoloaded_weights
| {
"repo_id": "deepseek-ai/DeepSeek-OCR",
"file_path": "DeepSeek-OCR-master/DeepSeek-OCR-vllm/deepseek_ocr.py",
"license": "MIT License",
"lines": 425,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepseek-ai/DeepSeek-OCR:DeepSeek-OCR-master/DeepSeek-OCR-vllm/process/image_process.py | import math
from typing import List, Tuple
import torch
import torchvision.transforms as T
from PIL import Image, ImageOps
from transformers import AutoProcessor, BatchFeature, LlamaTokenizerFast
from transformers.processing_utils import ProcessorMixin
from config import IMAGE_SIZE, BASE_SIZE, CROP_MODE, MIN_CROPS, MAX_CROPS, PROMPT, TOKENIZER
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
best_ratio_diff = float('inf')
best_ratio = (1, 1)
area = width * height
for ratio in target_ratios:
target_aspect_ratio = ratio[0] / ratio[1]
ratio_diff = abs(aspect_ratio - target_aspect_ratio)
if ratio_diff < best_ratio_diff:
best_ratio_diff = ratio_diff
best_ratio = ratio
elif ratio_diff == best_ratio_diff:
if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
best_ratio = ratio
# print(f'width: {width}, height: {height}, best_ratio: {best_ratio}')
return best_ratio
def count_tiles(orig_width, orig_height, min_num=MIN_CROPS, max_num=MAX_CROPS, image_size=640, use_thumbnail=False):
aspect_ratio = orig_width / orig_height
# calculate the existing image aspect ratio
target_ratios = set(
(i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
i * j <= max_num and i * j >= min_num)
# print(target_ratios)
target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
# find the closest aspect ratio to the target
target_aspect_ratio = find_closest_aspect_ratio(
aspect_ratio, target_ratios, orig_width, orig_height, image_size)
return target_aspect_ratio
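# A minimal sketch (min/max given explicitly so it does not depend on the
# configured MIN_CROPS/MAX_CROPS): a 2:1 landscape page maps to a 2x1 grid.
def _demo_count_tiles():
    assert count_tiles(1280, 640, min_num=2, max_num=6, image_size=640) == (2, 1)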
def dynamic_preprocess(image, min_num=MIN_CROPS, max_num=MAX_CROPS, image_size=640, use_thumbnail=False):
orig_width, orig_height = image.size
aspect_ratio = orig_width / orig_height
# calculate the existing image aspect ratio
target_ratios = set(
(i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
i * j <= max_num and i * j >= min_num)
# print(target_ratios)
target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
# find the closest aspect ratio to the target
target_aspect_ratio = find_closest_aspect_ratio(
aspect_ratio, target_ratios, orig_width, orig_height, image_size)
# print(target_aspect_ratio)
# calculate the target width and height
target_width = image_size * target_aspect_ratio[0]
target_height = image_size * target_aspect_ratio[1]
blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
# resize the image
resized_img = image.resize((target_width, target_height))
processed_images = []
for i in range(blocks):
box = (
(i % (target_width // image_size)) * image_size,
(i // (target_width // image_size)) * image_size,
((i % (target_width // image_size)) + 1) * image_size,
((i // (target_width // image_size)) + 1) * image_size
)
# split the image
split_img = resized_img.crop(box)
processed_images.append(split_img)
assert len(processed_images) == blocks
if use_thumbnail and len(processed_images) != 1:
thumbnail_img = image.resize((image_size, image_size))
processed_images.append(thumbnail_img)
return processed_images, target_aspect_ratio
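# Usage sketch: a 1280x640 image is resized to the chosen 2x1 grid and split
# into two 640x640 tiles (explicit min/max, as in the sketch above).
def _demo_dynamic_preprocess():
    img = Image.new("RGB", (1280, 640))
    tiles, ratio = dynamic_preprocess(img, min_num=2, max_num=6, image_size=640)
    assert ratio == (2, 1) and len(tiles) == 2
    assert all(t.size == (640, 640) for t in tiles)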
class ImageTransform:
def __init__(self,
mean: Tuple[float, float, float] = (0.5, 0.5, 0.5),
std: Tuple[float, float, float] = (0.5, 0.5, 0.5),
normalize: bool = True):
self.mean = mean
self.std = std
self.normalize = normalize
transform_pipelines = [T.ToTensor()]
if normalize:
transform_pipelines.append(T.Normalize(mean, std))
self.transform = T.Compose(transform_pipelines)
def __call__(self, pil_img: Image.Image):
x = self.transform(pil_img)
return x
class DeepseekOCRProcessor(ProcessorMixin):
tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")
attributes = ["tokenizer"]
def __init__(
self,
tokenizer: LlamaTokenizerFast = TOKENIZER,
candidate_resolutions: Tuple[Tuple[int, int]] = [[1024, 1024]],
patch_size: int = 16,
downsample_ratio: int = 4,
image_mean: Tuple[float, float, float] = (0.5, 0.5, 0.5),
image_std: Tuple[float, float, float] = (0.5, 0.5, 0.5),
normalize: bool = True,
image_token: str = "<image>",
pad_token: str = "<|▁pad▁|>",
add_special_token: bool = False,
sft_format: str = "deepseek",
mask_prompt: bool = True,
ignore_id: int = -100,
**kwargs,
):
# self.candidate_resolutions = candidate_resolutions # placeholder no use
self.image_size = IMAGE_SIZE
self.base_size = BASE_SIZE
# self.patch_size = patch_size
self.patch_size = 16
self.image_mean = image_mean
self.image_std = image_std
self.normalize = normalize
# self.downsample_ratio = downsample_ratio
self.downsample_ratio = 4
self.image_transform = ImageTransform(mean=image_mean, std=image_std, normalize=normalize)
self.tokenizer = tokenizer
# self.tokenizer = add_special_token(tokenizer)
self.tokenizer.padding_side = 'left'  # must set this: padding side makes a difference in batch inference
# add the pad_token as special token to use 'tokenizer.pad_token' and 'tokenizer.pad_token_id'
if self.tokenizer.pad_token is None:
self.tokenizer.add_special_tokens({'pad_token': pad_token})
# add image token
# image_token_id = self.tokenizer.vocab.get(image_token)
# if image_token_id is None:
# special_tokens = [image_token]
# special_tokens_dict = {"additional_special_tokens": special_tokens}
# self.tokenizer.add_special_tokens(special_tokens_dict)
self.image_token_id = self.tokenizer.vocab.get(image_token)
# add five special tokens for grounding-related tasks
# <|ref|>, <|/ref|>, <|det|>, <|/det|>, <|grounding|>
# special_tokens = ['<|ref|>', '<|/ref|>', '<|det|>', '<|/det|>', '<|grounding|>']
# special_tokens_dict = {"additional_special_tokens": special_tokens}
# special_tokens = ['<image>','<|ref|>', '<|/ref|>', '<|det|>', '<|/det|>', '<|grounding|>', '<td>', '</td>', '<tr>', '</tr>']
# special_tokens_dict = {"additional_special_tokens": special_tokens}
# self.tokenizer.add_special_tokens(special_tokens_dict)
# # add special tokens for SFT data
# special_tokens = ["<|User|>", "<|Assistant|>"]
# special_tokens_dict = {"additional_special_tokens": special_tokens}
# self.tokenizer.add_special_tokens(special_tokens_dict)
self.image_token = image_token
self.pad_token = pad_token
self.add_special_token = add_special_token
self.sft_format = sft_format
self.mask_prompt = mask_prompt
self.ignore_id = ignore_id
super().__init__(
tokenizer,
**kwargs,
)
# def select_best_resolution(self, image_size):
# # used for cropping
# original_width, original_height = image_size
# best_fit = None
# max_effective_resolution = 0
# min_wasted_resolution = float("inf")
# for width, height in self.candidate_resolutions:
# scale = min(width / original_width, height / original_height)
# downscaled_width, downscaled_height = int(
# original_width * scale), int(original_height * scale)
# effective_resolution = min(downscaled_width * downscaled_height,
# original_width * original_height)
# wasted_resolution = (width * height) - effective_resolution
# if effective_resolution > max_effective_resolution or (
# effective_resolution == max_effective_resolution
# and wasted_resolution < min_wasted_resolution):
# max_effective_resolution = effective_resolution
# min_wasted_resolution = wasted_resolution
# best_fit = (width, height)
# return best_fit
@property
def bos_id(self):
return self.tokenizer.bos_token_id
@property
def eos_id(self):
return self.tokenizer.eos_token_id
@property
def pad_id(self):
return self.tokenizer.pad_token_id
def encode(self, text: str, bos: bool = True, eos: bool = False):
t = self.tokenizer.encode(text, add_special_tokens=False)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int], **kwargs) -> str:
return self.tokenizer.decode(t, **kwargs)
def process_one(
self,
prompt: str,
images: List,
inference_mode: bool = True,
**kwargs,
):
"""
Args:
prompt (str): the formatted prompt;
images (List[ImageType]): the list of images;
inference_mode (bool): if True, then remove the last eos token;
**kwargs:
Returns:
outputs (BaseProcessorOutput): the output of the processor,
- input_ids (torch.LongTensor): [N + image tokens]
- target_ids (torch.LongTensor): [N + image tokens]
- pixel_values (torch.FloatTensor): [n_patches, 3, H, W]
- image_id (int): the id of the image token
- num_image_tokens (List[int]): the number of image tokens
"""
assert (prompt is not None and images is not None
), "prompt and images must be used at the same time."
input_ids, pixel_values, images_crop, images_seq_mask, images_spatial_crop, num_image_tokens, _ = images[0]
return {
"input_ids": input_ids,
"pixel_values": pixel_values,
"images_crop": images_crop,
"images_seq_mask": images_seq_mask,
"images_spatial_crop": images_spatial_crop,
"num_image_tokens": num_image_tokens,
}
# prepare = BatchFeature(
# data=dict(
# input_ids=input_ids,
# pixel_values=pixel_values,
# images_crop = images_crop,
# images_seq_mask=images_seq_mask,
# images_spatial_crop=images_spatial_crop,
# num_image_tokens=num_image_tokens,
# ),
# tensor_type="pt",
# )
# return prepare
def __call__(
self,
*,
prompt: str,
images: List,
inference_mode: bool = True,
**kwargs,
):
"""
Args:
prompt (str): the formatted prompt;
images (List[ImageType]): the list of images;
inference_mode (bool): if True, then remove the last eos token;
**kwargs:
Returns:
outputs (BaseProcessorOutput): the output of the processor,
- input_ids (torch.LongTensor): [N + image tokens]
- images (torch.FloatTensor): [n_images, 3, H, W]
- image_id (int): the id of the image token
- num_image_tokens (List[int]): the number of image tokens
"""
prepare = self.process_one(
prompt=prompt,
images=images,
inference_mode=inference_mode,
)
return prepare
def tokenize_with_images(
self,
# conversation: str,
images: List[Image.Image],
bos: bool = True,
eos: bool = True,
cropping: bool = True,
):
"""Tokenize text with <image> tags."""
# print(conversation)
conversation = PROMPT
assert conversation.count(self.image_token) == len(images)
text_splits = conversation.split(self.image_token)
images_list, images_crop_list, images_seq_mask, images_spatial_crop = [], [], [], []
image_shapes = []
num_image_tokens = []
tokenized_str = []
# print('image: ', len(images))
for text_sep, image in zip(text_splits, images):
"""encode text_sep"""
tokenized_sep = self.encode(text_sep, bos=False, eos=False)
tokenized_str += tokenized_sep
images_seq_mask += [False] * len(tokenized_sep)
"""select best resolution for anyres"""
# if cropping:
# best_width, best_height = self.select_best_resolution(image.size)
# else:
# best_width, best_height = self.image_size, self.image_size
image_shapes.append(image.size)
if image.size[0] <= 640 and image.size[1] <= 640:
crop_ratio = [1, 1]
else:
if cropping:
# print('image-size: ', image.size)
# best_width, best_height = select_best_resolution(image.size, self.candidate_resolutions)
# print('image ', image.size)
# print('open_size:', image.size)
images_crop_raw, crop_ratio = dynamic_preprocess(image, image_size=IMAGE_SIZE)
# print('crop_ratio: ', crop_ratio)
else:
# best_width, best_height = self.image_size, self.image_size
crop_ratio = [1, 1]
# print(image.size, (best_width, best_height)) # check the select_best_resolutions func
# print(crop_ratio)
"""process the global view"""
# if cropping
if self.image_size <= 640 and not cropping:
# print('directly resize')
image = image.resize((self.image_size, self.image_size))
global_view = ImageOps.pad(image, (self.base_size, self.base_size),
color=tuple(int(x * 255) for x in self.image_transform.mean))
images_list.append(self.image_transform(global_view))
"""record height / width crop num"""
# width_crop_num, height_crop_num = best_width // self.image_size, best_height // self.image_size
num_width_tiles, num_height_tiles = crop_ratio
images_spatial_crop.append([num_width_tiles, num_height_tiles])
if num_width_tiles > 1 or num_height_tiles > 1:
"""process the local views"""
# local_view = ImageOps.pad(image, (best_width, best_height),
# color=tuple(int(x * 255) for x in self.image_transform.mean))
# for i in range(0, best_height, self.image_size):
# for j in range(0, best_width, self.image_size):
# images_crop_list.append(
# self.image_transform(local_view.crop((j, i, j + self.image_size, i + self.image_size))))
for i in range(len(images_crop_raw)):
images_crop_list.append(self.image_transform(images_crop_raw[i]))
# """process the global view"""
# global_view = ImageOps.pad(image, (self.image_size, self.image_size),
# color=tuple(int(x * 255) for x in self.image_transform.mean))
# images_list.append(self.image_transform(global_view))
# """process the local views"""
# local_view = ImageOps.pad(image, (best_width, best_height),
# color=tuple(int(x * 255) for x in self.image_transform.mean))
# for i in range(0, best_height, self.image_size):
# for j in range(0, best_width, self.image_size):
# images_list.append(
# self.image_transform(local_view.crop((j, i, j + self.image_size, i + self.image_size))))
# """add image tokens"""
"""add image tokens"""
num_queries = math.ceil((self.image_size // self.patch_size) / self.downsample_ratio)
num_queries_base = math.ceil((self.base_size // self.patch_size) / self.downsample_ratio)
tokenized_image = ([self.image_token_id] * num_queries_base + [self.image_token_id]) * num_queries_base
tokenized_image += [self.image_token_id]
if num_width_tiles > 1 or num_height_tiles > 1:
tokenized_image += ([self.image_token_id] * (num_queries * num_width_tiles) + [self.image_token_id]) * (
num_queries * num_height_tiles)
tokenized_str += tokenized_image
images_seq_mask += [True] * len(tokenized_image)
num_image_tokens.append(len(tokenized_image))
"""process the last text split"""
tokenized_sep = self.encode(text_splits[-1], bos=False, eos=False)
tokenized_str += tokenized_sep
images_seq_mask += [False] * len(tokenized_sep)
"""add the bos and eos tokens"""
if bos:
tokenized_str = [self.bos_id] + tokenized_str
images_seq_mask = [False] + images_seq_mask
if eos:
tokenized_str = tokenized_str + [self.eos_id]
images_seq_mask = images_seq_mask + [False]
assert len(tokenized_str) == len(
images_seq_mask), f"tokenize_with_images func: tokenized_str's length {len(tokenized_str)} is not equal to images_seq_mask's length {len(images_seq_mask)}"
masked_tokenized_str = []
for token_index in tokenized_str:
if token_index != self.image_token_id:
masked_tokenized_str.append(token_index)
else:
masked_tokenized_str.append(self.ignore_id)
assert len(tokenized_str) == len(images_seq_mask) == len(masked_tokenized_str), \
(f"tokenized_str's length {len(tokenized_str)}, input_ids' length {len(masked_tokenized_str)}, "
f"imags_seq_mask's length {len(images_seq_mask)}, are not equal")
input_ids = torch.LongTensor(tokenized_str)
target_ids = torch.LongTensor(masked_tokenized_str)
images_seq_mask = torch.tensor(images_seq_mask, dtype=torch.bool)
# set input_ids < 0 | input_ids == self.image_token_id as ignore_id
target_ids[(input_ids < 0) |
(input_ids == self.image_token_id)] = self.ignore_id
input_ids[input_ids < 0] = self.pad_id
        inference_mode = True  # this processing path is inference-only; the trailing eos is stripped below
if inference_mode:
# Remove the ending eos token
assert input_ids[-1] == self.eos_id
input_ids = input_ids[:-1]
target_ids = target_ids[:-1]
images_seq_mask = images_seq_mask[:-1]
if len(images_list) == 0:
pixel_values = torch.zeros((1, 3, self.base_size, self.base_size))
images_spatial_crop = torch.zeros((1, 1), dtype=torch.long)
images_crop = torch.zeros((1, 3, self.image_size, self.image_size)).unsqueeze(0)
else:
pixel_values = torch.stack(images_list, dim=0)
images_spatial_crop = torch.tensor(images_spatial_crop, dtype=torch.long)
if images_crop_list:
images_crop = torch.stack(images_crop_list, dim=0).unsqueeze(0)
else:
images_crop = torch.zeros((1, 3, self.image_size, self.image_size)).unsqueeze(0)
input_ids = input_ids.unsqueeze(0)
return [[input_ids, pixel_values, images_crop, images_seq_mask, images_spatial_crop, num_image_tokens, image_shapes]]
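# Worked example of the image-token arithmetic in tokenize_with_images above
# (illustrative sizes only, not necessarily the model's actual config): with
# base_size=1024, patch_size=16 and downsample_ratio=4,
# num_queries_base = ceil((1024 // 16) / 4) = 16, so the global view contributes
# (16 + 1) * 16 + 1 = 273 tokens -- one separator token per row of queries plus
# one trailing separator. A 2x3 tile grid with image_size=640 (num_queries = 10)
# would then add (10 * 2 + 1) * (10 * 3) = 630 more tokens.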
AutoProcessor.register("DeepseekVLV2Processor", DeepseekOCRProcessor)
| {
"repo_id": "deepseek-ai/DeepSeek-OCR",
"file_path": "DeepSeek-OCR-master/DeepSeek-OCR-vllm/process/image_process.py",
"license": "MIT License",
"lines": 401,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepseek-ai/DeepSeek-OCR:DeepSeek-OCR-master/DeepSeek-OCR-vllm/process/ngram_norepeat.py | import torch
from transformers import LogitsProcessor
from transformers.generation.logits_process import _calc_banned_ngram_tokens
from typing import List, Set
class NoRepeatNGramLogitsProcessor(LogitsProcessor):
def __init__(self, ngram_size: int, window_size: int = 100, whitelist_token_ids: set = None):
if not isinstance(ngram_size, int) or ngram_size <= 0:
raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}")
if not isinstance(window_size, int) or window_size <= 0:
raise ValueError(f"`window_size` has to be a strictly positive integer, but is {window_size}")
self.ngram_size = ngram_size
self.window_size = window_size
self.whitelist_token_ids = whitelist_token_ids or set()
def __call__(self, input_ids: List[int], scores: torch.FloatTensor) -> torch.FloatTensor:
if len(input_ids) < self.ngram_size:
return scores
current_prefix = tuple(input_ids[-(self.ngram_size - 1):])
search_start = max(0, len(input_ids) - self.window_size)
search_end = len(input_ids) - self.ngram_size + 1
banned_tokens = set()
for i in range(search_start, search_end):
ngram = tuple(input_ids[i:i + self.ngram_size])
if ngram[:-1] == current_prefix:
banned_tokens.add(ngram[-1])
banned_tokens = banned_tokens - self.whitelist_token_ids
if banned_tokens:
scores = scores.clone()
for token in banned_tokens:
scores[token] = -float("inf")
return scores | {
"repo_id": "deepseek-ai/DeepSeek-OCR",
"file_path": "DeepSeek-OCR-master/DeepSeek-OCR-vllm/process/ngram_norepeat.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
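A minimal usage sketch for NoRepeatNGramLogitsProcessor above (an illustrative addendum, not part of the repo; it assumes the module is importable as process.ngram_norepeat, as the runner scripts below do):

import torch
from process.ngram_norepeat import NoRepeatNGramLogitsProcessor

proc = NoRepeatNGramLogitsProcessor(ngram_size=3, window_size=100)
ids = [5, 9, 7, 5, 9]                # the 3-gram (5, 9, 7) already occurred
scores = proc(ids, torch.zeros(16))  # logits over a toy 16-token vocabulary
assert scores[7] == -float("inf")    # 7 would complete a repeated 3-gram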
deepseek-ai/DeepSeek-OCR:DeepSeek-OCR-master/DeepSeek-OCR-vllm/run_dpsk_ocr_eval_batch.py | import os
import re
from tqdm import tqdm
import torch
if torch.version.cuda == '11.8':
os.environ["TRITON_PTXAS_PATH"] = "/usr/local/cuda-11.8/bin/ptxas"
os.environ['VLLM_USE_V1'] = '0'
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
from config import MODEL_PATH, INPUT_PATH, OUTPUT_PATH, PROMPT, MAX_CONCURRENCY, CROP_MODE, NUM_WORKERS
from concurrent.futures import ThreadPoolExecutor
import glob
from PIL import Image
from deepseek_ocr import DeepseekOCRForCausalLM
from vllm.model_executor.models.registry import ModelRegistry
from vllm import LLM, SamplingParams
from process.ngram_norepeat import NoRepeatNGramLogitsProcessor
from process.image_process import DeepseekOCRProcessor
ModelRegistry.register_model("DeepseekOCRForCausalLM", DeepseekOCRForCausalLM)
llm = LLM(
model=MODEL_PATH,
hf_overrides={"architectures": ["DeepseekOCRForCausalLM"]},
block_size=256,
enforce_eager=False,
trust_remote_code=True,
max_model_len=8192,
swap_space=0,
max_num_seqs = MAX_CONCURRENCY,
tensor_parallel_size=1,
gpu_memory_utilization=0.9,
)
logits_processors = [NoRepeatNGramLogitsProcessor(ngram_size=40, window_size=90, whitelist_token_ids={128821, 128822})]  # small window keeps the scan fast; whitelist_token_ids: <td>, </td>
sampling_params = SamplingParams(
temperature=0.0,
max_tokens=8192,
logits_processors=logits_processors,
skip_special_tokens=False,
)
class Colors:
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
RESET = '\033[0m'
def clean_formula(text):
formula_pattern = r'\\\[(.*?)\\\]'
def process_formula(match):
formula = match.group(1)
formula = re.sub(r'\\quad\s*\([^)]*\)', '', formula)
formula = formula.strip()
return r'\[' + formula + r'\]'
cleaned_text = re.sub(formula_pattern, process_formula, text)
return cleaned_text
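# Worked example for clean_formula (illustrative): equation tags appended with
# \quad are stripped from display math, e.g.
#   clean_formula(r'\[E = mc^2 \quad (1)\]')  ->  r'\[E = mc^2\]'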
def re_match(text):
pattern = r'(<\|ref\|>(.*?)<\|/ref\|><\|det\|>(.*?)<\|/det\|>)'
matches = re.findall(pattern, text, re.DOTALL)
    # matches_image = []
    matches_other = []
    for a_match in matches:
        matches_other.append(a_match[0])
    return matches, matches_other
def process_single_image(image):
"""single image"""
prompt_in = prompt
cache_item = {
"prompt": prompt_in,
"multi_modal_data": {"image": DeepseekOCRProcessor().tokenize_with_images(images = [image], bos=True, eos=True, cropping=CROP_MODE)},
}
return cache_item
if __name__ == "__main__":
# INPUT_PATH = OmniDocBench images path
os.makedirs(OUTPUT_PATH, exist_ok=True)
# print('image processing until processing prompts.....')
print(f'{Colors.RED}glob images.....{Colors.RESET}')
images_path = glob.glob(f'{INPUT_PATH}/*')
images = []
for image_path in images_path:
image = Image.open(image_path).convert('RGB')
images.append(image)
prompt = PROMPT
# batch_inputs = []
# for image in tqdm(images):
# prompt_in = prompt
# cache_list = [
# {
# "prompt": prompt_in,
# "multi_modal_data": {"image": Image.open(image).convert('RGB')},
# }
# ]
# batch_inputs.extend(cache_list)
with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
batch_inputs = list(tqdm(
executor.map(process_single_image, images),
total=len(images),
desc="Pre-processed images"
))
outputs_list = llm.generate(
batch_inputs,
sampling_params=sampling_params
)
output_path = OUTPUT_PATH
os.makedirs(output_path, exist_ok=True)
for output, image in zip(outputs_list, images_path):
content = output.outputs[0].text
        mmd_det_path = os.path.join(output_path, image.split('/')[-1].replace('.jpg', '_det.md'))
with open(mmd_det_path, 'w', encoding='utf-8') as afile:
afile.write(content)
content = clean_formula(content)
        matches_ref, matches_other = re_match(content)
        for idx, a_match_other in enumerate(tqdm(matches_other, desc="other")):
content = content.replace(a_match_other, '').replace('\n\n\n\n', '\n\n').replace('\n\n\n', '\n\n').replace('<center>', '').replace('</center>', '')
        mmd_path = os.path.join(output_path, image.split('/')[-1].replace('.jpg', '.md'))
with open(mmd_path, 'w', encoding='utf-8') as afile:
afile.write(content)
| {
"repo_id": "deepseek-ai/DeepSeek-OCR",
"file_path": "DeepSeek-OCR-master/DeepSeek-OCR-vllm/run_dpsk_ocr_eval_batch.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
deepseek-ai/DeepSeek-OCR:DeepSeek-OCR-master/DeepSeek-OCR-vllm/run_dpsk_ocr_image.py | import asyncio
import re
import os
import torch
if torch.version.cuda == '11.8':
os.environ["TRITON_PTXAS_PATH"] = "/usr/local/cuda-11.8/bin/ptxas"
os.environ['VLLM_USE_V1'] = '0'
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
from vllm import AsyncLLMEngine, SamplingParams
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.model_executor.models.registry import ModelRegistry
import time
from deepseek_ocr import DeepseekOCRForCausalLM
from PIL import Image, ImageDraw, ImageFont, ImageOps
import numpy as np
from tqdm import tqdm
from process.ngram_norepeat import NoRepeatNGramLogitsProcessor
from process.image_process import DeepseekOCRProcessor
from config import MODEL_PATH, INPUT_PATH, OUTPUT_PATH, PROMPT, CROP_MODE
ModelRegistry.register_model("DeepseekOCRForCausalLM", DeepseekOCRForCausalLM)
def load_image(image_path):
try:
image = Image.open(image_path)
corrected_image = ImageOps.exif_transpose(image)
return corrected_image
    except Exception as e:
        print(f"error: {e}")
        try:
            # retry without EXIF transposition
            return Image.open(image_path)
        except Exception:
            return None
def re_match(text):
pattern = r'(<\|ref\|>(.*?)<\|/ref\|><\|det\|>(.*?)<\|/det\|>)'
matches = re.findall(pattern, text, re.DOTALL)
    matches_image = []
    matches_other = []
    for a_match in matches:
        if '<|ref|>image<|/ref|>' in a_match[0]:
            matches_image.append(a_match[0])
        else:
            matches_other.append(a_match[0])
    return matches, matches_image, matches_other
def extract_coordinates_and_label(ref_text, image_width, image_height):
try:
label_type = ref_text[1]
cor_list = eval(ref_text[2])
except Exception as e:
print(e)
return None
return (label_type, cor_list)
def draw_bounding_boxes(image, refs):
image_width, image_height = image.size
img_draw = image.copy()
draw = ImageDraw.Draw(img_draw)
overlay = Image.new('RGBA', img_draw.size, (0, 0, 0, 0))
draw2 = ImageDraw.Draw(overlay)
    # fall back to PIL's built-in default font
    font = ImageFont.load_default()
img_idx = 0
for i, ref in enumerate(refs):
try:
result = extract_coordinates_and_label(ref, image_width, image_height)
if result:
label_type, points_list = result
color = (np.random.randint(0, 200), np.random.randint(0, 200), np.random.randint(0, 255))
color_a = color + (20, )
for points in points_list:
x1, y1, x2, y2 = points
x1 = int(x1 / 999 * image_width)
y1 = int(y1 / 999 * image_height)
x2 = int(x2 / 999 * image_width)
y2 = int(y2 / 999 * image_height)
if label_type == 'image':
try:
cropped = image.crop((x1, y1, x2, y2))
cropped.save(f"{OUTPUT_PATH}/images/{img_idx}.jpg")
except Exception as e:
print(e)
pass
img_idx += 1
try:
if label_type == 'title':
draw.rectangle([x1, y1, x2, y2], outline=color, width=4)
draw2.rectangle([x1, y1, x2, y2], fill=color_a, outline=(0, 0, 0, 0), width=1)
else:
draw.rectangle([x1, y1, x2, y2], outline=color, width=2)
draw2.rectangle([x1, y1, x2, y2], fill=color_a, outline=(0, 0, 0, 0), width=1)
text_x = x1
text_y = max(0, y1 - 15)
text_bbox = draw.textbbox((0, 0), label_type, font=font)
text_width = text_bbox[2] - text_bbox[0]
text_height = text_bbox[3] - text_bbox[1]
draw.rectangle([text_x, text_y, text_x + text_width, text_y + text_height],
fill=(255, 255, 255, 30))
draw.text((text_x, text_y), label_type, font=font, fill=color)
                    except Exception:
                        pass
        except Exception:
            continue
img_draw.paste(overlay, (0, 0), overlay)
return img_draw
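# Note on the 999 divisor above: the model emits box coordinates in a normalized
# 0-999 grid, so x_pixel = int(x / 999 * width). For example, x = 500 on a
# 1280-px-wide page maps to pixel int(500 / 999 * 1280) = 640.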
def process_image_with_refs(image, ref_texts):
result_image = draw_bounding_boxes(image, ref_texts)
return result_image
async def stream_generate(image=None, prompt=''):
engine_args = AsyncEngineArgs(
model=MODEL_PATH,
hf_overrides={"architectures": ["DeepseekOCRForCausalLM"]},
block_size=256,
max_model_len=8192,
enforce_eager=False,
trust_remote_code=True,
tensor_parallel_size=1,
gpu_memory_utilization=0.75,
)
engine = AsyncLLMEngine.from_engine_args(engine_args)
logits_processors = [NoRepeatNGramLogitsProcessor(ngram_size=30, window_size=90, whitelist_token_ids= {128821, 128822})] #whitelist: <td>, </td>
sampling_params = SamplingParams(
temperature=0.0,
max_tokens=8192,
logits_processors=logits_processors,
skip_special_tokens=False,
# ignore_eos=False,
)
request_id = f"request-{int(time.time())}"
printed_length = 0
if image and '<image>' in prompt:
request = {
"prompt": prompt,
"multi_modal_data": {"image": image}
}
elif prompt:
request = {
"prompt": prompt
}
else:
        raise ValueError('prompt must not be empty')
async for request_output in engine.generate(
request, sampling_params, request_id
):
if request_output.outputs:
full_text = request_output.outputs[0].text
new_text = full_text[printed_length:]
print(new_text, end='', flush=True)
printed_length = len(full_text)
final_output = full_text
print('\n')
return final_output
if __name__ == "__main__":
os.makedirs(OUTPUT_PATH, exist_ok=True)
os.makedirs(f'{OUTPUT_PATH}/images', exist_ok=True)
image = load_image(INPUT_PATH).convert('RGB')
if '<image>' in PROMPT:
image_features = DeepseekOCRProcessor().tokenize_with_images(images = [image], bos=True, eos=True, cropping=CROP_MODE)
else:
image_features = ''
prompt = PROMPT
result_out = asyncio.run(stream_generate(image_features, prompt))
save_results = 1
if save_results and '<image>' in prompt:
print('='*15 + 'save results:' + '='*15)
image_draw = image.copy()
outputs = result_out
with open(f'{OUTPUT_PATH}/result_ori.mmd', 'w', encoding = 'utf-8') as afile:
afile.write(outputs)
        matches_ref, matches_images, matches_other = re_match(outputs)
        # print(matches_ref)
        result = process_image_with_refs(image_draw, matches_ref)
        for idx, a_match_image in enumerate(tqdm(matches_images, desc="image")):
            outputs = outputs.replace(a_match_image, f'\n')
        for idx, a_match_other in enumerate(tqdm(matches_other, desc="other")):
            outputs = outputs.replace(a_match_other, '').replace('\\coloneqq', ':=').replace('\\eqqcolon', '=:')
# if 'structural formula' in conversation[0]['content']:
# outputs = '<smiles>' + outputs + '</smiles>'
with open(f'{OUTPUT_PATH}/result.mmd', 'w', encoding = 'utf-8') as afile:
afile.write(outputs)
if 'line_type' in outputs:
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
            geo = eval(outputs)  # the model emits a dict literal for geometry output
            lines = geo['Line']['line']
            line_type = geo['Line']['line_type']
            # print(lines)
            endpoints = geo['Line']['line_endpoint']
fig, ax = plt.subplots(figsize=(3,3), dpi=200)
ax.set_xlim(-15, 15)
ax.set_ylim(-15, 15)
for idx, line in enumerate(lines):
try:
p0 = eval(line.split(' -- ')[0])
p1 = eval(line.split(' -- ')[-1])
                    if line_type[idx] == '--':
                        # dashed segments get a dashed linestyle
                        ax.plot([p0[0], p1[0]], [p0[1], p1[1]], linewidth=0.8, color='k', linestyle='--')
                    else:
                        ax.plot([p0[0], p1[0]], [p0[1], p1[1]], linewidth=0.8, color='k')
ax.scatter(p0[0], p0[1], s=5, color = 'k')
ax.scatter(p1[0], p1[1], s=5, color = 'k')
                except Exception:
                    pass
for endpoint in endpoints:
label = endpoint.split(': ')[0]
(x, y) = eval(endpoint.split(': ')[1])
ax.annotate(label, (x, y), xytext=(1, 1), textcoords='offset points',
fontsize=5, fontweight='light')
try:
                if 'Circle' in geo:
                    circle_centers = geo['Circle']['circle_center']
                    radius = geo['Circle']['radius']
for center, r in zip(circle_centers, radius):
center = eval(center.split(': ')[1])
circle = Circle(center, radius=r, fill=False, edgecolor='black', linewidth=0.8)
ax.add_patch(circle)
            except Exception:
                pass
plt.savefig(f'{OUTPUT_PATH}/geo.jpg')
plt.close()
result.save(f'{OUTPUT_PATH}/result_with_boxes.jpg')
| {
"repo_id": "deepseek-ai/DeepSeek-OCR",
"file_path": "DeepSeek-OCR-master/DeepSeek-OCR-vllm/run_dpsk_ocr_image.py",
"license": "MIT License",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepseek-ai/DeepSeek-OCR:DeepSeek-OCR-master/DeepSeek-OCR-vllm/run_dpsk_ocr_pdf.py | import os
import fitz
import img2pdf
import io
import re
from tqdm import tqdm
import torch
from concurrent.futures import ThreadPoolExecutor
if torch.version.cuda == '11.8':
os.environ["TRITON_PTXAS_PATH"] = "/usr/local/cuda-11.8/bin/ptxas"
os.environ['VLLM_USE_V1'] = '0'
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
from config import MODEL_PATH, INPUT_PATH, OUTPUT_PATH, PROMPT, SKIP_REPEAT, MAX_CONCURRENCY, NUM_WORKERS, CROP_MODE
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from deepseek_ocr import DeepseekOCRForCausalLM
from vllm.model_executor.models.registry import ModelRegistry
from vllm import LLM, SamplingParams
from process.ngram_norepeat import NoRepeatNGramLogitsProcessor
from process.image_process import DeepseekOCRProcessor
ModelRegistry.register_model("DeepseekOCRForCausalLM", DeepseekOCRForCausalLM)
llm = LLM(
model=MODEL_PATH,
hf_overrides={"architectures": ["DeepseekOCRForCausalLM"]},
block_size=256,
enforce_eager=False,
trust_remote_code=True,
max_model_len=8192,
swap_space=0,
max_num_seqs=MAX_CONCURRENCY,
tensor_parallel_size=1,
gpu_memory_utilization=0.9,
disable_mm_preprocessor_cache=True
)
logits_processors = [NoRepeatNGramLogitsProcessor(ngram_size=20, window_size=50, whitelist_token_ids={128821, 128822})]  # small window keeps the scan fast; whitelist_token_ids: <td>, </td>
sampling_params = SamplingParams(
temperature=0.0,
max_tokens=8192,
logits_processors=logits_processors,
skip_special_tokens=False,
include_stop_str_in_output=True,
)
class Colors:
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
RESET = '\033[0m'
def pdf_to_images_high_quality(pdf_path, dpi=144, image_format="PNG"):
"""
pdf2images
"""
images = []
pdf_document = fitz.open(pdf_path)
zoom = dpi / 72.0
matrix = fitz.Matrix(zoom, zoom)
for page_num in range(pdf_document.page_count):
page = pdf_document[page_num]
pixmap = page.get_pixmap(matrix=matrix, alpha=False)
Image.MAX_IMAGE_PIXELS = None
        # fitz rasterizes to PNG bytes in both branches, so render once
        img_data = pixmap.tobytes("png")
        img = Image.open(io.BytesIO(img_data))
if img.mode in ('RGBA', 'LA'):
background = Image.new('RGB', img.size, (255, 255, 255))
background.paste(img, mask=img.split()[-1] if img.mode == 'RGBA' else None)
img = background
images.append(img)
pdf_document.close()
return images
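# DPI-to-zoom arithmetic (illustrative): PDF user space is 72 points per inch,
# so dpi=144 gives zoom = 144 / 72 = 2.0, and a US-Letter page (612 x 792 pt)
# rasterizes to 1224 x 1584 px.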
def pil_to_pdf_img2pdf(pil_images, output_path):
if not pil_images:
return
image_bytes_list = []
for img in pil_images:
if img.mode != 'RGB':
img = img.convert('RGB')
img_buffer = io.BytesIO()
img.save(img_buffer, format='JPEG', quality=95)
img_bytes = img_buffer.getvalue()
image_bytes_list.append(img_bytes)
try:
pdf_bytes = img2pdf.convert(image_bytes_list)
with open(output_path, "wb") as f:
f.write(pdf_bytes)
except Exception as e:
print(f"error: {e}")
def re_match(text):
pattern = r'(<\|ref\|>(.*?)<\|/ref\|><\|det\|>(.*?)<\|/det\|>)'
matches = re.findall(pattern, text, re.DOTALL)
    matches_image = []
    matches_other = []
    for a_match in matches:
        if '<|ref|>image<|/ref|>' in a_match[0]:
            matches_image.append(a_match[0])
        else:
            matches_other.append(a_match[0])
    return matches, matches_image, matches_other
def extract_coordinates_and_label(ref_text, image_width, image_height):
try:
label_type = ref_text[1]
cor_list = eval(ref_text[2])
except Exception as e:
print(e)
return None
return (label_type, cor_list)
def draw_bounding_boxes(image, refs, jdx):
image_width, image_height = image.size
img_draw = image.copy()
draw = ImageDraw.Draw(img_draw)
overlay = Image.new('RGBA', img_draw.size, (0, 0, 0, 0))
draw2 = ImageDraw.Draw(overlay)
# except IOError:
font = ImageFont.load_default()
img_idx = 0
for i, ref in enumerate(refs):
try:
result = extract_coordinates_and_label(ref, image_width, image_height)
if result:
label_type, points_list = result
color = (np.random.randint(0, 200), np.random.randint(0, 200), np.random.randint(0, 255))
color_a = color + (20, )
for points in points_list:
x1, y1, x2, y2 = points
x1 = int(x1 / 999 * image_width)
y1 = int(y1 / 999 * image_height)
x2 = int(x2 / 999 * image_width)
y2 = int(y2 / 999 * image_height)
if label_type == 'image':
try:
cropped = image.crop((x1, y1, x2, y2))
cropped.save(f"{OUTPUT_PATH}/images/{jdx}_{img_idx}.jpg")
except Exception as e:
print(e)
pass
img_idx += 1
try:
if label_type == 'title':
draw.rectangle([x1, y1, x2, y2], outline=color, width=4)
draw2.rectangle([x1, y1, x2, y2], fill=color_a, outline=(0, 0, 0, 0), width=1)
else:
draw.rectangle([x1, y1, x2, y2], outline=color, width=2)
draw2.rectangle([x1, y1, x2, y2], fill=color_a, outline=(0, 0, 0, 0), width=1)
text_x = x1
text_y = max(0, y1 - 15)
text_bbox = draw.textbbox((0, 0), label_type, font=font)
text_width = text_bbox[2] - text_bbox[0]
text_height = text_bbox[3] - text_bbox[1]
draw.rectangle([text_x, text_y, text_x + text_width, text_y + text_height],
fill=(255, 255, 255, 30))
draw.text((text_x, text_y), label_type, font=font, fill=color)
                    except Exception:
                        pass
        except Exception:
            continue
img_draw.paste(overlay, (0, 0), overlay)
return img_draw
def process_image_with_refs(image, ref_texts, jdx):
result_image = draw_bounding_boxes(image, ref_texts, jdx)
return result_image
def process_single_image(image):
"""single image"""
prompt_in = prompt
cache_item = {
"prompt": prompt_in,
"multi_modal_data": {"image": DeepseekOCRProcessor().tokenize_with_images(images = [image], bos=True, eos=True, cropping=CROP_MODE)},
}
return cache_item
if __name__ == "__main__":
os.makedirs(OUTPUT_PATH, exist_ok=True)
os.makedirs(f'{OUTPUT_PATH}/images', exist_ok=True)
print(f'{Colors.RED}PDF loading .....{Colors.RESET}')
images = pdf_to_images_high_quality(INPUT_PATH)
prompt = PROMPT
# batch_inputs = []
with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
batch_inputs = list(tqdm(
executor.map(process_single_image, images),
total=len(images),
desc="Pre-processed images"
))
# for image in tqdm(images):
# prompt_in = prompt
# cache_list = [
# {
# "prompt": prompt_in,
# "multi_modal_data": {"image": DeepseekOCRProcessor().tokenize_with_images(images = [image], bos=True, eos=True, cropping=CROP_MODE)},
# }
# ]
# batch_inputs.extend(cache_list)
outputs_list = llm.generate(
batch_inputs,
sampling_params=sampling_params
)
output_path = OUTPUT_PATH
os.makedirs(output_path, exist_ok=True)
mmd_det_path = output_path + '/' + INPUT_PATH.split('/')[-1].replace('.pdf', '_det.mmd')
mmd_path = output_path + '/' + INPUT_PATH.split('/')[-1].replace('pdf', 'mmd')
pdf_out_path = output_path + '/' + INPUT_PATH.split('/')[-1].replace('.pdf', '_layouts.pdf')
contents_det = ''
contents = ''
draw_images = []
jdx = 0
for output, img in zip(outputs_list, images):
content = output.outputs[0].text
        if '<|end▁of▁sentence|>' in content:  # a missing eos means the page repeated until max_tokens
            content = content.replace('<|end▁of▁sentence|>', '')
        else:
            if SKIP_REPEAT:
                continue
        page_num = '\n<--- Page Split --->'
contents_det += content + f'\n{page_num}\n'
image_draw = img.copy()
        matches_ref, matches_images, matches_other = re_match(content)
        # print(matches_ref)
        result_image = process_image_with_refs(image_draw, matches_ref, jdx)
        draw_images.append(result_image)
        for idx, a_match_image in enumerate(matches_images):
            content = content.replace(a_match_image, f'\n')
        for idx, a_match_other in enumerate(matches_other):
            content = content.replace(a_match_other, '').replace('\\coloneqq', ':=').replace('\\eqqcolon', '=:').replace('\n\n\n\n', '\n\n').replace('\n\n\n', '\n\n')
contents += content + f'\n{page_num}\n'
jdx += 1
with open(mmd_det_path, 'w', encoding='utf-8') as afile:
afile.write(contents_det)
with open(mmd_path, 'w', encoding='utf-8') as afile:
afile.write(contents)
pil_to_pdf_img2pdf(draw_images, pdf_out_path)
| {
"repo_id": "deepseek-ai/DeepSeek-OCR",
"file_path": "DeepSeek-OCR-master/DeepSeek-OCR-vllm/run_dpsk_ocr_pdf.py",
"license": "MIT License",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
deepspeedai/DeepSpeed:tests/unit/ops/muon/test_muon_partial_training.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Test for PR #7869: Fix Muon optimizer with partial model training
This test verifies that the fix for Muon optimizer parameter grouping works
correctly when only part of the model parameters are trainable.
The bug occurred when:
1. Some parameters use Muon optimizer (p.use_muon = True)
2. Other parameters use AdamW optimizer (p.use_muon = False)
3. All trainable parameters happen to use the same optimizer type
This caused one of the parameter groups to be empty, leading to:
ValueError: torch.cat(): expected a non-empty list of Tensors
The fix filters parameters to only include those with requires_grad=True,
ensuring empty parameter groups are properly handled.
"""
import torch.nn as nn
import deepspeed
from unit.common import DistributedTest
class PartialTrainableModel(nn.Module):
"""
A model where some parameters use Muon and some use AdamW.
This simulates the scenario where:
- Hidden layers use Muon (ndim >= 2)
- Embeddings and biases use AdamW (ndim < 2)
"""
def __init__(self, vocab_size=100, hidden_dim=64, num_layers=2):
super().__init__()
self.embedding = nn.Embedding(vocab_size, hidden_dim)
self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers)])
self.output = nn.Linear(hidden_dim, vocab_size)
# Set use_muon attribute for parameters
# Muon should be used for ndim >= 2 (matrices)
# AdamW should be used for ndim < 2 (embeddings, biases)
for name, param in self.named_parameters():
if param.ndim >= 2:
param.use_muon = True
else:
param.use_muon = False
class TestMuonPartialModelTraining(DistributedTest):
"""Test Muon optimizer with partial model training scenarios."""
world_size = 2
reuse_dist_env = True
requires_cuda_env = False
def test_muon_with_all_trainable_params(self):
"""
Test when all parameters are trainable.
This should work fine as both Muon and AdamW parameter groups
will be non-empty.
"""
model = PartialTrainableModel()
ds_config = {
"train_micro_batch_size_per_gpu": 1,
"optimizer": {
"type": "Muon",
"params": {
"lr": 0.02,
"weight_decay": 0.01
}
},
"zero_optimization": {
"stage": 2
},
}
# This should not raise ValueError
model_engine, _, _, _ = deepspeed.initialize(model=model,
model_parameters=model.parameters(),
config=ds_config)
# Verify the model was initialized successfully
assert model_engine is not None
def test_muon_with_partial_trainable_params_same_optimizer(self):
"""
Test the bug scenario: all trainable params use the same optimizer.
This is the bug case where:
- All trainable parameters have use_muon=True (or all False)
- This causes one parameter group to be empty
- Without the fix, this raises: ValueError: torch.cat(): expected a non-empty list of Tensors
The fix filters by requires_grad, so empty groups are properly handled.
"""
model = PartialTrainableModel()
# Freeze all Linear layers (which have use_muon=True)
# Keep only embeddings and biases trainable (use_muon=False)
for name, param in model.named_parameters():
if "layers" in name or "output" in name:
param.requires_grad = False
# Now all trainable parameters have use_muon=False
# This would cause muon_params to be empty without the fix
ds_config = {
"train_micro_batch_size_per_gpu": 1,
"optimizer": {
"type": "Muon",
"params": {
"lr": 0.02,
"weight_decay": 0.01
}
},
"zero_optimization": {
"stage": 2
},
}
# This would raise ValueError without the fix
# With the fix, it should initialize successfully
model_engine, _, _, _ = deepspeed.initialize(model=model,
model_parameters=model.parameters(),
config=ds_config)
# Verify the model was initialized successfully
assert model_engine is not None
def test_muon_with_mixed_trainable_params(self):
"""
Test when trainable parameters use both optimizers.
This is the normal case where:
- Some trainable params have use_muon=True
- Some trainable params have use_muon=False
- Both parameter groups are non-empty
This should work fine even without the fix.
"""
model = PartialTrainableModel()
# Freeze only the first Linear layer
# This leaves both Muon and AdamW parameters trainable
for name, param in model.named_parameters():
if "layers.0" in name:
param.requires_grad = False
ds_config = {
"train_micro_batch_size_per_gpu": 1,
"optimizer": {
"type": "Muon",
"params": {
"lr": 0.02,
"weight_decay": 0.01
}
},
"zero_optimization": {
"stage": 2
},
}
# This should work fine
model_engine, _, _, _ = deepspeed.initialize(model=model,
model_parameters=model.parameters(),
config=ds_config)
# Verify the model was initialized successfully
assert model_engine is not None
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "tests/unit/ops/muon/test_muon_partial_training.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepspeedai/DeepSpeed:deepspeed/inference/v2/model_implementations/exaone4/container.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.inference.v2.model_implementations.common_parameters import *
from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer
class Exaone4TransformerContainer(LayerContainer):
"""
Transformer layer container for the EXAONE 4.0 model.
"""
qkv_w: UnfusedQKVParameter
attn_out_w: AttentionOutputParameter
mlp_1_w: GatedMLPParameter
mlp_2_w: MLP2Parameter
q_norm_gamma: NormParameter
k_norm_gamma: NormParameter
post_attn_norm_gamma: NormParameter
post_ff_norm_gamma: NormParameter
PARAM_MAPPING = {
"self_attn.q_proj.weight": "qkv_w.q_params",
"self_attn.k_proj.weight": "qkv_w.k_params",
"self_attn.v_proj.weight": "qkv_w.v_params",
"self_attn.o_proj.weight": "attn_out_w.params",
"mlp.gate_proj.weight": "mlp_1_w.gate_params",
"mlp.up_proj.weight": "mlp_1_w.up_params",
"mlp.down_proj.weight": "mlp_2_w.params",
"self_attn.q_norm.weight": "q_norm_gamma.params",
"self_attn.k_norm.weight": "k_norm_gamma.params",
"post_attention_layernorm.weight": "post_attn_norm_gamma.params",
"post_feedforward_layernorm.weight": "post_ff_norm_gamma.params",
}
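    # Each checkpoint key on the left is routed into the container field on the
    # right; e.g. the three separate q/k/v weights land in qkv_w's q/k/v
    # sub-parameters so the engine sees a single fused QKV matrix.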
class Exaone4NonTransformerContainer(LayerContainer):
"""
Non-Transformer layer container for the EXAONE 4.0 model.
"""
word_emb: EmbeddingParameter
word_unembed: UnembedParameter
final_norm: NormParameter
PARAM_MAPPING = {
"model.embed_tokens.weight": "word_emb.params",
"model.norm.weight": "final_norm.params",
"lm_head.weight": "word_unembed.params",
}
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/inference/v2/model_implementations/exaone4/container.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/inference/v2/model_implementations/exaone4/model.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import Iterable, Optional, Tuple
import torch
import deepspeed.comm as dist
from ...allocator import empty_from
from ...inference_utils import ActivationType, DtypeEnum
from ...model_implementations import *
from ...modules.configs import *
from ...modules.interfaces import *
from ...ragged import RaggedBatchWrapper
from ...kernels.core_ops.cuda_rms_norm.rms_norm import CUDARMSNorm
from .container import Exaone4NonTransformerContainer, Exaone4TransformerContainer
class Exaone4InferenceModel(DSTransformerModelBase):
"""
Inference model implementation for ragged batching for EXAONE 4.0 models.
Key differences from Mistral/Llama:
- Post-norm architecture (norm after attn/mlp, not before)
- QK-Norm (RMSNorm on Q and K projections per head)
"""
_non_transformer: Optional[Exaone4NonTransformerContainer]
_transformer: Optional[Iterable[Exaone4TransformerContainer]]
@property
def max_sequence_length(self) -> int:
return self._config.max_position_embeddings
@property
def num_layers(self) -> int:
return self._config.num_hidden_layers
@property
def model_dim(self) -> int:
return self._config.hidden_size
@property
def vocab_size(self) -> int:
return self._config.vocab_size
@property
def head_size(self) -> int:
return getattr(self._config, "head_dim", self.model_dim // self.n_heads)
@property
def n_heads(self) -> int:
return self._config.num_attention_heads
@property
def intermediate_dim(self) -> int:
return self._config.intermediate_size
@property
def n_heads_kv(self) -> int:
return self._config.num_key_value_heads
@property
def activation_dtype(self) -> DtypeEnum:
if self._config.torch_dtype == torch.float16:
return DtypeEnum.fp16
elif self._config.torch_dtype == torch.bfloat16:
return DtypeEnum.bf16
else:
raise NotImplementedError("Only fp16 and bf16 are supported")
@property
def mlp_activation_fn(self) -> ActivationType:
activation = self._config.hidden_act.lower()
if activation == "silu":
return ActivationType.SiGLU
elif activation == "gelu":
return ActivationType.GEGLU
elif activation == "relu":
return ActivationType.ReGLU
else:
raise NotImplementedError(f"Activation {activation} not supported")
@property
def norm_type(self) -> NormTypeEnum:
return NormTypeEnum.RMSNorm
@property
def positional_embedding_type(self) -> PositionalEmbeddingType:
return PositionalEmbeddingType.rotate_half
@property
def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
rope_theta = getattr(self._config, "rope_theta", 1000000.0)
return RotateHalfConfig(theta_base=rope_theta)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._qk_norm = CUDARMSNorm(
channels=self.head_size,
fp_dtype=torch.float16 if self.activation_dtype == DtypeEnum.fp16 else torch.bfloat16,
epsilon=getattr(self._config, "rms_norm_eps", 1e-5),
)
def _apply_qk_norm(self, hidden_states: torch.Tensor, q_norm_gamma: torch.Tensor,
k_norm_gamma: torch.Tensor) -> torch.Tensor:
"""
Apply RMSNorm to Q and K projections independently per head.
hidden_states shape: [tokens, (n_q + n_kv + n_kv) * head_size]
"""
tokens = hidden_states.shape[0]
local_n_heads = self.n_heads_q_local
local_n_heads_kv = self.n_heads_kv_local
q_len = local_n_heads * self.head_size
kv_len = local_n_heads_kv * self.head_size
q = hidden_states[:, :q_len].contiguous()
k = hidden_states[:, q_len:q_len + kv_len].contiguous()
v = hidden_states[:, q_len + kv_len:]
# Reshape to [tokens * n_heads, head_size] for per-head RMSNorm
q = q.view(-1, self.head_size)
self._qk_norm(q, q, q_norm_gamma)
q = q.view(tokens, q_len)
k = k.view(-1, self.head_size)
self._qk_norm(k, k, k_norm_gamma)
k = k.view(tokens, kv_len)
hidden_states[:, :q_len] = q
hidden_states[:, q_len:q_len + kv_len] = k
return hidden_states
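    # Shape walk-through with illustrative sizes (not EXAONE's real config): with
    # 32 local query heads, 8 local KV heads and head_size 128, q_len = 4096 and
    # kv_len = 1024, so Q is normalized as [tokens * 32, 128], K as
    # [tokens * 8, 128], and V passes through untouched.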
def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
embed = self.embed(ragged_batch, self._non_transformer.word_emb)
if embed.shape[-1] != self.model_dim:
raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")
return embed
def _forward_transformer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
"""
EXAONE 4.0 uses post-norm architecture:
hidden = attn(hidden)
hidden = post_attn_norm(hidden)
residual = residual + hidden
hidden = mlp(residual)
hidden = post_ff_norm(hidden)
residual = residual + hidden
"""
cur_params = self._transformer[layer_idx]
kv_cache = self.state_manager.get_cache(layer_idx)
# Attention block
hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=None)
hidden_states = self._apply_qk_norm(hidden_states, cur_params.q_norm_gamma, cur_params.k_norm_gamma)
hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info)
hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=None)
if self.tp_size > 1:
dist.all_reduce(hidden_states, group=self._base_mp_group)
# Post-attn norm + residual add
_, hidden_states = self.norm(hidden_states, None, cur_params.post_attn_norm_gamma, beta=None)
residual.add_(hidden_states)
# MLP block
hidden_states = self.mlp_1(residual, cur_params.mlp_1_w, b=None)
hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=None)
if self.tp_size > 1:
dist.all_reduce(hidden_states, group=self._base_mp_group)
# Post-ff norm + residual add
_, hidden_states = self.norm(hidden_states, None, cur_params.post_ff_norm_gamma, beta=None)
residual.add_(hidden_states)
return residual, residual
def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
logits = self.unembed(hidden_states,
self._non_transformer.word_unembed,
ragged_batch_info,
gamma=self._non_transformer.final_norm)
if self.tp_size > 1:
comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))
dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)
full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))
return full_logits
else:
return logits
def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
residual = self._forward_embed(wrapped_batch)
for layer_idx in range(self.num_layers):
residual, hidden_states = self._forward_transformer(layer_idx, residual, residual, wrapped_batch)
return self._forward_unembed(residual, wrapped_batch)
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/inference/v2/model_implementations/exaone4/model.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/inference/v2/model_implementations/exaone4/policy.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import Any
from ...config_v2 import RaggedInferenceEngineConfig
from ..inference_policy_base import ContainerMap, InferenceV2Policy
from .container import Exaone4NonTransformerContainer, Exaone4TransformerContainer
from .model import Exaone4InferenceModel
class Exaone4Policy(InferenceV2Policy):
def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> Exaone4InferenceModel:
return Exaone4InferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group)
def build_container_map(self) -> ContainerMap:
map = ContainerMap()
transformer_containers = [Exaone4TransformerContainer(self.model) for _ in range(self.model.num_layers)]
map.set_transformer_params(['model.layers'], transformer_containers)
map.set_non_transformer_params(Exaone4NonTransformerContainer(self.model))
map.set_unmapped_params([])
return map
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/inference/v2/model_implementations/exaone4/policy.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:tests/unit/v1/zero/test_stage2_flatten_on_gpu.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Test that ZeRO Stage 1 and 2 use the GPU flatten path when VRAM is sufficient.
Parametrized over zero_stage (1, 2) and dtype (fp32, fp16, bf16).
"""
import pytest
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.utils import set_log_level_from_string
from unit.common import DistributedTest
from unit.simple_model import SimpleModel
def _apply_dtype_to_config(config_dict, dtype):
"""Set bf16/fp16 in config_dict based on dtype; skip if not supported."""
if dtype == "bf16":
if not get_accelerator().is_bf16_supported():
pytest.skip("bf16 is not supported on this accelerator")
config_dict["bf16"] = {"enabled": True}
elif dtype == "fp16":
if not get_accelerator().is_fp16_supported():
pytest.skip("fp16 is not supported on this accelerator")
config_dict["fp16"] = {"enabled": True, "initial_scale_power": 8}
# fp32: no half-precision block
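# Example (illustrative): _apply_dtype_to_config(cfg, "bf16") adds
# {"bf16": {"enabled": True}} to cfg in place, "fp16" adds an fp16 block with
# initial_scale_power=8, and "fp32" leaves cfg untouched.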
@pytest.mark.parametrize("zero_stage", [1, 2])
@pytest.mark.parametrize("dtype", ["fp32", "fp16", "bf16"], ids=["fp32", "fp16", "bf16"])
class TestStage2FlattenOnGPU(DistributedTest):
"""ZeRO-1 and ZeRO-2 with small model should flatten on GPU (sufficient VRAM)."""
world_size = 2 # Run on 2 GPUs when available
def test_flatten_on_gpu_path_taken(self, monkeypatch, zero_stage, dtype):
"""Assert the GPU flatten path was used (not CPU flatten + move)."""
if not get_accelerator().is_available():
pytest.skip("Accelerator not available")
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 1,
"zero_optimization": {
"stage": zero_stage
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
}
_apply_dtype_to_config(config_dict, dtype)
set_log_level_from_string("info")
log_messages = []
def mock_logger_info(msg, *args, **kwargs):
log_messages.append(msg if isinstance(msg, str) else str(msg))
monkeypatch.setattr("deepspeed.utils.logger.info", mock_logger_info)
hidden_dim = 64
model = SimpleModel(hidden_dim=hidden_dim, nlayers=2)
deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=model.parameters(),
)
# Small model + no CPU offload => GPU path; that path logs "on GPU"
gpu_path_logs = [m for m in log_messages if "Flattening param group" in m and "on GPU" in m]
assert gpu_path_logs, (
f"Expected GPU flatten path (logger.info should be called with 'Flattening param group' and 'on GPU'). "
f"Captured messages: {log_messages}")
def test_flat_buffers_on_accelerator(self, zero_stage, dtype):
"""Regression: flat buffers must end up on the accelerator (not left on CPU)."""
if not get_accelerator().is_available():
pytest.skip("Accelerator not available")
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 1,
"zero_optimization": {
"stage": zero_stage
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
}
_apply_dtype_to_config(config_dict, dtype)
hidden_dim = 64
model = SimpleModel(hidden_dim=hidden_dim, nlayers=2)
engine, _, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=model.parameters(),
)
opt = engine.optimizer
assert hasattr(opt, "bit16_groups_flat"), "ZeRO-1/2 optimizer should have bit16_groups_flat"
device_type = get_accelerator().device_name()
for i, flat in enumerate(opt.bit16_groups_flat):
assert flat.device.type == device_type, (f"Flat buffer {i} must be on {device_type}, got {flat.device}")
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "tests/unit/v1/zero/test_stage2_flatten_on_gpu.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepspeedai/DeepSpeed:deepspeed/compat.py | """Compatibility functions to support wider version ranges for python and dependencies."""
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import Mapping, Any, Dict
from inspect import ismodule
try:
import annotationlib # python >= 3.14
except ImportError:
annotationlib = None
# Handle annotations across Python versions (the semantics changed in 3.14). See:
# - Python 3.14 release notes: https://docs.python.org/3/whatsnew/3.14.html
# - Porting annotations: https://docs.python.org/3/whatsnew/3.14.html#whatsnew314-porting-annotations
# - PEP 649: https://peps.python.org/pep-0649/
# - PEP 749: https://peps.python.org/pep-0749/
# Backwards compatible; applies the 3.14+ best practice (annotationlib) when available.
def get_annotations_from_namespace(namespace: Mapping[str, object]) -> Dict[str, Any]:
if annotationlib:
annotate_func = annotationlib.get_annotate_from_class_namespace(namespace)
if annotate_func is not None:
return annotationlib.call_annotate_function(annotate_func, annotationlib.Format.VALUE)
return namespace.get("__annotations__", {})
def get_annotations(obj: Any) -> Dict[str, Any]:
"""
Retrieves annotations from a Python object.
    In python >=3.14 this is a thin wrapper around the `annotationlib.get_annotations`
    function, with the added convenience that objects which are not a module, class,
    or callable and carry no annotations of their own fall back to their type's
    annotations.
    """
if annotationlib:
has_annotations = hasattr(obj, "__annotations__") or hasattr(obj, "__annotate__")
if not isinstance(obj, type) and not ismodule(obj) and not callable(obj) and not has_annotations:
obj = type(obj)
return annotationlib.get_annotations(obj)
try:
return obj.__annotations__
except AttributeError:
return {}
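# Minimal usage sketch (illustrative) for the helpers above:
#
#     class Point:
#         x: int
#         y: int
#
#     get_annotations(Point)    # -> {"x": int, "y": int} on 3.8 through 3.14+
#     get_annotations(Point())  # instance falls back to its class, same result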
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/compat.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/module_inject/autotp_config.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Configurable AutoTP API
This module provides a unified specification for tensor parallel layer partitioning.
The design is inspired by Universal Checkpointing's SubparamShape and provides
a single, well-defined format that users can easily understand, customize, and extend.
"""
import re
from dataclasses import dataclass, field
from typing import List, Tuple, Union, Optional
from enum import Enum
from deepspeed.utils.logging import warning_once
class PartitionType(Enum):
"""How the layer should be partitioned for tensor parallelism."""
COLUMN = "column" # Partition output dim, AllReduce in backward
ROW = "row" # Partition input dim, AllReduce in forward
SKIP = "skip" # Do not partition this layer
@dataclass
class TPLayerSpec:
"""
Unified specification for tensor parallel layer partitioning.
This is inspired by Universal Checkpointing's SubparamShape but extended
for AutoTP's needs (forward/backward communication patterns).
The `shape` parameter supports at most 1-level nesting at the partition dimension:
- (3, -1) -> 3 equal-size sub-params
- ((q, k, v), -1) -> 3 unequal-size sub-params (1-level nesting)
Examples:
# Simple row-parallel layer (e.g., o_proj, down_proj)
TPLayerSpec(
patterns=[".*\\.o_proj$", ".*\\.down_proj$"],
partition_type=PartitionType.ROW,
)
# Simple column-parallel layer (e.g., q_proj, k_proj, v_proj)
TPLayerSpec(
patterns=[".*\\.[qkv]_proj$"],
partition_type=PartitionType.COLUMN,
)
# Fused QKV - GLM style [Q, K, V] concatenated on dim 0
TPLayerSpec(
patterns=[".*\\.query_key_value\\.weight$"],
partition_type=PartitionType.COLUMN,
shape=(3, -1), # 3 equal sub-params, -1 = infer
partition_dim=0,
)
# Fused QKV - Bloom style [q1,k1,v1,q2,k2,v2,...]
TPLayerSpec(
patterns=[".*\\.query_key_value\\.weight$"],
partition_type=PartitionType.COLUMN,
# No reshape needed, just split along dim 0
)
# GQA with different Q/K/V sizes (1-level nesting)
TPLayerSpec(
patterns=[".*\\.qkv_proj\\.weight$"],
partition_type=PartitionType.COLUMN,
shape=((q_size, k_size, v_size), -1), # Unequal sub-params
partition_dim=0,
)
# Chunked MLP (gate_up_proj)
TPLayerSpec(
patterns=[".*\\.gate_up_proj\\.weight$"],
partition_type=PartitionType.COLUMN,
shape=(2, -1), # [gate, up] packed
partition_dim=0,
)
# MoE FFN with expert dimension
TPLayerSpec(
patterns=[".*\\.experts\\..*\\.w1\\.weight$"],
partition_type=PartitionType.COLUMN,
shape=(num_experts, -1, hidden_in), # View as 3D
partition_dim=1, # Partition the hidden_out dimension
)
# Skip layer (e.g., MoE gate)
TPLayerSpec(
patterns=[".*\\.gate$", ".*\\.router$"],
partition_type=PartitionType.SKIP,
)
"""
# Layer identification - regex patterns to match parameter names
patterns: List[str]
# Partition type determines communication pattern
partition_type: PartitionType = PartitionType.COLUMN
# Optional: logical shape for partitioning
# - Use -1 for dimensions that should be inferred
# - Use tuple of ints at partition_dim for unequal sub-params (1-level nesting only)
# Examples:
# (3, -1) -> 3 equal sub-params
# ((4096, 1024, 1024), -1) -> 3 unequal sub-params (GQA)
# (n_experts, -1, hidden) -> MoE reshape
shape: Optional[Tuple[Union[int, Tuple[int, ...]], ...]] = None
# Which dimension to partition (after optional reshape)
# Default: 0 for COLUMN, 1 for ROW (standard 2D weight matrix)
partition_dim: Optional[int] = None
# Optional: model type constraint (only apply for specific models)
model_types: Optional[List[str]] = None
def __post_init__(self):
if isinstance(self.partition_type, str):
self.partition_type = PartitionType(self.partition_type.lower())
if self.shape is not None:
self.shape = self._normalize_shape(self.shape)
self._validate_shape_format()
@staticmethod
def _normalize_shape(shape):
if isinstance(shape, list):
return tuple(TPLayerSpec._normalize_shape(item) for item in shape)
if isinstance(shape, tuple):
return tuple(TPLayerSpec._normalize_shape(item) if isinstance(item, list) else item for item in shape)
return shape
def _validate_shape_format(self):
if not isinstance(self.shape, tuple):
raise ValueError("AutoTP shape must be a tuple of ints or a tuple at partition_dim.")
partition_dim = self.get_partition_dim()
if partition_dim < 0 or partition_dim >= len(self.shape):
raise ValueError(
f"AutoTP partition_dim {partition_dim} is out of range for shape length {len(self.shape)}.")
nested_tuple_seen = False
for idx, dim in enumerate(self.shape):
if isinstance(dim, tuple):
if idx != partition_dim:
raise ValueError(
f"AutoTP shape nested tuple only allowed at partition_dim={partition_dim}, got at {idx}.")
if nested_tuple_seen:
raise ValueError("AutoTP shape supports only 1-level nesting at partition_dim.")
nested_tuple_seen = True
if len(dim) == 0:
raise ValueError("AutoTP shape nested tuple cannot be empty.")
for val in dim:
if isinstance(val, tuple):
raise ValueError("AutoTP shape supports only 1-level nesting at partition_dim.")
if not isinstance(val, int) or val <= 0:
raise ValueError("AutoTP nested sub-parameter sizes must be positive integers.")
elif isinstance(dim, int):
if dim == 0 or dim < -1:
raise ValueError("AutoTP shape dimensions must be positive integers or -1.")
else:
raise ValueError("AutoTP shape must contain only integers or a tuple at partition_dim.")
def get_partition_dim(self) -> int:
"""Get effective partition dimension."""
if self.partition_dim is not None:
return self.partition_dim
# Default based on partition type for 2D weight matrices
return 0 if self.partition_type == PartitionType.COLUMN else 1
def has_unequal_sub_params(self) -> bool:
"""Check if this spec has unequal sub-parameters (nested tuple at partition_dim)."""
if self.shape is None:
return False
dim = self.get_partition_dim()
if dim >= len(self.shape):
return False
return isinstance(self.shape[dim], tuple)
def get_sub_param_sizes(self) -> Optional[Tuple[int, ...]]:
"""Get sub-parameter sizes if using unequal sub-params."""
if not self.has_unequal_sub_params():
return None
return self.shape[self.get_partition_dim()]
def get_num_sub_params(self) -> Optional[int]:
"""Get the number of sub-parameters."""
if self.shape is None:
return None
dim = self.get_partition_dim()
if dim >= len(self.shape):
return None
if isinstance(self.shape[dim], tuple):
return len(self.shape[dim])
elif isinstance(self.shape[dim], int) and self.shape[dim] > 0:
return self.shape[dim]
return None
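    # Worked example: shape=((4096, 1024, 1024), -1) with partition_dim=0 yields
    # has_unequal_sub_params() -> True, get_sub_param_sizes() -> (4096, 1024, 1024)
    # and get_num_sub_params() -> 3; shape=(3, -1) yields 3 equal sub-params.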
def matches(self, param_name: str, model_type: Optional[str] = None) -> bool:
"""Check if this spec matches the given parameter."""
# Check model type constraint
if self.model_types:
if model_type is None:
return False
model_type_norm = str(model_type).lower()
model_types_norm = [str(mt).lower() for mt in self.model_types]
if model_type_norm not in model_types_norm:
return False
# Check pattern match
return any(re.match(pattern, param_name) for pattern in self.patterns)
@dataclass
class AutoTPConfig:
"""
Configuration for Automatic Tensor Parallelism.
Example usage:
config = AutoTPConfig(
tp_size=4,
layer_specs=[
# Row-parallel layers (AllReduce after forward)
TPLayerSpec(
patterns=[".*\\.o_proj", ".*\\.down_proj"],
partition_type=PartitionType.ROW,
),
# Column-parallel layers
TPLayerSpec(
patterns=[".*\\.[qkv]_proj", ".*\\.up_proj", ".*\\.gate_proj"],
partition_type=PartitionType.COLUMN,
),
# Skip MoE gates
TPLayerSpec(
patterns=[".*\\.gate$"],
partition_type=PartitionType.SKIP,
),
],
)
"""
tp_size: int = 1
# Unified layer specifications
layer_specs: List[TPLayerSpec] = field(default_factory=list)
# Embedding configuration
embedding_partition_dim: int = 1 # Usually partition vocab dim
# LM head configuration
lm_head_patterns: List[str] = field(default_factory=lambda: ["lm_head", "embed_out"])
# Behavior flags
use_default_specs: bool = True # Merge with built-in specs
strict_mode: bool = False # Fail if unmatched Linear layers found
def find_matching_spec(self, param_name: str, model_type: Optional[str] = None) -> Optional[TPLayerSpec]:
"""Find the first matching spec for a parameter."""
matches = [spec for spec in self.layer_specs if spec.matches(param_name, model_type)]
if not matches:
return None
if len(matches) > 1:
matched_patterns = [spec.patterns for spec in matches]
warning_once(f"AutoTPConfig: parameter {param_name} matched multiple layer_specs {matched_patterns}; "
"using the first match.")
return matches[0]
@classmethod
def from_dict(cls, config_dict: dict) -> "AutoTPConfig":
"""Create config from dictionary (JSON config)."""
layer_specs = []
for spec_dict in config_dict.get("layer_specs", []):
# Convert partition_type string to enum
partition_type_str = spec_dict.get("partition_type", "column")
if isinstance(partition_type_str, str):
partition_type = PartitionType(partition_type_str.lower())
else:
partition_type = partition_type_str
# Convert shape from list to tuple if necessary
shape = spec_dict.get("shape")
if shape is not None:
shape = cls._convert_shape(shape)
layer_specs.append(
TPLayerSpec(
patterns=spec_dict.get("patterns", []),
partition_type=partition_type,
shape=shape,
partition_dim=spec_dict.get("partition_dim"),
model_types=spec_dict.get("model_types"),
))
return cls(
tp_size=config_dict.get("tp_size", 1),
layer_specs=layer_specs,
embedding_partition_dim=config_dict.get("embedding_partition_dim", 1),
lm_head_patterns=config_dict.get("lm_head_patterns", ["lm_head", "embed_out"]),
use_default_specs=config_dict.get("use_default_specs", True),
strict_mode=config_dict.get("strict_mode", False),
)
@staticmethod
def _convert_shape(shape):
"""Convert shape from list to tuple, handling nested structures."""
if isinstance(shape, list):
return tuple(AutoTPConfig._convert_shape(item) if isinstance(item, list) else item for item in shape)
return shape
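    # Example (illustrative) of the dict format accepted by from_dict():
    #
    #   AutoTPConfig.from_dict({
    #       "tp_size": 2,
    #       "layer_specs": [
    #           {"patterns": [".*\\.down_proj\\.weight$"], "partition_type": "row"},
    #           {"patterns": [".*\\.qkv_proj\\.weight$"], "partition_type": "column",
    #            "shape": [[4096, 1024, 1024], -1], "partition_dim": 0},
    #       ],
    #   })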
class AutoTPPresets:
"""Built-in presets for common model architectures."""
@staticmethod
def llama() -> AutoTPConfig:
"""LLaMA-style models (separate Q, K, V projections)."""
return AutoTPConfig(layer_specs=[
TPLayerSpec(
patterns=[r".*\.self_attn\.o_proj\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.self_attn\.[qkv]_proj\.weight$"],
partition_type=PartitionType.COLUMN,
),
TPLayerSpec(
patterns=[r".*\.mlp\.down_proj\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.mlp\.(up|gate)_proj\.weight$"],
partition_type=PartitionType.COLUMN,
),
], )
@staticmethod
def llama_gqa(num_heads: int, num_kv_heads: int, head_dim: int) -> AutoTPConfig:
"""LLaMA with Grouped Query Attention (fused QKV variant)."""
q_size = num_heads * head_dim
kv_size = num_kv_heads * head_dim
return AutoTPConfig(
layer_specs=[
TPLayerSpec(
patterns=[r".*\.self_attn\.o_proj\.weight$"],
partition_type=PartitionType.ROW,
),
# Fused QKV with unequal sizes (GQA)
TPLayerSpec(
patterns=[r".*\.self_attn\.qkv_proj\.weight$"],
partition_type=PartitionType.COLUMN,
shape=((q_size, kv_size, kv_size), -1), # 1-level nesting
partition_dim=0,
),
TPLayerSpec(
patterns=[r".*\.mlp\.down_proj\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.mlp\.(up|gate)_proj\.weight$"],
partition_type=PartitionType.COLUMN,
),
], )
@staticmethod
def bloom() -> AutoTPConfig:
"""BLOOM-style models (fused QKV with interleaved heads)."""
return AutoTPConfig(
layer_specs=[
TPLayerSpec(
patterns=[r".*\.self_attention\.dense\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.self_attention\.query_key_value\.weight$"],
partition_type=PartitionType.COLUMN,
# Bloom style: [q1,k1,v1,q2,k2,v2,...] - no reshape needed
),
TPLayerSpec(
patterns=[r".*\.mlp\.dense_4h_to_h\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.mlp\.dense_h_to_4h\.weight$"],
partition_type=PartitionType.COLUMN,
),
], )
@staticmethod
def chatglm() -> AutoTPConfig:
"""ChatGLM-style models (GLM-style fused QKV)."""
return AutoTPConfig(
layer_specs=[
TPLayerSpec(
patterns=[r".*\.self_attention\.dense\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.self_attention\.query_key_value\.weight$"],
partition_type=PartitionType.COLUMN,
shape=(3, -1), # [Q, K, V] concatenated
partition_dim=0,
),
TPLayerSpec(
patterns=[r".*\.mlp\.dense_4h_to_h\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.mlp\.dense_h_to_4h\.weight$"],
partition_type=PartitionType.COLUMN,
shape=(2, -1), # [gate, up] packed
partition_dim=0,
),
], )
@staticmethod
def mixtral() -> AutoTPConfig:
"""Mixtral MoE model."""
return AutoTPConfig(
layer_specs=[
TPLayerSpec(
patterns=[r".*\.self_attn\.o_proj\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.self_attn\.[qkv]_proj\.weight$"],
partition_type=PartitionType.COLUMN,
),
# MoE experts
TPLayerSpec(
patterns=[r".*\.block_sparse_moe\.experts\.\d+\.w2\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.block_sparse_moe\.experts\.\d+\.w[13]\.weight$"],
partition_type=PartitionType.COLUMN,
),
# Skip MoE gate
TPLayerSpec(
patterns=[r".*\.block_sparse_moe\.gate\.weight$"],
partition_type=PartitionType.SKIP,
),
], )
@staticmethod
def deepseek_v2() -> AutoTPConfig:
"""DeepSeek-V2 with MLA (Multi-head Latent Attention)."""
return AutoTPConfig(
layer_specs=[
# Standard attention output
TPLayerSpec(
patterns=[r".*\.self_attn\.o_proj\.weight$"],
partition_type=PartitionType.ROW,
),
# MLA uses compressed KV, skip low-rank projections
TPLayerSpec(
patterns=[r".*\.self_attn\.(q_a_proj|kv_a_proj_with_mqa)\.weight$"],
partition_type=PartitionType.SKIP,
),
# Q/K/V projections from latent
TPLayerSpec(
patterns=[r".*\.self_attn\.(q_b_proj|kv_b_proj)\.weight$"],
partition_type=PartitionType.COLUMN,
),
# MoE experts
TPLayerSpec(
patterns=[r".*\.mlp\.experts\.\d+\.down_proj\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.mlp\.experts\.\d+\.(up|gate)_proj\.weight$"],
partition_type=PartitionType.COLUMN,
),
# Skip MoE gate
TPLayerSpec(
patterns=[r".*\.mlp\.gate\.weight$"],
partition_type=PartitionType.SKIP,
),
# Shared expert
TPLayerSpec(
patterns=[r".*\.mlp\.shared_experts\.down_proj\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.mlp\.shared_experts\.(up|gate)_proj\.weight$"],
partition_type=PartitionType.COLUMN,
),
], )
@staticmethod
def qwen2() -> AutoTPConfig:
"""Qwen2 model."""
return AutoTPConfig(layer_specs=[
TPLayerSpec(
patterns=[r".*\.self_attn\.o_proj\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.self_attn\.[qkv]_proj\.weight$"],
partition_type=PartitionType.COLUMN,
),
TPLayerSpec(
patterns=[r".*\.mlp\.down_proj\.weight$"],
partition_type=PartitionType.ROW,
),
TPLayerSpec(
patterns=[r".*\.mlp\.(up|gate)_proj\.weight$"],
partition_type=PartitionType.COLUMN,
),
], )
@staticmethod
def phi3() -> AutoTPConfig:
"""Phi3 model with fused QKV and chunked MLP."""
return AutoTPConfig(
layer_specs=[
TPLayerSpec(
patterns=[r".*\.self_attn\.o_proj\.weight$"],
partition_type=PartitionType.ROW,
),
# Phi3 has fused qkv_proj
TPLayerSpec(
patterns=[r".*\.self_attn\.qkv_proj\.weight$"],
partition_type=PartitionType.COLUMN,
shape=(3, -1), # [Q, K, V] concatenated
partition_dim=0,
),
TPLayerSpec(
patterns=[r".*\.mlp\.down_proj\.weight$"],
partition_type=PartitionType.ROW,
),
# Phi3 has gate_up_proj fused
TPLayerSpec(
patterns=[r".*\.mlp\.gate_up_proj\.weight$"],
partition_type=PartitionType.COLUMN,
shape=(2, -1), # [gate, up] packed
partition_dim=0,
),
], )
@staticmethod
def get_preset(model_type: str) -> Optional[AutoTPConfig]:
"""Get a preset configuration by model type name."""
presets = {
"llama": AutoTPPresets.llama,
"bloom": AutoTPPresets.bloom,
"chatglm": AutoTPPresets.chatglm,
"mixtral": AutoTPPresets.mixtral,
"deepseek_v2": AutoTPPresets.deepseek_v2,
"qwen2": AutoTPPresets.qwen2,
"phi3": AutoTPPresets.phi3,
}
preset_fn = presets.get(model_type.lower())
if preset_fn:
return preset_fn()
return None
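# Hedged usage sketch (illustrative; not part of the original source, and it
# assumes TPLayerSpec exposes its constructor arguments as attributes):
# get_preset lookup is case-insensitive and returns None for unknown model
# types; llama_gqa derives the fused-QKV sub-shapes from the head geometry,
# e.g. 32 query heads, 8 KV heads, head_dim 128 -> (4096, 1024, 1024).
def _example_presets():
    assert AutoTPPresets.get_preset("LLaMA") is not None
    assert AutoTPPresets.get_preset("not-a-model") is None
    gqa = AutoTPPresets.llama_gqa(num_heads=32, num_kv_heads=8, head_dim=128)
    assert gqa.layer_specs[1].shape == ((4096, 1024, 1024), -1)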
def merge_autotp_configs(base: AutoTPConfig, override: AutoTPConfig) -> AutoTPConfig:
"""Merge two AutoTP configs, with override taking precedence."""
# Combine layer specs - override specs come first (higher priority)
merged_specs = list(override.layer_specs) + list(base.layer_specs)
return AutoTPConfig(
tp_size=override.tp_size if override.tp_size > 1 else base.tp_size,
layer_specs=merged_specs,
embedding_partition_dim=override.embedding_partition_dim,
lm_head_patterns=override.lm_head_patterns or base.lm_head_patterns,
use_default_specs=override.use_default_specs,
strict_mode=override.strict_mode,
)
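# Hedged usage sketch (illustrative; not part of the original source):
# override specs are searched first, so a custom fused-QKV spec can shadow a
# preset's generic column rule while the preset's remaining rules still apply.
def _example_merge_autotp_configs():
    base = AutoTPPresets.llama()
    override = AutoTPConfig(layer_specs=[
        TPLayerSpec(patterns=[r".*\.self_attn\.qkv_proj\.weight$"],
                    partition_type=PartitionType.COLUMN,
                    shape=(3, -1),
                    partition_dim=0),
    ])
    merged = merge_autotp_configs(base, override)
    # The override spec sits in front of all base specs after merging.
    assert merged.layer_specs[0] is override.layer_specs[0]
    assert len(merged.layer_specs) == len(base.layer_specs) + 1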
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/module_inject/autotp_config.py",
"license": "Apache License 2.0",
"lines": 511,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/runtime/tensor_parallel/init_utils.py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import base64
import os
from typing import Optional, Union
import hjson
import torch
from deepspeed.runtime.config_utils import dict_raise_error_on_duplicate_keys
_TP_MODEL_INIT_ARGS = None
def load_ds_config(config: Union[str, dict]) -> dict:
if isinstance(config, dict):
return config
if isinstance(config, str):
if os.path.exists(config):
            with open(config, "r") as config_file:
                return hjson.load(config_file, object_pairs_hook=dict_raise_error_on_duplicate_keys)
try:
config_decoded = base64.urlsafe_b64decode(config).decode('utf-8')
return hjson.loads(config_decoded)
except (UnicodeDecodeError, AttributeError, ValueError) as exc:
            raise ValueError(
                f"Expected a path to an existing DeepSpeed config file, a dictionary, or a valid base64-encoded "
                f"string. Received: {config}") from exc
raise ValueError(f"Expected a string path to an existing deepspeed config, or a dictionary or a valid base64. "
f"Received: {config}")
def record_tp_model_init_args(tp_size, dtype, tp_group, dist_module):
global _TP_MODEL_INIT_ARGS
new_args = {
"tp_size": tp_size,
"dtype": dtype,
"tp_group": tp_group,
}
if _TP_MODEL_INIT_ARGS is None:
_TP_MODEL_INIT_ARGS = new_args
return
if _TP_MODEL_INIT_ARGS["tp_size"] != tp_size or _TP_MODEL_INIT_ARGS["dtype"] != dtype:
raise ValueError("Conflicting tp_model_init arguments detected across multiple calls.")
existing_group = _TP_MODEL_INIT_ARGS.get("tp_group")
if existing_group is None and tp_group is None:
return
if (existing_group is None) != (tp_group is None):
raise ValueError("Conflicting tp_model_init arguments detected across multiple calls.")
existing_group_size = tp_group_world_size(existing_group, dist_module)
new_group_size = tp_group_world_size(tp_group, dist_module)
if existing_group_size != new_group_size:
raise ValueError("Conflicting tp_model_init arguments detected across multiple calls.")
def tp_group_world_size(tp_group, dist_module):
if tp_group is None or dist_module is None:
return None
return dist_module.get_world_size(group=tp_group)
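# Hedged usage sketch (illustrative; not part of the original source): a
# second registration with a different tp_size is rejected. The sketch saves
# and restores the module-level cache so it has no side effects.
def _example_record_conflicting_tp_args():
    global _TP_MODEL_INIT_ARGS
    saved = _TP_MODEL_INIT_ARGS
    _TP_MODEL_INIT_ARGS = None
    try:
        record_tp_model_init_args(tp_size=2, dtype=torch.bfloat16, tp_group=None, dist_module=None)
        try:
            record_tp_model_init_args(tp_size=4, dtype=torch.bfloat16, tp_group=None, dist_module=None)
        except ValueError:
            pass  # expected: conflicting tp_size across calls
        else:
            raise AssertionError("expected conflicting tp_size to raise ValueError")
    finally:
        _TP_MODEL_INIT_ARGS = saved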
def infer_config_dtype(config_dict: dict) -> Optional[torch.dtype]:
bf16_config = config_dict.get("bf16", {})
if isinstance(bf16_config, dict) and bf16_config.get("enabled", False):
return torch.bfloat16
fp16_config = config_dict.get("fp16", {})
if isinstance(fp16_config, dict) and fp16_config.get("enabled", False):
return torch.float16
return None
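# Hedged usage sketch (illustrative; not part of the original source): bf16
# is checked before fp16, and an empty config means no dtype preference.
def _example_infer_config_dtype():
    assert infer_config_dtype({"bf16": {"enabled": True}}) is torch.bfloat16
    assert infer_config_dtype({"fp16": {"enabled": True}}) is torch.float16
    assert infer_config_dtype({}) is None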
def merge_tp_model_init_into_config(config_dict: dict, mpu, mesh_param, dist_module):
if _TP_MODEL_INIT_ARGS is None:
return
tp_size = _TP_MODEL_INIT_ARGS["tp_size"]
dtype = _TP_MODEL_INIT_ARGS["dtype"]
tp_group = _TP_MODEL_INIT_ARGS["tp_group"]
if tp_group is not None and mpu is not None:
raise ValueError("tp_model_init provided tp_group; deepspeed.initialize must not receive mpu.")
if tp_group is None and mpu is None and mesh_param is None:
# Auto-create TP groups for compatibility with HF Trainer (mpu is not passed).
from deepspeed.utils import groups
groups._init_tp_mesh_device(tensor_model_parallel_size=tp_size)
tp_section = config_dict.get("tensor_parallel")
if tp_section is None:
tp_section = {}
config_dict["tensor_parallel"] = tp_section
config_autotp_size = tp_section.get("autotp_size")
if config_autotp_size is not None and config_autotp_size != tp_size:
raise ValueError(
f"Conflicting tensor_parallel.autotp_size in config ({config_autotp_size}) and tp_model_init ({tp_size}).")
if config_autotp_size is None:
tp_section["autotp_size"] = tp_size
tp_config = tp_section.get("tp") or {}
if not isinstance(tp_config, dict):
raise ValueError("tensor_parallel.tp must be a dict when provided.")
config_tp_size = tp_config.get("tp_size")
if config_tp_size is not None and config_tp_size != tp_size:
raise ValueError(
f"Conflicting tensor_parallel.tp.tp_size in config ({config_tp_size}) and tp_model_init ({tp_size}).")
if config_tp_size is None:
tp_config["tp_size"] = tp_size
if tp_group is not None:
config_tp_group = tp_config.get("tp_group")
if config_tp_group is not None and config_tp_group is not tp_group:
raise ValueError("Conflicting tensor_parallel.tp.tp_group in config and tp_model_init.")
tp_config["tp_group"] = tp_group
tp_group_size = tp_group_world_size(tp_group, dist_module)
if tp_group_size is not None and tp_group_size != tp_size:
raise ValueError(f"tp_model_init tp_size ({tp_size}) does not match tp_group size ({tp_group_size}).")
tp_section["tp"] = tp_config
config_dtype = infer_config_dtype(config_dict)
if config_dtype is not None and config_dtype != dtype:
raise ValueError(f"Conflicting dtype: config uses {config_dtype} but tp_model_init requested {dtype}.")
tp_dtype = tp_section.get("dtype")
if tp_dtype is not None:
if isinstance(tp_dtype, str):
tp_dtype_map = {
"fp16": torch.float16,
"bf16": torch.bfloat16,
"fp32": torch.float32,
}
tp_dtype_value = tp_dtype_map.get(tp_dtype.lower())
else:
tp_dtype_value = tp_dtype
if tp_dtype_value is not None and tp_dtype_value != dtype:
raise ValueError(f"Conflicting tensor_parallel.dtype in config ({tp_dtype}) and tp_model_init ({dtype}).")
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/runtime/tensor_parallel/init_utils.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:tests/unit/model_parallelism/test_autotp_custom_patterns.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed.comm as dist
import deepspeed
from copy import deepcopy
from torch import nn
from unit.common import DistributedTest, preferred_dtype
from deepspeed.accelerator import get_accelerator
from deepspeed.utils import groups
from deepspeed.module_inject.layers import (LinearAllreduce, LinearLayer, SubParamLinearLayer, fused_LinearLayer)
from deepspeed.module_inject.autotp_config import AutoTPConfig
from deepspeed.module_inject.auto_tp import AutoTP
def skip_on_device():
if get_accelerator().device_name() == 'xpu':
pytest.skip("XPU requires a higher version for test")
class SequentialLinearModel(torch.nn.Module):
def __init__(self, hidden_dim, nlayers=1):
super(SequentialLinearModel, self).__init__()
self.linears = torch.nn.ModuleList([torch.nn.Linear(hidden_dim, hidden_dim) for _ in range(nlayers)])
def forward(self, x):
for layer in self.linears:
x = layer(x)
return x
class CustomLinearModule(torch.nn.Module):
def __init__(self, hidden_dim):
super(CustomLinearModule, self).__init__()
self.weight = torch.nn.Parameter(torch.empty(hidden_dim, hidden_dim))
self.bias = torch.nn.Parameter(torch.empty(hidden_dim))
torch.nn.init.uniform_(self.weight, -0.02, 0.02)
torch.nn.init.uniform_(self.bias, -0.02, 0.02)
def forward(self, x):
return torch.matmul(x, self.weight.transpose(-1, -2)) + self.bias
class CustomLinearModel(torch.nn.Module):
def __init__(self, hidden_dim):
super(CustomLinearModel, self).__init__()
self.custom = CustomLinearModule(hidden_dim)
def forward(self, x):
return self.custom(x)
class QKVLinearModule(torch.nn.Module):
def __init__(self, hidden_dim):
super(QKVLinearModule, self).__init__()
self.qkv_proj = torch.nn.Linear(hidden_dim, hidden_dim * 3)
def forward(self, x):
return self.qkv_proj(x)
class QKVLinearModel(torch.nn.Module):
def __init__(self, hidden_dim):
super(QKVLinearModel, self).__init__()
self.self_attn = QKVLinearModule(hidden_dim)
def forward(self, x):
return self.self_attn(x)
def init_tp_engine(tp_size, partition_config=None):
config_dict = {
"train_micro_batch_size_per_gpu": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-6
}
},
"tensor_parallel": {
"autotp_size": tp_size,
},
"zero_optimization": {
"stage": 0,
}
}
if partition_config is not None:
config_dict["tensor_parallel"]["partition_config"] = partition_config
else:
config_dict["tensor_parallel"]["partition_config"] = {
"use_default_specs": False,
"layer_specs": [{
"patterns": [".*\\.weight$"],
"partition_type": "skip",
}],
}
if preferred_dtype() is torch.float16:
config_dict["fp16"] = {"enabled": True}
elif preferred_dtype() is torch.bfloat16:
config_dict["bf16"] = {"enabled": True}
model = SequentialLinearModel(hidden_dim=8)
deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
def apply_autotp_with_partition_config(model, tp_size, partition_config):
groups._init_tp_mesh_device(tensor_model_parallel_size=tp_size)
autotp_config = AutoTPConfig.from_dict(partition_config)
autotp = AutoTP(module=model,
all_reduce_linears=[],
prefix="",
state_dict=None,
linear_layer_setting=None,
orig_layer_impl=None,
keep_module_on_host=False,
partition_config=autotp_config)
autotp.set_tensor_parallel_config(tp_size, groups.get_tensor_model_parallel_group())
autotp.update_linear_policies()
autotp._replace_module(model)
return model
def gather_subparam_output(output, subparam_sizes, mp_group):
tp_world_size = dist.get_world_size(group=mp_group)
local_sizes = [size // tp_world_size for size in subparam_sizes]
output_chunks = torch.split(output, local_sizes, dim=-1)
gathered_chunks = []
for chunk in output_chunks:
chunk = chunk.contiguous()
gathered = [torch.empty_like(chunk) for _ in range(tp_world_size)]
dist.all_gather(gathered, chunk, group=mp_group)
gathered_chunks.append(torch.cat(gathered, dim=-1))
return torch.cat(gathered_chunks, dim=-1)
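# Hedged sketch (illustrative; not part of the original source and needs no
# distributed setup): with subparam sizes (8, 4, 4) and tp=2, each rank's
# local output packs q/k/v chunks of widths (4, 2, 2) along the last dim;
# gather_subparam_output all-gathers each chunk separately before
# re-concatenating them in q/k/v order.
def _example_local_subparam_layout():
    local_out = torch.arange(8.0).unsqueeze(0)  # one rank's fused qkv output
    q, k, v = torch.split(local_out, [4, 2, 2], dim=-1)
    assert (q.shape[-1], k.shape[-1], v.shape[-1]) == (4, 2, 2)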
def assert_close_for_preferred_dtype(actual, expected):
atol = 1e-3
rtol = 2e-2
if preferred_dtype() is torch.float32:
atol = 1e-5
rtol = 1e-5
torch.testing.assert_close(actual, expected, atol=atol, rtol=rtol)
class TestAutoTPCustomPatterns(DistributedTest):
world_size = 2
reuse_dist_env = False
def test_custom_pattern_replacement(self):
skip_on_device()
partition_config = {
"use_default_specs":
False,
"layer_specs": [
{
"patterns": [".*linears\\.0\\.weight$"],
"partition_type": "row",
},
{
"patterns": [".*linears\\.1\\.weight$"],
"partition_type": "column",
},
{
"patterns": [".*linears\\.2\\.weight$"],
"partition_type": "skip",
},
],
}
model = SequentialLinearModel(hidden_dim=16, nlayers=3)
model = apply_autotp_with_partition_config(model, tp_size=2, partition_config=partition_config)
assert isinstance(model.linears[0], LinearAllreduce)
assert isinstance(model.linears[1], LinearLayer)
assert isinstance(model.linears[2], nn.Linear)
def test_custom_patterns_applied_via_config(self):
skip_on_device()
partition_config = {
"use_default_specs":
False,
"layer_specs": [
{
"patterns": [".*linears\\.0\\.weight$"],
"partition_type": "row",
},
{
"patterns": [".*linears\\.1\\.weight$"],
"partition_type": "column",
},
{
"patterns": [".*linears\\.2\\.weight$"],
"partition_type": "skip",
},
],
}
config_dict = {
"train_micro_batch_size_per_gpu": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-6
}
},
"tensor_parallel": {
"autotp_size": 2,
"partition_config": partition_config,
},
"zero_optimization": {
"stage": 0,
}
}
if preferred_dtype() is torch.float16:
config_dict["fp16"] = {"enabled": True}
elif preferred_dtype() is torch.bfloat16:
config_dict["bf16"] = {"enabled": True}
model = SequentialLinearModel(hidden_dim=16, nlayers=3)
engine, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
assert isinstance(engine.module.linears[0], LinearAllreduce)
assert isinstance(engine.module.linears[1], LinearLayer)
assert isinstance(engine.module.linears[2], nn.Linear)
def test_use_default_specs_false_skips_unmatched_layers(self):
skip_on_device()
# Verify unmatched layers remain unsharded when defaults are disabled.
partition_config = {
"use_default_specs":
False,
"layer_specs": [
{
"patterns": [".*linears\\.0\\.weight$"],
"partition_type": "row",
},
{
"patterns": [".*linears\\.1\\.weight$"],
"partition_type": "column",
},
],
}
config_dict = {
"train_micro_batch_size_per_gpu": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-6
}
},
"tensor_parallel": {
"autotp_size": 2,
"partition_config": partition_config,
},
"zero_optimization": {
"stage": 0,
}
}
if preferred_dtype() is torch.float16:
config_dict["fp16"] = {"enabled": True}
elif preferred_dtype() is torch.bfloat16:
config_dict["bf16"] = {"enabled": True}
model = SequentialLinearModel(hidden_dim=16, nlayers=3)
engine, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
assert isinstance(engine.module.linears[0], LinearAllreduce)
assert isinstance(engine.module.linears[1], LinearLayer)
assert isinstance(engine.module.linears[2], nn.Linear)
def test_custom_module_replacement_with_patterns(self):
skip_on_device()
# Verify custom linear-like modules are partitioned via patterns.
partition_config = {
"use_default_specs": False,
"layer_specs": [
{
"patterns": [".*custom\\.weight$"],
"partition_type": "column",
},
],
}
config_dict = {
"train_micro_batch_size_per_gpu": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-6
}
},
"tensor_parallel": {
"autotp_size": 2,
"partition_config": partition_config,
},
"zero_optimization": {
"stage": 0,
}
}
if preferred_dtype() is torch.float16:
config_dict["fp16"] = {"enabled": True}
elif preferred_dtype() is torch.bfloat16:
config_dict["bf16"] = {"enabled": True}
model = CustomLinearModel(hidden_dim=16)
engine, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
assert isinstance(engine.module.custom, LinearLayer)
def test_custom_pattern_disables_fused_qkv_heuristic(self):
skip_on_device()
# Use a qkv_proj name that would trigger the fused-QKV heuristic, then
# verify custom patterns override that path and preserve correctness.
torch.manual_seed(1234)
hidden_dim = 16
qkv_sizes = (hidden_dim, hidden_dim, hidden_dim)
partition_config = {
"use_default_specs":
False,
"layer_specs": [
{
"patterns": [".*self_attn\\.qkv_proj\\.weight$"],
"partition_type": "column",
"shape": [list(qkv_sizes), -1],
"partition_dim": 0,
},
],
}
config_dict = {
"train_micro_batch_size_per_gpu": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-6
}
},
"tensor_parallel": {
"autotp_size": 2,
"partition_config": partition_config,
},
"zero_optimization": {
"stage": 0,
}
}
if preferred_dtype() is torch.float16:
config_dict["fp16"] = {"enabled": True}
elif preferred_dtype() is torch.bfloat16:
config_dict["bf16"] = {"enabled": True}
model = QKVLinearModel(hidden_dim=hidden_dim)
baseline = deepcopy(model).to(get_accelerator().current_device(), dtype=preferred_dtype())
engine, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
qkv_layer = engine.module.self_attn.qkv_proj
# Custom pattern should force SubParamLinearLayer (shape-based path),
# and avoid the legacy fused-QKV heuristic despite the qkv_proj name.
assert isinstance(qkv_layer, SubParamLinearLayer)
assert not isinstance(qkv_layer, fused_LinearLayer)
assert qkv_layer.partition_dim == 0
assert qkv_layer._subparam_sizes == qkv_sizes
assert qkv_layer._orig_weight_shape == (hidden_dim * 3, hidden_dim)
qkv_layer.gather_params([qkv_layer.weight, qkv_layer.bias])
torch.testing.assert_close(qkv_layer.weight, baseline.self_attn.qkv_proj.weight)
if qkv_layer.bias is not None:
torch.testing.assert_close(qkv_layer.bias, baseline.self_attn.qkv_proj.bias)
torch.manual_seed(4321)
inputs = torch.randn(2, hidden_dim, dtype=preferred_dtype(), device=get_accelerator().current_device())
full_output = baseline(inputs)
tp_output = engine.module(inputs)
assert_close_for_preferred_dtype(tp_output, full_output)
def test_first_match_precedence(self):
skip_on_device()
partition_config = {
"use_default_specs":
False,
"layer_specs": [
{
"patterns": [".*linears\\.0\\.weight$"],
"partition_type": "skip",
},
{
"patterns": [".*linears\\.0\\.weight$"],
"partition_type": "column",
},
],
}
model = SequentialLinearModel(hidden_dim=16, nlayers=1)
model = apply_autotp_with_partition_config(model, tp_size=2, partition_config=partition_config)
assert isinstance(model.linears[0], nn.Linear)
def test_invalid_custom_shape_rejected():
bad_config = {
"layer_specs": [{
"patterns": [".*"],
"partition_type": "column",
"shape": [2, [1, 1]],
}]
}
with pytest.raises(ValueError, match="nested tuple only allowed at partition_dim"):
AutoTPConfig.from_dict(bad_config)
class TestAutoTPFusedWeights(DistributedTest):
world_size = 2
reuse_dist_env = False
def test_gate_up_fused_weight_partition(self):
skip_on_device()
init_tp_engine(tp_size=2)
hidden_dim = 8
torch.manual_seed(42)
linear = nn.Linear(hidden_dim,
hidden_dim * 2,
bias=True,
dtype=preferred_dtype(),
device=get_accelerator().current_device())
full_weight = deepcopy(linear.weight.data)
full_bias = deepcopy(linear.bias.data)
layer = SubParamLinearLayer(deepcopy(linear),
groups.get_tensor_model_parallel_group(),
shape=(2, -1),
partition_dim=0,
name="mlp.gate_up_proj")
assert layer._subparam_sizes == (hidden_dim, hidden_dim)
assert layer.weight.shape == (hidden_dim, hidden_dim)
layer.gather_params([layer.weight, layer.bias])
torch.testing.assert_close(layer.weight.data, full_weight)
torch.testing.assert_close(layer.bias.data, full_bias)
def test_gqa_uneven_qkv_fused_weight_partition(self):
skip_on_device()
init_tp_engine(tp_size=2)
hidden_dim = 8
q_size, k_size, v_size = 8, 4, 4
torch.manual_seed(123)
linear = nn.Linear(hidden_dim,
q_size + k_size + v_size,
bias=True,
dtype=preferred_dtype(),
device=get_accelerator().current_device())
full_weight = deepcopy(linear.weight.data)
full_bias = deepcopy(linear.bias.data)
layer = SubParamLinearLayer(deepcopy(linear),
groups.get_tensor_model_parallel_group(),
shape=((q_size, k_size, v_size), -1),
partition_dim=0,
name="self_attn.qkv_proj")
assert layer._subparam_sizes == (q_size, k_size, v_size)
assert layer.weight.shape == ((q_size + k_size + v_size) // 2, hidden_dim)
layer.gather_params([layer.weight, layer.bias])
torch.testing.assert_close(layer.weight.data, full_weight)
torch.testing.assert_close(layer.bias.data, full_bias)
def test_gqa_uneven_qkv_fused_forward(self):
skip_on_device()
groups._init_tp_mesh_device(tensor_model_parallel_size=2)
hidden_dim = 8
q_size, k_size, v_size = 8, 4, 4
torch.manual_seed(321)
linear = nn.Linear(hidden_dim,
q_size + k_size + v_size,
bias=True,
dtype=preferred_dtype(),
device=get_accelerator().current_device())
layer = SubParamLinearLayer(deepcopy(linear),
groups.get_tensor_model_parallel_group(),
shape=((q_size, k_size, v_size), -1),
partition_dim=0,
name="self_attn.qkv_proj")
torch.manual_seed(42)
inputs = torch.randn(2, hidden_dim, dtype=preferred_dtype(), device=get_accelerator().current_device())
full_output = linear(inputs)
tp_output = layer(inputs)
gathered_output = gather_subparam_output(tp_output, (q_size, k_size, v_size),
groups.get_tensor_model_parallel_group())
assert_close_for_preferred_dtype(gathered_output, full_output)
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "tests/unit/model_parallelism/test_autotp_custom_patterns.py",
"license": "Apache License 2.0",
"lines": 427,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepspeedai/DeepSpeed:deepspeed/runtime/comm/utils.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import Optional
import torch
def check_and_handle_empty_buffer(
buffer_m: torch.Tensor,
original_shape: torch.Size,
original_size: int,
worker_error: torch.Tensor,
server_error: torch.Tensor,
) -> Optional[torch.Tensor]:
if original_size == 0:
if worker_error.numel():
worker_error.zero_()
if server_error.numel():
server_error.zero_()
if len(original_shape) > 1:
return buffer_m.reshape(original_shape)
return buffer_m
return None
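# Hedged usage sketch (illustrative; not part of the original source): a
# zero-element buffer short-circuits compression and resets the error
# feedback tensors; non-empty buffers return None so the caller proceeds.
def _example_empty_buffer_handling():
    buf = torch.empty(0, 4)
    worker_err = torch.ones(3)
    server_err = torch.ones(3)
    out = check_and_handle_empty_buffer(buf.flatten(), buf.shape, buf.numel(), worker_err, server_err)
    assert out is not None and out.shape == (0, 4)
    assert worker_err.sum().item() == 0.0
    assert server_err.sum().item() == 0.0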
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/runtime/comm/utils.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:tests/unit/v1/half_precision/test_with_autocast.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
import pytest
from unit.common import DistributedTest, allclose_on_all_ranks
from deepspeed.ops.op_builder import CPUAdamBuilder
from unit.simple_model import SimpleModel, random_dataloader
from unit.util import bf16_required_version_check
from deepspeed.accelerator import get_accelerator
from unit.v1.zero.test_zero_user_backward import (initialize_distributed, create_ddp_model, collect_ddp_gradients,
collect_gradients_safe, compare_gradients)
class TestTorchAutocastWithPrecisionModes(DistributedTest):
world_size = 2
@pytest.mark.parametrize("precision_mode,zero_stage", [
pytest.param("bf16_full", 1, id="z1_bf16_full_autocast"),
pytest.param("bf16_full", 2, id="z2_bf16_full_autocast"),
pytest.param("bf16_full", 3, id="z3_bf16_full_autocast"),
])
def test_gradients_match_ddp_with_autocast(self, precision_mode, zero_stage):
"""Test BF16 with torch_autocast by comparing gradients with DDP baseline."""
if not bf16_required_version_check():
pytest.skip(
" DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
)
hidden_dim = 6
lr = 1e-3
seed = 123
device, rank, dtype = initialize_distributed()
# Create DDP baseline with torch.autocast
model_ddp, optimizer_ddp = create_ddp_model(SimpleModel,
device,
rank,
dtype,
seed=seed,
lr=lr,
hidden_dim=hidden_dim,
nlayers=2)
torch.manual_seed(seed)
ds_model = SimpleModel(hidden_dim, nlayers=2)
# BF16 configuration
autocast_dtype = torch.bfloat16
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": lr
}
},
"torch_autocast": {
"enabled": True,
"dtype": str(autocast_dtype)
},
"bf16": {
"enabled": True,
"bf16_master_weights_and_grads": True,
"bf16_optimizer_states": True
},
"zero_optimization": {
"stage": zero_stage
}
}
engine, _, _, _ = deepspeed.initialize(config=config_dict,
model=ds_model,
model_parameters=ds_model.parameters())
data_loader = random_dataloader(model=engine,
total_samples=8,
hidden_dim=hidden_dim,
device=device,
dtype=torch.bfloat16)
batch = next(iter(data_loader))
# DDP with torch.autocast
optimizer_ddp.zero_grad()
with torch.autocast(device_type=get_accelerator().device_name(), dtype=autocast_dtype, enabled=True):
loss_ddp = model_ddp(batch[0], batch[1])
loss_ddp.backward()
grads_ddp = collect_ddp_gradients(model_ddp)
# DeepSpeed with torch_autocast config
loss_ds = engine(batch[0], batch[1])
engine.backward(loss_ds)
grads_ds = collect_gradients_safe(engine)
compare_gradients(grads_ddp, grads_ds, step_info=f"precision_mode={precision_mode}, zero_stage={zero_stage}")
# Verify parameters have correct comm_dtype attribute for autocast
from deepspeed.runtime.torch_autocast import has_comm_dtype, get_comm_dtype
for name, param in engine.module.named_parameters():
if "weight" in name:
# Linear layer weights should have comm_dtype set
assert has_comm_dtype(param), f"Parameter {name} should have comm_dtype attribute"
assert get_comm_dtype(param) == autocast_dtype, \
f"Parameter {name} comm_dtype should be {autocast_dtype}, got {get_comm_dtype(param)}"
optimizer_ddp.step()
engine.step()
optimizer_ddp.zero_grad()
engine.zero_grad()
engine.destroy()
@pytest.mark.parametrize("precision_mode,zero_stage", [
pytest.param("fp16_master_wg", 2, id="z2_fp16_master_wg_autocast"),
pytest.param("fp16_master_wg", 3, id="z3_fp16_master_wg_autocast"),
])
def test_parameters_match_ddp_after_step(self, precision_mode, zero_stage):
"""Test that parameters match DDP after a training step.
Note: This test is for FP16 where gradients are scaled and hard to compare.
"""
if not bf16_required_version_check():
pytest.skip(
" DeepSpeed BFloat16 tests need torch >= 1.10, NCCL >= 2.10.3, CUDA > =11.0 and HW support for BFloat16 to run correctly"
)
# FP16 mode requires CPU offload
if precision_mode == "fp16_master_wg" and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible")
# FP16 mode requires FP16 support
if precision_mode == "fp16_master_wg" and not get_accelerator().is_fp16_supported():
pytest.skip("fp16 is not supported")
hidden_dim = 6
lr = 1e-3
seed = 123
device, rank, dtype = initialize_distributed()
# For fp16 mode with autocast, use float32 model parameters
# For bf16 mode, use bfloat16 model parameters
model_dtype = torch.float32 if precision_mode == "fp16_master_wg" else dtype
# Create DDP baseline with torch.autocast
model_ddp, optimizer_ddp = create_ddp_model(SimpleModel,
device,
rank,
model_dtype,
seed=seed,
lr=lr,
hidden_dim=hidden_dim,
nlayers=2)
torch.manual_seed(seed)
ds_model = SimpleModel(hidden_dim, nlayers=2)
# Configure based on precision mode
if precision_mode == "bf16_full":
autocast_dtype = torch.bfloat16
precision_config = {
"bf16": {
"enabled": True,
"bf16_master_weights_and_grads": True,
"bf16_optimizer_states": True
}
}
zero_config = {"stage": zero_stage}
data_dtype = torch.bfloat16
use_grad_scaler = False
else: # fp16_master_wg
autocast_dtype = torch.float16
precision_config = {"fp16": {"enabled": True, "fp16_master_weights_and_grads": True}}
zero_config = {"stage": zero_stage, "offload_optimizer": {"device": "cpu"}}
data_dtype = torch.float16
use_grad_scaler = True
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": lr
}
},
"torch_autocast": {
"enabled": True,
"dtype": str(autocast_dtype)
},
"zero_optimization": zero_config,
**precision_config
}
engine, _, _, _ = deepspeed.initialize(config=config_dict,
model=ds_model,
model_parameters=ds_model.parameters())
data_loader = random_dataloader(model=engine,
total_samples=8,
hidden_dim=hidden_dim,
device=device,
dtype=data_dtype)
batch = next(iter(data_loader))
# DDP with torch.autocast and optional GradScaler for fp16
if use_grad_scaler:
scaler = torch.amp.GradScaler()
optimizer_ddp.zero_grad()
with torch.autocast(device_type=get_accelerator().device_name(), dtype=autocast_dtype, enabled=True):
loss_ddp = model_ddp(batch[0], batch[1])
if use_grad_scaler:
scaler.scale(loss_ddp).backward()
scaler.step(optimizer_ddp)
scaler.update()
else:
loss_ddp.backward()
optimizer_ddp.step()
# DeepSpeed with torch_autocast config
loss_ds = engine(batch[0], batch[1])
engine.backward(loss_ds)
engine.step()
# Compare parameters after the optimizer step
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
for (name_ddp, param_ddp), (name_ds, param_ds) in zip(model_ddp.named_parameters(),
engine.module.named_parameters()):
# Remove 'module.' prefix from both for comparison
name_ddp_clean = name_ddp.replace('module.', '')
name_ds_clean = name_ds.replace('module.', '')
assert name_ddp_clean == name_ds_clean, f"Parameter name mismatch: {name_ddp_clean} vs {name_ds_clean}"
# Get full parameter for ZeRO stage 3
if hasattr(param_ds, 'ds_status') and param_ds.ds_status == ZeroParamStatus.NOT_AVAILABLE:
with deepspeed.zero.GatheredParameters([param_ds], modifier_rank=0):
param_ds_full = param_ds.detach().clone().cpu().float()
else:
param_ds_full = param_ds.detach().clone().cpu().float()
param_ddp_full = param_ddp.detach().clone().cpu().float()
# Use allclose_on_all_ranks for comparison
allclose_on_all_ranks(
param_ddp_full,
param_ds_full,
rtol=1e-3,
atol=1e-3,
assert_message=
f"Parameters differ for {name_ddp_clean} at precision_mode={precision_mode}, zero_stage={zero_stage}")
# Verify parameters have correct comm_dtype attribute for autocast
from deepspeed.runtime.torch_autocast import has_comm_dtype, get_comm_dtype
for name, param in engine.module.named_parameters():
if "weight" in name:
# Linear layer weights should have comm_dtype set
assert has_comm_dtype(param), f"Parameter {name} should have comm_dtype attribute"
assert get_comm_dtype(param) == autocast_dtype, \
f"Parameter {name} comm_dtype should be {autocast_dtype}, got {get_comm_dtype(param)}"
engine.destroy()
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "tests/unit/v1/half_precision/test_with_autocast.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepspeedai/DeepSpeed:tests/unit/v1/zero/test_zero_user_backward.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed.comm as dist
import deepspeed
from torch.nn.parallel import DistributedDataParallel as DDP
from unit.common import DistributedTest, preferred_dtype, allclose_on_all_ranks
from unit.simple_model import SimpleModel, random_dataloader
from deepspeed.accelerator import get_accelerator
from deepspeed.utils import safe_get_full_grad
class SimpleNonScalarModel(torch.nn.Module):
"""Model that returns non-scalar output for testing tensor.backward(grad)"""
def __init__(self, hidden_dim):
super().__init__()
self.linear1 = torch.nn.Linear(hidden_dim, hidden_dim)
self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
def forward(self, x):
# Returns non-scalar output
x = self.linear1(x)
x = self.linear2(x)
return x
class SimpleOutputModel(torch.nn.Module):
"""Model that returns output without computing loss"""
def __init__(self, hidden_dim):
super().__init__()
self.linear1 = torch.nn.Linear(hidden_dim, hidden_dim)
self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return x
def get_config_dict(zero_stage, gradient_accumulation_steps=1):
"""Helper to create config dict with common settings"""
config_dict = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": gradient_accumulation_steps,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage,
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
}
if zero_stage == 3:
# For ZeRO-3, force partitioning of all parameters
config_dict["zero_optimization"]["stage3_param_persistence_threshold"] = 0
if get_accelerator().is_bf16_supported():
config_dict["bf16"] = {"enabled": True}
elif get_accelerator().is_fp16_supported():
config_dict["fp16"] = {"enabled": True, "initial_scale_power": 8}
return config_dict
def collect_gradients_safe(model):
"""Collect gradients from model parameters using safe_get_full_grad API"""
grads = {}
for name, param in model.named_parameters():
if param.requires_grad:
grad = safe_get_full_grad(param)
if grad is not None:
# Remove 'module.' prefix if present (DeepSpeed wraps the model)
clean_name = name.replace('module.', '')
grads[clean_name] = grad.detach().clone().cpu()
return grads
def initialize_distributed():
deepspeed.init_distributed(dist_backend=get_accelerator().communication_backend_name())
device = get_accelerator().current_device_name()
rank = get_accelerator().current_device()
dtype = preferred_dtype()
return device, rank, dtype
def create_ddp_model(model_class, device, rank, dtype, seed=42, lr=1e-3, **model_kwargs):
torch.manual_seed(seed)
model = model_class(**model_kwargs)
model = model.to(device=device, dtype=dtype)
model = DDP(model, device_ids=[rank], output_device=rank)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
return model, optimizer
def create_deepspeed_engine(model_class, zero_stage, seed=42, gradient_accumulation_steps=1, **model_kwargs):
torch.manual_seed(seed)
model = model_class(**model_kwargs)
config = get_config_dict(zero_stage, gradient_accumulation_steps=gradient_accumulation_steps)
engine, _, _, _ = deepspeed.initialize(config=config, model=model, model_parameters=model.parameters())
return engine
def create_deepspeed_engine_from_model(model, zero_stage, gradient_accumulation_steps=1):
config = get_config_dict(zero_stage, gradient_accumulation_steps=gradient_accumulation_steps)
engine, _, _, _ = deepspeed.initialize(config=config, model=model, model_parameters=model.parameters())
return engine
def setup_models_and_engines(model_class, zero_stage, seed=42, lr=1e-3, gradient_accumulation_steps=1, **model_kwargs):
# Initialize distributed environment
device, rank, dtype = initialize_distributed()
# Create DDP model
model_ddp, optimizer_ddp = create_ddp_model(model_class, device, rank, dtype, seed=seed, lr=lr, **model_kwargs)
# Create DeepSpeed engine
model_engine = create_deepspeed_engine(model_class,
zero_stage,
seed=seed,
gradient_accumulation_steps=gradient_accumulation_steps,
**model_kwargs)
return model_ddp, optimizer_ddp, model_engine, device, dtype
def collect_ddp_gradients(model_ddp):
"""Collect gradients from DDP model"""
grads = {}
for name, param in model_ddp.named_parameters():
if param.grad is not None:
clean_name = name.replace('module.', '')
grads[clean_name] = param.grad.detach().clone().cpu()
return grads
def compare_gradients(grads_ddp, grads_ds, step_info=""):
"""Compare gradients between DDP and DeepSpeed.
Uses PyTorch's default tolerances for the tensor dtype (e.g., for bfloat16:
rtol=1.6e-2, atol=1e-5). The 2-layer model keeps differences small enough
to pass with default tolerances even after multiple optimizer steps.
"""
step_suffix = f" at {step_info}" if step_info else ""
assert len(grads_ddp) == len(grads_ds), \
f"Different number of parameters with gradients{step_suffix}: DDP={len(grads_ddp)}, DeepSpeed={len(grads_ds)}"
for name in grads_ddp.keys():
assert name in grads_ds, f"Parameter {name} missing in DeepSpeed gradients{step_suffix}"
grad_ddp = grads_ddp[name]
grad_ds = grads_ds[name]
# If dtypes differ, convert ds to match ddp's dtype
if grad_ds.dtype != grad_ddp.dtype:
grad_ds = grad_ds.to(grad_ddp.dtype)
# Use PyTorch's default tolerances for the dtype
allclose_on_all_ranks(grad_ddp, grad_ds, assert_message=f"Gradients differ for parameter {name}{step_suffix}")
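# Hedged sketch (illustrative; not part of the original source): PyTorch's
# default assert_close tolerances for bfloat16 are rtol=1.6e-2, atol=1e-5,
# which is what compare_gradients relies on above. A ~1% relative error
# therefore passes without explicit tolerances.
def _example_default_bf16_tolerance():
    a = torch.ones(4, dtype=torch.bfloat16)
    b = a * 1.01  # ~1% relative error, inside the bf16 default rtol
    torch.testing.assert_close(a, b)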
def collect_ddp_parameters(model_ddp):
"""Collect parameters from DDP model"""
params = {}
for name, param in model_ddp.named_parameters():
clean_name = name.replace('module.', '')
params[clean_name] = param.detach().clone().cpu()
return params
def collect_deepspeed_parameters(model_engine, zero_stage):
"""Collect parameters from DeepSpeed engine (handles ZeRO-3 gathering)"""
params = {}
for name, param in model_engine.named_parameters():
clean_name = name.replace('module.', '')
if zero_stage == 3:
with deepspeed.zero.GatheredParameters([param], modifier_rank=None):
params[clean_name] = param.detach().clone().cpu()
else:
params[clean_name] = param.detach().clone().cpu()
return params
def compare_parameters(params_ddp, params_ds, step_info=""):
"""Compare parameters between DDP and DeepSpeed"""
step_suffix = f" at {step_info}" if step_info else ""
assert len(params_ddp) == len(params_ds), \
f"Parameter count mismatch{step_suffix}: DDP={len(params_ddp)}, DeepSpeed={len(params_ds)}"
for name in params_ddp.keys():
assert name in params_ds, f"Parameter {name} missing in DeepSpeed model{step_suffix}"
# Convert to fp32 for comparison in case of dtype mismatch
params_ddp_fp32 = params_ddp[name].float()
params_ds_fp32 = params_ds[name].float()
allclose_on_all_ranks(params_ddp_fp32,
params_ds_fp32,
assert_message=f"Parameter {name} mismatch{step_suffix}")
@pytest.mark.parametrize("zero_stage", [1, 2, 3])
class TestZeroUserBackwardBasic(DistributedTest):
"""Test basic functionality of user backward (loss.backward()) by comparing with PyTorch DDP"""
world_size = 2
def test_loss_backward_matches_ddp(self, zero_stage):
"""Test that DeepSpeed loss.backward() produces same gradients as PyTorch DDP"""
hidden_dim = 4
# Create DDP and DeepSpeed models
model_ddp, optimizer_ddp, model_engine, device, dtype = setup_models_and_engines(model_class=SimpleModel,
zero_stage=zero_stage,
hidden_dim=hidden_dim,
nlayers=2)
# Create data
data_loader = random_dataloader(model=model_engine, total_samples=8, hidden_dim=hidden_dim, device=device)
# Run one training step with both models
batch = next(iter(data_loader))
# DDP: forward and backward
optimizer_ddp.zero_grad()
loss_ddp = model_ddp(batch[0], batch[1])
loss_ddp.backward()
grads_ddp = collect_ddp_gradients(model_ddp)
# DeepSpeed: forward and backward
loss_ds = model_engine(batch[0], batch[1])
loss_ds.backward()
grads_ds = collect_gradients_safe(model_engine)
# Compare gradients
compare_gradients(grads_ddp, grads_ds)
model_engine.destroy()
@pytest.mark.parametrize("zero_stage", [1, 2, 3])
class TestZeroUserBackwardNonScalar(DistributedTest):
"""Test non-scalar backward support"""
world_size = 2
def test_non_scalar_backward(self, zero_stage):
"""Test that tensor.backward(grad) works correctly by comparing with PyTorch DDP"""
hidden_dim = 4
batch_size = 2
# Create DDP and DeepSpeed models
model_ddp, optimizer_ddp, model_engine, device, dtype = setup_models_and_engines(
model_class=SimpleNonScalarModel, zero_stage=zero_stage, hidden_dim=hidden_dim)
# Create input data
torch.manual_seed(123)
x = torch.randn(batch_size, hidden_dim, device=device, dtype=dtype)
# DDP: forward and non-scalar backward
optimizer_ddp.zero_grad()
output_ddp = model_ddp(x)
grad_output = torch.ones_like(output_ddp)
output_ddp.backward(grad_output)
ddp_grads = collect_ddp_gradients(model_ddp)
# DeepSpeed: forward and non-scalar backward
output_deepspeed = model_engine(x)
grad_output_ds = torch.ones_like(output_deepspeed)
output_deepspeed.backward(grad_output_ds)
deepspeed_grads = collect_gradients_safe(model_engine)
# Compare gradients
compare_gradients(ddp_grads, deepspeed_grads, "after non-scalar backward")
# Run optimizer step
optimizer_ddp.step()
model_engine.step()
# Collect and compare parameters after step
ddp_params = collect_ddp_parameters(model_ddp)
deepspeed_params = collect_deepspeed_parameters(model_engine, zero_stage)
compare_parameters(ddp_params, deepspeed_params, "after non-scalar backward")
model_engine.destroy()
@pytest.mark.parametrize("zero_stage", [1, 2, 3])
class TestZeroUserBackwardGradAccumulation(DistributedTest):
"""Test gradient accumulation with user backward"""
world_size = 2
def test_grad_accumulation(self, zero_stage):
"""Test that gradient accumulation works correctly with loss.backward() by comparing with DDP"""
hidden_dim = 4
gradient_accumulation_steps = 4
# Create DDP and DeepSpeed models with gradient accumulation
model_ddp, optimizer_ddp, model_engine, device, _ = setup_models_and_engines(
model_class=SimpleModel,
zero_stage=zero_stage,
gradient_accumulation_steps=gradient_accumulation_steps,
hidden_dim=hidden_dim,
nlayers=2)
# Create data
data_loader = random_dataloader(model=model_engine, total_samples=16, hidden_dim=hidden_dim, device=device)
# Run training with gradient accumulation
for i, batch in enumerate(data_loader):
# DDP: Manual gradient accumulation
loss_ddp = model_ddp(batch[0], batch[1])
(loss_ddp / gradient_accumulation_steps).backward()
# DeepSpeed: Built-in gradient accumulation
loss_ds = model_engine(batch[0], batch[1])
loss_ds.backward()
# Compare gradients at accumulation boundary
if model_engine.is_gradient_accumulation_boundary():
grads_ddp = collect_ddp_gradients(model_ddp)
grads_ds = collect_gradients_safe(model_engine)
compare_gradients(grads_ddp, grads_ds, f"step {i}")
# Step both optimizers
optimizer_ddp.step()
optimizer_ddp.zero_grad()
# Step DeepSpeed (handles gradient accumulation internally)
model_engine.step()
model_engine.destroy()
def test_grad_accumulation_scale_wrt_gas_false(self, zero_stage):
"""Test that scale_wrt_gas=False disables gradient scaling by accumulation steps.
When scale_wrt_gas=False is passed to engine.backward(), gradients should NOT be
scaled by gradient_accumulation_steps. This is useful when users want to handle
gradient scaling themselves (e.g., using Hugging Face Accelerate).
"""
hidden_dim = 4
gradient_accumulation_steps = 4
# Create DDP and DeepSpeed models with gradient accumulation
model_ddp, optimizer_ddp, model_engine, device, _ = setup_models_and_engines(
model_class=SimpleModel,
zero_stage=zero_stage,
gradient_accumulation_steps=gradient_accumulation_steps,
hidden_dim=hidden_dim,
nlayers=2)
# Create data
data_loader = random_dataloader(model=model_engine, total_samples=16, hidden_dim=hidden_dim, device=device)
# Run training with gradient accumulation but WITHOUT scaling by GAS
for i, batch in enumerate(data_loader):
# DDP: Do NOT divide by GAS (since we're testing scale_wrt_gas=False)
loss_ddp = model_ddp(batch[0], batch[1])
loss_ddp.backward()
# DeepSpeed: Use scale_wrt_gas=False to disable gradient scaling
loss_ds = model_engine(batch[0], batch[1])
model_engine.backward(loss_ds, scale_wrt_gas=False)
# Compare gradients at accumulation boundary
if model_engine.is_gradient_accumulation_boundary():
grads_ddp = collect_ddp_gradients(model_ddp)
grads_ds = collect_gradients_safe(model_engine)
compare_gradients(grads_ddp, grads_ds, f"step {i} with scale_wrt_gas=False")
# Step both optimizers
optimizer_ddp.step()
optimizer_ddp.zero_grad()
# Step DeepSpeed (handles gradient accumulation internally)
model_engine.step()
model_engine.destroy()
@pytest.mark.parametrize("zero_stage", [1, 2, 3])
class TestZeroUserBackwardMultipleEngines(DistributedTest):
"""Test multiple DeepSpeed engines with combined loss without manual _backward_epilogue()"""
world_size = 2
def test_multiple_engines_combined_loss(self, zero_stage):
"""Test that multiple engines work with combined loss.backward() without manual _backward_epilogue()
This test compares the behavior with PyTorch DDP baseline to ensure correctness.
"""
hidden_dim = 4
batch_size = 2
num_models = 3
lr = 1e-3
# Initialize distributed
device, rank, dtype = initialize_distributed()
# Create DDP baseline models
ddp_models = []
ddp_optimizers = []
for i in range(num_models):
model, optimizer = create_ddp_model(SimpleModel,
device,
rank,
dtype,
seed=42 + i,
lr=lr,
hidden_dim=hidden_dim,
nlayers=2)
ddp_models.append(model)
ddp_optimizers.append(optimizer)
# Create multiple DeepSpeed engines with identical initialization
model_engines = []
for i in range(num_models):
engine = create_deepspeed_engine(SimpleModel, zero_stage, seed=42 + i, hidden_dim=hidden_dim, nlayers=2)
model_engines.append(engine)
# Create same input for all models
torch.manual_seed(123)
x = torch.randn(batch_size, hidden_dim, device=device, dtype=dtype)
y = torch.randint(0, hidden_dim, (batch_size, ), device=device)
# DDP baseline: compute losses and combined backward
for optimizer in ddp_optimizers:
optimizer.zero_grad()
ddp_losses = []
for model in ddp_models:
loss = model(x, y)
ddp_losses.append(loss)
ddp_combined_loss = sum(l / (i + 1) for i, l in enumerate(ddp_losses))
ddp_combined_loss.backward()
# Collect DDP gradients for each model
ddp_grads_per_model = [collect_ddp_gradients(model) for model in ddp_models]
# DeepSpeed: compute losses and combined backward WITHOUT manual _backward_epilogue()
ds_losses = [engine(x, y) for engine in model_engines]
ds_combined_loss = sum(l / (i + 1) for i, l in enumerate(ds_losses))
ds_combined_loss.backward()
# Collect DeepSpeed gradients for each engine and compare with DDP
for engine_idx, engine in enumerate(model_engines):
ds_grads = collect_gradients_safe(engine)
ddp_grads = ddp_grads_per_model[engine_idx]
assert len(ds_grads) > 0, f"Engine {engine_idx} has no gradients after combined_loss.backward()"
compare_gradients(ddp_grads, ds_grads, f"Engine {engine_idx}")
# Step all DDP models
for optimizer in ddp_optimizers:
optimizer.step()
optimizer.zero_grad()
# Step all DeepSpeed engines
for engine in model_engines:
engine.step()
engine.optimizer.zero_grad()
# Run another iteration to ensure everything still works
torch.manual_seed(456)
x2 = torch.randn(batch_size, hidden_dim, device=device, dtype=dtype)
y2 = torch.randint(0, hidden_dim, (batch_size, ), device=device)
# DDP second iteration
ddp_losses2 = [model(x2, y2) for model in ddp_models]
ddp_combined_loss2 = sum(l / (i + 1) for i, l in enumerate(ddp_losses2))
ddp_combined_loss2.backward()
ddp_grads_per_model2 = [collect_ddp_gradients(model) for model in ddp_models]
# DeepSpeed second iteration
ds_losses2 = [engine(x2, y2) for engine in model_engines]
ds_combined_loss2 = sum(l / (i + 1) for i, l in enumerate(ds_losses2))
ds_combined_loss2.backward()
# Verify gradients again and compare with DDP
for engine_idx, engine in enumerate(model_engines):
ds_grads = collect_gradients_safe(engine)
ddp_grads = ddp_grads_per_model2[engine_idx]
assert len(ds_grads) > 0, f"Engine {engine_idx} has no gradients in second iteration"
compare_gradients(ddp_grads, ds_grads, f"Engine {engine_idx} (iter 2)")
# Step both
for optimizer in ddp_optimizers:
optimizer.step()
for engine in model_engines:
engine.step()
# Cleanup
for engine in model_engines:
engine.destroy()
@pytest.mark.parametrize("zero_stage", [1, 2, 3])
class TestZeroUserBackwardSeparateLoss(DistributedTest):
"""Test using separate loss functions"""
world_size = 2
def test_separate_loss_function(self, zero_stage):
"""Test that separate loss function works correctly by comparing with PyTorch DDP"""
hidden_dim = 4
batch_size = 2
# Create DDP and DeepSpeed models
model_ddp, optimizer_ddp, model_engine, device, dtype = setup_models_and_engines(model_class=SimpleOutputModel,
zero_stage=zero_stage,
hidden_dim=hidden_dim)
# Define loss function separately
loss_fn = torch.nn.CrossEntropyLoss()
# Create input data
torch.manual_seed(456)
x = torch.randn(batch_size, hidden_dim, device=device, dtype=dtype)
y = torch.randint(0, hidden_dim, (batch_size, ), device=device)
# DDP: forward, loss, backward
optimizer_ddp.zero_grad()
output_ddp = model_ddp(x)
loss_ddp = loss_fn(output_ddp, y)
loss_ddp.backward()
grads_ddp = collect_ddp_gradients(model_ddp)
# DeepSpeed: forward, loss, backward
output_ds = model_engine(x)
loss_ds = loss_fn(output_ds, y)
loss_ds.backward()
grads_ds = collect_gradients_safe(model_engine)
# Compare gradients
compare_gradients(grads_ddp, grads_ds)
model_engine.destroy()
class LeafModuleModel(torch.nn.Module):
"""Model with ModuleList that uses all parameters - for testing leaf module compatibility"""
def __init__(self, hidden_dim):
super().__init__()
# ModuleList where all branches are used in forward pass
self.branches = torch.nn.ModuleList([
torch.nn.Linear(hidden_dim, hidden_dim),
torch.nn.Linear(hidden_dim, hidden_dim),
])
self.final_layer = torch.nn.Linear(hidden_dim, hidden_dim)
def forward(self, x, y):
# Use all branches - add their outputs together
x = self.branches[0](x) + self.branches[1](x)
x = self.final_layer(x)
loss = torch.nn.functional.cross_entropy(x, y)
return loss
class LeafNonScalarModel(torch.nn.Module):
"""Leaf module model that returns non-scalar output"""
def __init__(self, hidden_dim):
super().__init__()
self.branches = torch.nn.ModuleList([
torch.nn.Linear(hidden_dim, hidden_dim),
torch.nn.Linear(hidden_dim, hidden_dim),
])
def forward(self, x):
# Use all branches - returns non-scalar output
return self.branches[0](x) + self.branches[1](x)
@pytest.mark.parametrize("zero_stage", [3])
class TestZeroUserBackwardLeafModule(DistributedTest):
"""Test leaf module behavior during backward passes in ZeRO Stage 3"""
world_size = 2
def test_leaf_module_backward(self, zero_stage):
"""Test that leaf modules work correctly with user backward by comparing with PyTorch DDP
This test validates that the leaf_module_count and backward hooks are correctly
handled in create_reduce_and_remove_grad_hooks.
"""
from deepspeed.utils import set_z3_leaf_modules, z3_leaf_module
hidden_dim = 4
batch_size = 2
lr = 1e-3
# Initialize distributed environment
device, rank, dtype = initialize_distributed()
# Create DDP model
model_ddp, optimizer_ddp = create_ddp_model(LeafModuleModel,
device,
rank,
dtype,
seed=42,
lr=lr,
hidden_dim=hidden_dim)
# Create DeepSpeed model and mark leaf modules BEFORE initialization
torch.manual_seed(42)
model_deepspeed = LeafModuleModel(hidden_dim=hidden_dim)
leaf_modules = set_z3_leaf_modules(model_deepspeed, [torch.nn.ModuleList])
assert len(leaf_modules) == 1, "Expected exactly one ModuleList to be marked as leaf"
assert z3_leaf_module(model_deepspeed.branches), "ModuleList should be marked as leaf module"
# Initialize DeepSpeed engine from the prepared model
model_engine = create_deepspeed_engine_from_model(model_deepspeed, zero_stage)
# Verify leaf_module_count was set correctly
assert len(model_engine.optimizer.leaf_parameters) == 1, \
"Expected 1 leaf module in optimizer.leaf_parameters"
# Create input data
torch.manual_seed(123)
x = torch.randn(batch_size, hidden_dim, device=device, dtype=dtype)
y = torch.randint(0, hidden_dim, (batch_size, ), device=device)
# DDP: forward and backward
optimizer_ddp.zero_grad()
loss_ddp = model_ddp(x, y)
loss_ddp.backward()
ddp_grads = collect_ddp_gradients(model_ddp)
# DeepSpeed: forward and backward with leaf module
loss_deepspeed = model_engine(x, y)
loss_deepspeed.backward()
deepspeed_grads = collect_gradients_safe(model_engine)
# Compare gradients
compare_gradients(ddp_grads, deepspeed_grads, "with leaf modules")
model_engine.destroy()
def test_leaf_module_non_scalar_backward(self, zero_stage):
"""Test that leaf modules work correctly with non-scalar backward (tensor.backward(grad))
This specifically tests the interaction between leaf modules and non-scalar backward.
"""
from deepspeed.utils import set_z3_leaf_modules, z3_leaf_module
hidden_dim = 4
batch_size = 2
lr = 1e-3
# Initialize distributed environment
device, rank, dtype = initialize_distributed()
# Create DDP model
model_ddp, optimizer_ddp = create_ddp_model(LeafNonScalarModel,
device,
rank,
dtype,
seed=42,
lr=lr,
hidden_dim=hidden_dim)
# Create DeepSpeed model and mark leaf modules BEFORE initialization
torch.manual_seed(42)
model_deepspeed = LeafNonScalarModel(hidden_dim=hidden_dim)
leaf_modules = set_z3_leaf_modules(model_deepspeed, [torch.nn.ModuleList])
assert len(leaf_modules) == 1, "Expected exactly one ModuleList to be marked as leaf"
assert z3_leaf_module(model_deepspeed.branches), "ModuleList should be marked as leaf module"
# Initialize DeepSpeed engine from the prepared model
model_engine = create_deepspeed_engine_from_model(model_deepspeed, zero_stage)
# Verify leaf_module_count was set correctly
assert len(model_engine.optimizer.leaf_parameters) == 1, \
"Expected 1 leaf module in optimizer.leaf_parameters"
# Create input data
torch.manual_seed(123)
x = torch.randn(batch_size, hidden_dim, device=device, dtype=dtype)
# DDP: forward and non-scalar backward
optimizer_ddp.zero_grad()
output_ddp = model_ddp(x)
grad_output = torch.ones_like(output_ddp)
output_ddp.backward(grad_output)
ddp_grads = collect_ddp_gradients(model_ddp)
# DeepSpeed: forward and non-scalar backward with leaf module
output_deepspeed = model_engine(x)
grad_output_ds = torch.ones_like(output_deepspeed)
output_deepspeed.backward(grad_output_ds)
deepspeed_grads = collect_gradients_safe(model_engine)
# Compare gradients
compare_gradients(ddp_grads, deepspeed_grads, "in leaf module non-scalar backward")
model_engine.destroy()
@pytest.mark.sequential
class TestZeroUserBackwardScaleErrorDetection(DistributedTest):
"""Test error detection for missing scale() with fp16 in single-process setup"""
world_size = 1 # Use single process to avoid distributed deadlock issues
def test_error_when_backward_without_scale_sequential(self):
"""Test that error is raised when calling backward() without scale() with fp16"""
if not get_accelerator().is_fp16_supported():
pytest.skip("Test requires fp16 support")
hidden_dim = 4
zero_stage = 1 # Use ZeRO stage 1 for simplicity
# Initialize distributed
device, _, _ = initialize_distributed()
# Create engine with fp16 - requires scaling
torch.manual_seed(42)
model = SimpleModel(hidden_dim=hidden_dim, nlayers=2)
config = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 1,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage,
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
model_engine, _, _, _ = deepspeed.initialize(config=config, model=model, model_parameters=model.parameters())
# Verify needs_scaler is True
from deepspeed.runtime.base_optimizer import ZeROOptimizer
assert isinstance(model_engine.optimizer, ZeROOptimizer)
assert model_engine.optimizer.needs_scaler(), "fp16 should require scaling"
# Create data
data_loader = random_dataloader(model=model_engine,
total_samples=8,
hidden_dim=hidden_dim,
device=device,
dtype=torch.float16)
batch = next(iter(data_loader))
loss = model_engine(batch[0], batch[1])
# Calling backward() without scale() should raise RuntimeError
with pytest.raises(RuntimeError, match="Loss scaling is required"):
loss.backward()
model_engine.destroy()
@pytest.mark.parametrize("zero_stage", [1, 3])
class TestZeroUserBackwardWithScale(DistributedTest):
"""Test engine.scale() method for manual backward passes with loss scaling"""
world_size = 2
def test_scale_backward_matches_engine_backward(self, zero_stage):
"""Test that engine.scale(loss).backward() produces same gradients as engine.backward(loss)"""
hidden_dim = 4
# Create DeepSpeed engines with same seed
model_engine1 = create_deepspeed_engine(model_class=SimpleModel,
zero_stage=zero_stage,
seed=42,
hidden_dim=hidden_dim,
nlayers=2)
model_engine2 = create_deepspeed_engine(model_class=SimpleModel,
zero_stage=zero_stage,
seed=42,
hidden_dim=hidden_dim,
nlayers=2)
# Create data
device = get_accelerator().current_device_name()
data_loader = random_dataloader(model=model_engine1, total_samples=8, hidden_dim=hidden_dim, device=device)
batch = next(iter(data_loader))
# Model 1: use engine.backward(loss)
loss1 = model_engine1(batch[0], batch[1])
model_engine1.backward(loss1)
grads1 = collect_gradients_safe(model_engine1)
# Model 2: use engine.scale(loss).backward()
loss2 = model_engine2(batch[0], batch[1])
scaled_loss = model_engine2.scale(loss2)
scaled_loss.backward()
grads2 = collect_gradients_safe(model_engine2)
# Compare gradients - they should be identical
compare_gradients(grads1, grads2, "comparing engine.backward vs engine.scale().backward()")
model_engine1.destroy()
model_engine2.destroy()
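    # Hedged recap of the manual pattern validated above (engine and inputs are
    # placeholders): instead of engine.backward(loss), a caller can apply the
    # loss scaler explicitly and drive autograd directly:
    #
    #   loss = engine(inputs, labels)
    #   engine.scale(loss).backward()   # applies the fp16 loss scaler only
    #   engine.step()                   # DeepSpeed still owns GAS and the optimizer step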
def test_scale_backward_matches_ddp(self, zero_stage):
"""Test that engine.scale(loss).backward() produces same gradients as DDP"""
hidden_dim = 4
# Create DDP and DeepSpeed models
model_ddp, optimizer_ddp, model_engine, device, dtype = setup_models_and_engines(model_class=SimpleModel,
zero_stage=zero_stage,
hidden_dim=hidden_dim,
nlayers=2)
# Create data
data_loader = random_dataloader(model=model_engine, total_samples=8, hidden_dim=hidden_dim, device=device)
batch = next(iter(data_loader))
# DDP: forward and backward
optimizer_ddp.zero_grad()
loss_ddp = model_ddp(batch[0], batch[1])
loss_ddp.backward()
grads_ddp = collect_ddp_gradients(model_ddp)
# DeepSpeed: forward and scale + backward
loss_ds = model_engine(batch[0], batch[1])
scaled_loss = model_engine.scale(loss_ds)
scaled_loss.backward()
grads_ds = collect_gradients_safe(model_engine)
# Compare gradients
compare_gradients(grads_ddp, grads_ds, "comparing DDP vs engine.scale().backward()")
model_engine.destroy()
def test_scale_with_gradient_accumulation(self, zero_stage):
"""Test that engine.scale() works correctly with gradient accumulation"""
hidden_dim = 4
gradient_accumulation_steps = 4
# Create models with gradient accumulation
model_ddp, optimizer_ddp, model_engine, device, _ = setup_models_and_engines(
model_class=SimpleModel,
zero_stage=zero_stage,
gradient_accumulation_steps=gradient_accumulation_steps,
hidden_dim=hidden_dim,
nlayers=2)
# Create data
data_loader = random_dataloader(model=model_engine, total_samples=16, hidden_dim=hidden_dim, device=device)
# Run gradient accumulation steps
for i, batch in enumerate(data_loader):
# DDP: manual gradient accumulation
loss_ddp = model_ddp(batch[0], batch[1])
# Scale by GAS for DDP to match DeepSpeed behavior
(loss_ddp / gradient_accumulation_steps).backward()
# DeepSpeed: use scale() with built-in gradient accumulation
# Note: scale() only applies loss scaler, NOT GAS. DeepSpeed handles GAS internally
# via engine.step(), so we do NOT manually divide by GAS here.
loss_ds = model_engine(batch[0], batch[1])
scaled_loss = model_engine.scale(loss_ds)
scaled_loss.backward()
# Compare gradients at accumulation boundary
if model_engine.is_gradient_accumulation_boundary():
grads_ddp = collect_ddp_gradients(model_ddp)
grads_ds = collect_gradients_safe(model_engine)
compare_gradients(grads_ddp, grads_ds, f"step {i}")
# Step both optimizers
optimizer_ddp.step()
optimizer_ddp.zero_grad()
# Step DeepSpeed (handles gradient accumulation internally)
model_engine.step()
model_engine.destroy()
def test_needs_scaler_with_fp16(self, zero_stage):
"""Test that needs_scaler() correctly identifies when scaling is required with fp16"""
if not get_accelerator().is_fp16_supported():
pytest.skip("Test requires fp16 support for gradient scaling")
hidden_dim = 4
# Initialize distributed first
device, _, _ = initialize_distributed()
# Create engine with fp16 explicitly to test gradient scaling requirement
torch.manual_seed(42)
model = SimpleModel(hidden_dim=hidden_dim, nlayers=2)
config = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 1,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage,
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
# Explicitly enable fp16 to test gradient scaling requirement
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
if zero_stage == 3:
config["zero_optimization"]["stage3_param_persistence_threshold"] = 0
model_engine, _, _, _ = deepspeed.initialize(config=config, model=model, model_parameters=model.parameters())
# Verify that the optimizer correctly reports it needs scaling with fp16
from deepspeed.runtime.base_optimizer import ZeROOptimizer
assert isinstance(model_engine.optimizer, ZeROOptimizer), "Optimizer should be ZeROOptimizer"
assert model_engine.optimizer.needs_scaler(), "fp16 configuration should require gradient scaling"
# Verify scale() method works correctly
data_loader = random_dataloader(model=model_engine,
total_samples=8,
hidden_dim=hidden_dim,
device=device,
dtype=torch.float16)
batch = next(iter(data_loader))
loss = model_engine(batch[0], batch[1])
# Should be able to use scale() method and get a valid scaled tensor
scaled_loss = model_engine.scale(loss)
assert scaled_loss is not None, "scale() should return a scaled loss tensor"
assert scaled_loss.requires_grad, "scaled loss should require grad"
model_engine.destroy()
def test_needs_scaler_with_bf16(self, zero_stage):
"""Test that needs_scaler() correctly identifies that bf16 does NOT require scaling"""
if not get_accelerator().is_bf16_supported():
pytest.skip("Test requires bf16 support")
hidden_dim = 4
# Initialize distributed first
device, _, _ = initialize_distributed()
# Create engine with bf16 to verify scaling is NOT required
torch.manual_seed(42)
model = SimpleModel(hidden_dim=hidden_dim, nlayers=2)
config = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 1,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage,
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
# Use bf16 which does NOT require gradient scaling
"bf16": {
"enabled": True
}
}
if zero_stage == 3:
config["zero_optimization"]["stage3_param_persistence_threshold"] = 0
model_engine, _, _, _ = deepspeed.initialize(config=config, model=model, model_parameters=model.parameters())
# Verify that the optimizer correctly reports it does NOT need scaling with bf16
from deepspeed.runtime.base_optimizer import ZeROOptimizer
assert isinstance(model_engine.optimizer, ZeROOptimizer), "Optimizer should be ZeROOptimizer"
assert not model_engine.optimizer.needs_scaler(), "bf16 configuration should NOT require gradient scaling"
# Verify that loss.backward() can be called directly without scale() for bf16
data_loader = random_dataloader(model=model_engine,
total_samples=8,
hidden_dim=hidden_dim,
device=device,
dtype=torch.bfloat16)
batch = next(iter(data_loader))
loss = model_engine(batch[0], batch[1])
# With bf16, should be able to call backward directly (no scaling required)
loss.backward()
# Collect gradients to verify backward completed successfully
grads = collect_gradients_safe(model_engine)
assert len(grads) > 0, "Expected gradients to be computed"
model_engine.destroy()
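    # Hedged sketch combining the two probes above (engine is a placeholder):
    # needs_scaler() is True for fp16 and False for bf16, so a dtype-agnostic
    # manual backward can branch on it:
    #
    #   if engine.optimizer.needs_scaler():
    #       engine.scale(loss).backward()
    #   else:
    #       loss.backward()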
def test_error_when_backward_without_scale_fp16(self, zero_stage):
"""Test that calling backward() without scale() raises an error with fp16"""
if not get_accelerator().is_fp16_supported():
pytest.skip("Test requires fp16 support for gradient scaling")
hidden_dim = 4
# Initialize distributed first
device, _, _ = initialize_distributed()
# Create engine with fp16
torch.manual_seed(42)
model = SimpleModel(hidden_dim=hidden_dim, nlayers=2)
config = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 1,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage,
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
if zero_stage == 3:
config["zero_optimization"]["stage3_param_persistence_threshold"] = 0
model_engine, _, _, _ = deepspeed.initialize(config=config, model=model, model_parameters=model.parameters())
# Verify needs_scaler is True
assert model_engine.optimizer.needs_scaler(), "fp16 should require scaling"
# Create data
data_loader = random_dataloader(model=model_engine,
total_samples=8,
hidden_dim=hidden_dim,
device=device,
dtype=torch.float16)
batch = next(iter(data_loader))
loss = model_engine(batch[0], batch[1])
# Try to call backward without scale - should raise RuntimeError
error_raised = False
try:
loss.backward()
except RuntimeError as e:
if "Loss scaling is required" in str(e):
error_raised = True
else:
raise # Re-raise if it's a different error
        # Verify the error was raised; a silent backward means the guard failed
        if not error_raised:
            pytest.fail("Expected RuntimeError about loss scaling, but backward completed without error")
model_engine.destroy()
def test_scale_validates_scalar_loss(self, zero_stage):
"""Test that scale() validates the input is a scalar loss tensor"""
hidden_dim = 4
model_engine = create_deepspeed_engine(model_class=SimpleNonScalarModel,
zero_stage=zero_stage,
seed=42,
hidden_dim=hidden_dim)
device = get_accelerator().current_device_name()
dtype = preferred_dtype()
torch.manual_seed(123)
x = torch.randn(2, hidden_dim, device=device, dtype=dtype)
# Forward to get non-scalar output
output = model_engine(x)
# Trying to scale a non-scalar tensor should raise an assertion error
with pytest.raises(AssertionError, match="scalar tensor"):
model_engine.scale(output)
model_engine.destroy()
def test_scale_with_torch_autocast(self, zero_stage):
"""Test that scale() works correctly with torch.autocast and fp16"""
if not get_accelerator().is_fp16_supported():
pytest.skip("FP16 not supported on this accelerator")
hidden_dim = 4
# Initialize distributed first
device, _, _ = initialize_distributed()
# Create engine with fp16 config to test gradient scaling
torch.manual_seed(42)
model = SimpleModel(hidden_dim=hidden_dim, nlayers=2)
config = {
"train_micro_batch_size_per_gpu": 2,
"gradient_accumulation_steps": 1,
"steps_per_print": 1,
"zero_optimization": {
"stage": zero_stage,
},
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-3
}
},
# Enable fp16 to test gradient scaling (bf16 doesn't use gradient scaling)
"fp16": {
"enabled": True,
"initial_scale_power": 8
}
}
if zero_stage == 3:
config["zero_optimization"]["stage3_param_persistence_threshold"] = 0
model_engine, _, _, _ = deepspeed.initialize(config=config, model=model, model_parameters=model.parameters())
# Create data with fp16 dtype to match the config
data_loader = random_dataloader(model=model_engine,
total_samples=8,
hidden_dim=hidden_dim,
device=device,
dtype=torch.float16)
batch = next(iter(data_loader))
# Forward and use scale()
loss = model_engine(batch[0], batch[1])
scaled_loss = model_engine.scale(loss)
# Should be able to call backward
scaled_loss.backward()
# Collect gradients to verify they exist
grads = collect_gradients_safe(model_engine)
assert len(grads) > 0, "Expected gradients to be computed"
model_engine.destroy()
class NonCheckpointedModel(torch.nn.Module):
"""Model without gradient checkpointing, used as reference for comparison."""
def __init__(self, hidden_dim):
super().__init__()
self.linear1 = torch.nn.Linear(hidden_dim, hidden_dim)
self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
def forward(self, x):
x = self.linear1(x)
x = torch.nn.functional.relu(x)
x = self.linear2(x)
return x
class CheckpointedModel(torch.nn.Module):
"""Model that uses gradient checkpointing with configurable use_reentrant setting.
This model is designed to test the interaction between ZeRO-3 and gradient
checkpointing with both reentrant (use_reentrant=True) and non-reentrant
(use_reentrant=False) modes.
Uses 2 layers to minimize numerical divergence from bfloat16 precision
accumulation over multiple optimizer steps.
"""
def __init__(self, hidden_dim, use_reentrant=True):
super().__init__()
self.use_reentrant = use_reentrant
self.linear1 = torch.nn.Linear(hidden_dim, hidden_dim)
self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)
def _checkpointed_block(self, x):
"""Block that will be checkpointed"""
x = self.linear1(x)
x = torch.nn.functional.relu(x)
return x
def forward(self, x):
# Use gradient checkpointing on the first block
if self.training:
from torch.utils.checkpoint import checkpoint
x = checkpoint(self._checkpointed_block, x, use_reentrant=self.use_reentrant)
else:
x = self._checkpointed_block(x)
x = self.linear2(x)
return x
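# Illustrative note on the model above (a sketch, not a fixture): with
# use_reentrant=True the checkpointed block only propagates gradients when its
# inputs require grad, which is why the tests below build inputs with
# requires_grad=True:
#
#   m = CheckpointedModel(hidden_dim=8, use_reentrant=True).train()
#   x = torch.randn(2, 8, requires_grad=True)
#   m(x).sum().backward()   # parameter grads are populated in both modes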
@pytest.mark.parametrize("zero_stage", [1, 2, 3])
@pytest.mark.parametrize("use_reentrant", [True, False])
class TestZeroUserBackwardWithCheckpointing(DistributedTest):
"""Test ZeRO with gradient checkpointing and non-scalar backward.
This test class validates the interaction between:
1. ZeRO parameter partitioning (stages 1 and 3)
2. Gradient checkpointing (both reentrant and non-reentrant modes)
3. Non-scalar backward (tensor.backward(gradient=...))
Both use_reentrant=True and use_reentrant=False are supported with ZeRO.
Note: When using use_reentrant=True, input tensors should have requires_grad=True
for proper gradient computation through the checkpointed region.
"""
world_size = 2
def test_checkpointed_non_scalar_backward(self, zero_stage, use_reentrant):
"""Test that gradient checkpointing works with ZeRO and non-scalar backward.
Verifies that tensor.backward(gradient=...) works correctly with ZeRO
and gradient checkpointing in both reentrant and non-reentrant modes.
"""
hidden_dim = 8
batch_size = 2
# Initialize distributed environment
device, rank, dtype = initialize_distributed()
# Create DDP model for reference (no checkpointing issues with DDP)
torch.manual_seed(42)
model_ddp = CheckpointedModel(hidden_dim=hidden_dim, use_reentrant=use_reentrant)
model_ddp = model_ddp.to(device=device, dtype=dtype)
model_ddp = DDP(model_ddp, device_ids=[rank], output_device=rank)
optimizer_ddp = torch.optim.Adam(model_ddp.parameters(), lr=1e-3)
        # Create DeepSpeed model with the parametrized ZeRO stage
torch.manual_seed(42)
model_ds = CheckpointedModel(hidden_dim=hidden_dim, use_reentrant=use_reentrant)
config = get_config_dict(zero_stage)
model_engine, _, _, _ = deepspeed.initialize(config=config,
model=model_ds,
model_parameters=model_ds.parameters())
# Create input data - use separate tensors for DDP and DeepSpeed to avoid
# memory sharing issues during parallel test execution
torch.manual_seed(123)
x_ddp = torch.randn(batch_size, hidden_dim, device=device, dtype=dtype, requires_grad=True)
# DDP: forward and non-scalar backward
optimizer_ddp.zero_grad()
output_ddp = model_ddp(x_ddp)
grad_output = torch.ones_like(output_ddp)
output_ddp.backward(grad_output)
get_accelerator().synchronize() # Ensure CUDA ops complete
dist.barrier() # Ensure all ranks complete gradient sync
ddp_grads = collect_ddp_gradients(model_ddp)
        # DeepSpeed with the parametrized ZeRO stage: forward and non-scalar backward
# This is the pattern used in disaggregated training
# Create fresh tensor with same seed for reproducibility
torch.manual_seed(123)
x_ds = torch.randn(batch_size, hidden_dim, device=device, dtype=dtype, requires_grad=True)
output_ds = model_engine(x_ds)
grad_output_ds = torch.ones_like(output_ds)
# Non-scalar backward with gradient checkpointing
output_ds.backward(grad_output_ds)
# Synchronize device before collecting gradients. ZeRO-3 uses async operations
# on separate streams for gradient reduction. With use_reentrant=True checkpointing,
# we need to ensure all operations complete before reading gradient data.
get_accelerator().synchronize()
dist.barrier() # Ensure all ranks complete backward before collecting gradients
# Collect and verify gradients
ds_grads = collect_gradients_safe(model_engine)
# Verify gradients were computed
assert len(ds_grads) > 0, \
f"No gradients computed with use_reentrant={use_reentrant} and ZeRO-3"
# Compare gradients with DDP reference
compare_gradients(ddp_grads, ds_grads, f"with checkpointing use_reentrant={use_reentrant}")
# Run optimizer step to verify full training loop works
model_engine.step()
model_engine.destroy()
def test_checkpointed_scalar_backward(self, zero_stage, use_reentrant):
"""Test that gradient checkpointing works with ZeRO and scalar backward.
Verifies that scalar loss.backward() works correctly with ZeRO and
gradient checkpointing in both reentrant and non-reentrant modes.
"""
hidden_dim = 8
batch_size = 2
# Initialize distributed environment
device, rank, dtype = initialize_distributed()
# Create DDP model for reference
torch.manual_seed(42)
model_ddp = CheckpointedModel(hidden_dim=hidden_dim, use_reentrant=use_reentrant)
model_ddp = model_ddp.to(device=device, dtype=dtype)
model_ddp = DDP(model_ddp, device_ids=[rank], output_device=rank)
optimizer_ddp = torch.optim.Adam(model_ddp.parameters(), lr=1e-3)
        # Create DeepSpeed model with the parametrized ZeRO stage
torch.manual_seed(42)
model_ds = CheckpointedModel(hidden_dim=hidden_dim, use_reentrant=use_reentrant)
config = get_config_dict(zero_stage)
model_engine, _, _, _ = deepspeed.initialize(config=config,
model=model_ds,
model_parameters=model_ds.parameters())
# Create input data - use separate tensors for DDP and DeepSpeed to avoid
# memory sharing issues during parallel test execution
torch.manual_seed(123)
x_ddp = torch.randn(batch_size, hidden_dim, device=device, dtype=dtype, requires_grad=True)
y = torch.randint(0, hidden_dim, (batch_size, ), device=device)
# DDP: forward with scalar loss and backward
optimizer_ddp.zero_grad()
output_ddp = model_ddp(x_ddp)
loss_ddp = torch.nn.functional.cross_entropy(output_ddp, y)
loss_ddp.backward()
get_accelerator().synchronize() # Ensure CUDA ops complete
dist.barrier() # Ensure all ranks complete gradient sync
ddp_grads = collect_ddp_gradients(model_ddp)
        # DeepSpeed with the parametrized ZeRO stage: forward with scalar loss and backward
# Create fresh tensor with same seed for reproducibility
torch.manual_seed(123)
x_ds = torch.randn(batch_size, hidden_dim, device=device, dtype=dtype, requires_grad=True)
output_ds = model_engine(x_ds)
loss_ds = torch.nn.functional.cross_entropy(output_ds, y)
loss_ds.backward()
# Synchronize device before collecting gradients. ZeRO-3 uses async operations
# on separate streams for gradient reduction. With use_reentrant=True checkpointing,
# we need to ensure all operations complete before reading gradient data.
get_accelerator().synchronize()
dist.barrier() # Ensure all ranks complete backward before collecting gradients
# Collect and verify gradients
ds_grads = collect_gradients_safe(model_engine)
# Verify gradients were computed
assert len(ds_grads) > 0, \
f"No gradients computed with scalar loss, use_reentrant={use_reentrant}"
# Compare gradients with DDP reference
compare_gradients(ddp_grads, ds_grads, f"scalar loss with checkpointing use_reentrant={use_reentrant}")
model_engine.destroy()
def test_checkpointed_multiple_backward(self, zero_stage, use_reentrant):
"""Test multiple backward passes with checkpointing and ZeRO.
Verifies that consecutive training iterations work correctly with
gradient checkpointing. Compares gradients with DDP at all iterations
to verify correctness. Uses PyTorch Adam for both to ensure fair comparison.
"""
hidden_dim = 8
batch_size = 2
num_iterations = 3
# Initialize distributed environment
device, rank, dtype = initialize_distributed()
# Create DDP model for reference with PyTorch Adam
torch.manual_seed(42)
model_ddp = CheckpointedModel(hidden_dim=hidden_dim, use_reentrant=use_reentrant)
model_ddp = model_ddp.to(device=device, dtype=dtype)
model_ddp = DDP(model_ddp, device_ids=[rank], output_device=rank)
optimizer_ddp = torch.optim.Adam(model_ddp.parameters(), lr=1e-3)
# Create DeepSpeed model WITH checkpointing, using PyTorch Adam
torch.manual_seed(42)
model_ds = CheckpointedModel(hidden_dim=hidden_dim, use_reentrant=use_reentrant)
optimizer_ds = torch.optim.Adam(model_ds.parameters(), lr=1e-3)
config = get_config_dict(zero_stage)
model_engine, _, _, _ = deepspeed.initialize(config=config,
model=model_ds,
model_parameters=model_ds.parameters(),
optimizer=optimizer_ds)
for iteration in range(num_iterations):
# Use same random seed for both models
torch.manual_seed(123 + iteration)
x = torch.randn(batch_size, hidden_dim, device=device, dtype=dtype, requires_grad=True)
# DDP: forward and backward
optimizer_ddp.zero_grad()
x_ddp = x.clone().detach().requires_grad_(True)
output_ddp = model_ddp(x_ddp)
output_ddp.backward(torch.ones_like(output_ddp))
get_accelerator().synchronize()
dist.barrier()
ddp_grads = collect_ddp_gradients(model_ddp)
# DeepSpeed: forward and backward
x_ds = x.clone().detach().requires_grad_(True)
output_ds = model_engine(x_ds)
output_ds.backward(torch.ones_like(output_ds))
get_accelerator().synchronize()
dist.barrier()
ds_grads = collect_gradients_safe(model_engine)
# Verify gradients were computed
assert len(ds_grads) > 0, \
f"No gradients at iteration {iteration} with use_reentrant={use_reentrant}"
# Compare gradients with DDP - using same optimizer so should match closely
# Small differences at later iterations are expected due to bfloat16 precision
compare_gradients(ddp_grads, ds_grads, f"iteration {iteration} with use_reentrant={use_reentrant}")
# Run optimizer steps on both models
optimizer_ddp.step()
model_engine.step()
model_engine.destroy()
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "tests/unit/v1/zero/test_zero_user_backward.py",
"license": "Apache License 2.0",
"lines": 1128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepspeedai/DeepSpeed:deepspeed/datastates/config.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# Apache-2.0 License Copyright (c) UChicago Argonne LLC, operator of Argonne National Laboratory.
# DeepSpeed Team
from deepspeed.runtime.config_utils import DeepSpeedConfigObject
import copy
DATASTATES_CHECKPOINTING = "datastates_ckpt"
DATASTATES_CHECKPOINTING_ENABLED = False
class DeepSpeedDataStatesConfig(DeepSpeedConfigObject):
def __init__(self, param_dict):
super(DeepSpeedDataStatesConfig, self).__init__()
self.enabled = param_dict.get(DATASTATES_CHECKPOINTING, DATASTATES_CHECKPOINTING_ENABLED) is not False
self.config = copy.deepcopy(param_dict.get(DATASTATES_CHECKPOINTING, None))
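# Illustrative semantics of the `is not False` check above (not part of the
# original file): the feature counts as enabled for any configured value except
# an explicit False, while an absent key falls back to disabled.
#
#   DeepSpeedDataStatesConfig({}).enabled                            # False
#   DeepSpeedDataStatesConfig({"datastates_ckpt": {}}).enabled       # True
#   DeepSpeedDataStatesConfig({"datastates_ckpt": False}).enabled    # False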
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/datastates/config.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/runtime/checkpoint_engine/datastates_checkpoint_engine.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# Apache-2.0 License Copyright (c) UChicago Argonne LLC, operator of Argonne National Laboratory.
# DeepSpeed Team
from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \
CheckpointEngine, CheckpointCommitInfo
ENGINE_NAME = "DataStatesCheckpointEngine"
class DataStatesCheckpointEngine(CheckpointEngine):
def __init__(self, deepspeed_config, rank):
super().__init__(deepspeed_config)
self.commit_info = None
self.ckpt_engine = None
try:
from datastates import CheckpointEngine as DataStatesEngine
self.ckpt_engine = DataStatesEngine(deepspeed_config, rank)
except ImportError:
raise RuntimeError("Please install DataStates from https://github.com/DataStates/datastates-llm.")
except Exception as e:
raise RuntimeError(f"An error occurred while initializing DataStates Checkpoint Engine: {e}")
def __del__(self):
self.cleanup()
def create(self, info: CheckpointCommitInfo):
self.commit_info = info
return None
def save(self, state_dict, path: str):
return self.ckpt_engine.save(state_dict, path)
def load(self, path: str, map_location=None):
return self.ckpt_engine.load(path, map_location)
def commit(self, info: CheckpointCommitInfo):
if info is None:
return
assert info == self.commit_info
self.ckpt_engine.wait(persist=True)
self.commit_info = None
return True
def cleanup(self):
self.commit(self.commit_info)
if self.ckpt_engine:
self.ckpt_engine.wait(persist=True)
del self.ckpt_engine
def is_decoupled(self):
return True
def preserves_storage_sharing(self):
return False
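# Hedged usage sketch (illustrative; `ds_config`, `state` and the
# CheckpointCommitInfo arguments are placeholders): the engine is decoupled, so
# save() may return before data is durable and commit() blocks until it is.
#
#   engine = DataStatesCheckpointEngine(ds_config, rank=0)
#   info = CheckpointCommitInfo(...)          # args depend on DeepSpeed version
#   engine.create(info)
#   engine.save(state, "/tmp/ckpt/rank0.pt")
#   engine.commit(info)                       # waits via ckpt_engine.wait(persist=True)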
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/runtime/checkpoint_engine/datastates_checkpoint_engine.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/runtime/zero/leaf_module_config.py | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from typing import List
from pydantic import Field, model_validator
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
DEFAULT_LEAF_MODULE_CLASSES: List[str] = [
"transformers.models.mixtral.modeling_mixtral.MixtralSparseMoeBlock",
"transformers.models.qwen2_moe.modeling_qwen2_moe.Qwen2MoeSparseMoeBlock",
"transformers.models.qwen3_moe.modeling_qwen3_moe.Qwen3MoeSparseMoeBlock",
]
DEFAULT_LEAF_MODULE_NAMES: List[str] = []
DEFAULT_LEAF_MODULE_NAME_SUFFIXES: List[str] = []
class DeepSpeedZeroLeafModuleConfig(DeepSpeedConfigModel):
"""Configuration for ZeRO leaf modules that should bypass hook installation."""
classes: List[str] = Field(default_factory=lambda: list(DEFAULT_LEAF_MODULE_CLASSES))
names: List[str] = Field(default_factory=lambda: list(DEFAULT_LEAF_MODULE_NAMES))
name_suffixes: List[str] = Field(default_factory=lambda: list(DEFAULT_LEAF_MODULE_NAME_SUFFIXES))
@model_validator(mode="before")
def _coerce_container_types(cls, values):
if values is None:
return {}
if isinstance(values, dict):
coerced = dict(values)
for key in ("classes", "names", "name_suffixes"):
if key in coerced and isinstance(coerced[key], str):
coerced[key] = [coerced[key]]
return coerced
raise TypeError("leaf_module configuration must be a mapping of fields to values")
@model_validator(mode="after")
def _validate_entries(self):
normalized_classes = [str(cls) for cls in self.classes]
normalized_names = [str(name) for name in self.names]
normalized_suffixes = [str(suffix) for suffix in self.name_suffixes]
deduped_classes = list(dict.fromkeys(normalized_classes))
deduped_names = list(dict.fromkeys(normalized_names))
deduped_suffixes = list(dict.fromkeys(normalized_suffixes))
object.__setattr__(self, "classes", deduped_classes)
object.__setattr__(self, "names", deduped_names)
object.__setattr__(self, "name_suffixes", deduped_suffixes)
return self
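# Illustrative example of the validators above (assumes pydantic is available;
# not part of the original file): the "before" validator coerces a bare string
# into a one-element list, and the "after" validator de-duplicates in order.
#
#   cfg = DeepSpeedZeroLeafModuleConfig(classes="my.pkg.MoEBlock",
#                                       names=["layer.0", "layer.0"])
#   assert cfg.classes == ["my.pkg.MoEBlock"]
#   assert cfg.names == ["layer.0"]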
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/runtime/zero/leaf_module_config.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/runtime/superoffload/superoffload_stage3.py | # Copyright (c) DeepSpeed Team.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import time
import torch
from collections import deque
from typing import List
from deepspeed.runtime.superoffload.superoffload_utils import SuperOffloadCPUOptimizer, TaskKeys, ResultKeys, EventTypes
from deepspeed.runtime.zero.partition_parameters import Parameter, Tensor
from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3
from deepspeed.utils.nvtx import instrument_w_nvtx
from deepspeed.utils import logger
from deepspeed.accelerator import get_accelerator
OPTIMIZER_STEP_TIMER = 'optimizer_step'
class SuperOffloadOptimizer_Stage3(DeepSpeedZeroOptimizer_Stage3):
def __init__(
self,
module,
init_optimizer,
param_names,
timers,
ds_config,
**kwargs,
):
self.sub_group_to_param_num = {}
self.params_in_ipg_bucket_buffer = deque()
self._cur_bucket_index = -1
self.async_cpuadam_num = 0
self.max_grad_numel = 0
super().__init__(module, init_optimizer, param_names, timers, ds_config, **kwargs)
optimizer_config = {
"lr": self.optimizer.param_groups[0]["lr"],
"betas": self.optimizer.param_groups[0]["betas"],
"eps": self.optimizer.param_groups[0]["eps"],
"weight_decay": self.optimizer.param_groups[0]["weight_decay"],
"amsgrad": self.optimizer.param_groups[0]["amsgrad"]
}
cpuadam_cores_perc = kwargs.get("cpuadam_cores_perc", 0.8)
self.superoffload_cpu_optimizer = SuperOffloadCPUOptimizer(optimizer_config=optimizer_config,
cpuadam_cores_perc=cpuadam_cores_perc,
max_grad_numel=self.max_grad_numel)
def _create_fp16_sub_groups(self, params_group):
params_group_numel = sum([param.partition_numel() for param in params_group])
sub_group_size = self.sub_group_size
if sub_group_size is None or sub_group_size >= params_group_numel:
return [params_group]
sub_groups = []
sub_group = []
local_sub_group_size = 0
for param in params_group:
sub_group.append(param)
local_sub_group_size += param.partition_numel()
if local_sub_group_size >= sub_group_size or id(param) == id(params_group[-1]):
self.max_grad_numel = max(self.max_grad_numel, local_sub_group_size)
sub_groups.append(sub_group)
self.sub_group_to_param_num[len(sub_groups) - 1] = len(sub_group)
sub_group = []
local_sub_group_size = 0
return sub_groups
def _optimizer_step(self, sub_group_id):
param_group_id = self.sub_group_to_group_id[sub_group_id]
fp32_param = self.fp32_partitioned_groups_flat[sub_group_id]
def step_with_gradscaler(optimizer):
if self.torch_autocast_gradscaler:
self.torch_autocast_gradscaler.step(optimizer)
self.torch_autocast_gradscaler.update()
else:
optimizer.step()
cur_device = self.subgroup_to_device[sub_group_id]
if cur_device != 'cpu':
self.backup_optimizer.param_groups[param_group_id]['params'] = [fp32_param]
step_with_gradscaler(self.backup_optimizer)
self.backup_optimizer.param_groups[param_group_id]['params'] = []
def reduce_independent_p_g_buckets_and_remove_grads(self, param):
comm_dtype = self.get_param_comm_dtype(param)
bucket = self.ipg_buckets[comm_dtype]
i, _, _ = self.grad_position[self.get_param_id(param)]
if len(bucket.params) == 0:
self._cur_bucket_index = i
if getattr(param, "ds_grad_is_ready", True):
self._DeepSpeedZeroOptimizer_Stage3__add_grad_to_ipg_bucket(param)
# If this is a single-parameter sub-group, reduce immediately
if self.sub_group_to_param_num[self._cur_bucket_index] == 1:
self._DeepSpeedZeroOptimizer_Stage3__reduce_and_partition_ipg_grads(comm_dtype)
elif i != self._cur_bucket_index:
# Parameter belongs to different sub-group, buffer it
self.params_in_ipg_bucket_buffer.append(param)
else:
# Parameter belongs to current bucket
if getattr(param, "ds_grad_is_ready", True):
self._DeepSpeedZeroOptimizer_Stage3__add_grad_to_ipg_bucket(param)
# Check if bucket is complete
if self.sub_group_to_param_num[self._cur_bucket_index] == len(bucket.params):
self._DeepSpeedZeroOptimizer_Stage3__reduce_and_partition_ipg_grads(comm_dtype)
# Process buffered parameters
while self.params_in_ipg_bucket_buffer:
buffered_param = self.params_in_ipg_bucket_buffer.popleft()
ci, _, _ = self.grad_position[self.get_param_id(buffered_param)]
self._cur_bucket_index = ci
if getattr(buffered_param, "ds_grad_is_ready", True):
self._DeepSpeedZeroOptimizer_Stage3__add_grad_to_ipg_bucket(buffered_param)
@instrument_w_nvtx
def _reassign_or_swap_out_partitioned_parameters(self, sub_group_id):
if self.subgroup_to_device[sub_group_id] == 'cpu':
self._unflatten_partitioned_parameters(sub_group_id)
return
if self.fp16_partitioned_groups_flat[sub_group_id] is not None:
self.fp16_partitioned_groups_flat[sub_group_id].data.copy_(
self.fp32_partitioned_groups_flat[sub_group_id].data)
self._unflatten_partitioned_parameters(sub_group_id)
else:
self._partitioned_params_swap_out(sub_group_id)
@instrument_w_nvtx
def _reassign_or_swap_out_partitioned_parameters_async(self, sub_group_id, updated_param):
"""Asynchronously update partitioned parameters with optimized values."""
self.fp32_partitioned_groups_flat[sub_group_id].data.copy_(updated_param, non_blocking=True)
@instrument_w_nvtx
def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None:
# print("[DEBUG] partition_grads called")
buffers = []
device_buffers = {}
buffer_numel_min = {}
buffer_numel_max = {}
for param, grad_partition in zip(params_to_release, grad_partitions):
i, dest_offset, _ = self.grad_position[self.get_param_id(param)]
if self.is_gradient_accumulation_boundary:
self.norm_for_param_grads[self.get_param_id(param)] = self._constant_buffered_norm2(grad_partition)
buffer_numel = grad_partition.numel()
buffers.append(grad_partition)
if i not in device_buffers:
device_buffers[i] = []
device_buffers[i].append(grad_partition)
if i not in buffer_numel_min:
buffer_numel_min[i] = dest_offset
buffer_numel_max[i] = dest_offset + buffer_numel
else:
buffer_numel_min[i] = min(buffer_numel_min[i], dest_offset)
buffer_numel_max[i] = max(buffer_numel_max[i], dest_offset + buffer_numel)
if self.is_gradient_accumulation_boundary:
for i in buffer_numel_min.keys():
fp32_grad_tensor = self.fp32_partitioned_groups_flat[i].grad.narrow(
0, buffer_numel_min[i], buffer_numel_max[i] - buffer_numel_min[i])
concatenated_buffer = torch.cat(device_buffers[i], dim=0).to(dtype=self.master_weights_and_grads_dtype)
if self.subgroup_to_device[i] == 'cpu':
# Trigger asynchronous CPU optimization
param_group_id = self.sub_group_to_group_id[i]
fp32_param = self.fp32_partitioned_groups_flat[i]
self.superoffload_cpu_optimizer.async_step(param_group_id, i, fp32_param.data,
concatenated_buffer.data)
self.async_cpuadam_num += 1
# Check for completed async operations
result = self.superoffload_cpu_optimizer.get_result()
if result is not None:
self._reassign_or_swap_out_partitioned_parameters_async(result[TaskKeys.SUB_GROUP_ID],
result[ResultKeys.UPDATED_PARAM])
self.async_cpuadam_num -= 1
fp32_grad_tensor.copy_(concatenated_buffer, non_blocking=True)
else:
fp32_grad_tensor.copy_(concatenated_buffer, non_blocking=True)
# Clean up parameter gradients
for param in params_to_release:
if not get_accelerator().is_synchronized_device():
param.grad.record_stream(get_accelerator().current_stream())
param.grad = None
@instrument_w_nvtx
def step(self, closure=None):
"""
        Closures are not supported.
"""
# Wait for any pending asynchronous CPU optimizer operations
self._wait_for_async_operations()
self._pre_step()
self._partition_all_parameters()
if self._overflow_check_and_loss_scale_update():
self._handle_overflow_rollback()
return
norm_groups = self._get_norm_groups()
scaled_global_grad_norm = torch.linalg.vector_norm(torch.stack(norm_groups))
self._global_grad_norm = scaled_global_grad_norm / self.loss_scale
timer_names = set()
timer_names.add(OPTIMIZER_STEP_TIMER)
self.timers(OPTIMIZER_STEP_TIMER).start()
if self.check_clip_grads(scaled_global_grad_norm):
self._handle_gradient_clipping(scaled_global_grad_norm)
for sub_group_id, group in enumerate(self.fp16_groups):
# Prepare optimizer states, gradients and fp32 parameters for update
self._prepare_sub_group(sub_group_id, timer_names)
# Scale the fp32 gradients
self.unscale_and_clip_grads(sub_group_id, scaled_global_grad_norm)
# Apply the optimizer step on the sub group and copy fp32 parameters to fp16
self._optimizer_step(sub_group_id)
# Put fp16 parameters in appropriate location
self._reassign_or_swap_out_partitioned_parameters(sub_group_id)
# Release memory or swap out optimizer states of fp32 parameters
self._release_sub_group(sub_group_id, timer_names)
self.timers(OPTIMIZER_STEP_TIMER).stop()
self._post_step(timer_names)
def _wait_for_async_operations(self, timeout_seconds=60):
"""Wait for all pending asynchronous CPU optimizer operations to complete with timeout error.
Args:
timeout_seconds (int): Maximum time to wait before throwing an error. Default is 60 seconds.
"""
if self.async_cpuadam_num > 0:
logger.info(f"[INFO] {self.async_cpuadam_num} asynchronous CPU optimizer operations pending...")
if self.async_cpuadam_num == 0:
return
start_time = time.time()
initial_pending_ops = self.async_cpuadam_num
while self.async_cpuadam_num > 0:
result = self.superoffload_cpu_optimizer.get_result()
if result is None:
current_time = time.time()
elapsed_time = current_time - start_time
# Throw error if we've been waiting longer than the timeout
if elapsed_time >= timeout_seconds:
raise RuntimeError(
f"SuperOffload CPU optimizer timeout after {elapsed_time:.1f} seconds. "
f"Still waiting for {self.async_cpuadam_num}/{initial_pending_ops} async operations to complete. "
f"This indicates a deadlock or critical performance issue in the CPU optimizer.")
time.sleep(0.001) # 1ms sleep
continue
self._reassign_or_swap_out_partitioned_parameters_async(result[TaskKeys.SUB_GROUP_ID],
result[ResultKeys.UPDATED_PARAM])
self.async_cpuadam_num -= 1
def _wait_for_single_async_result(self, event_type: str, timeout_seconds=60):
"""Wait for a single asynchronous CPU-Adam optimizer operation with timeout.
Args:
event_type (str): Type of operation expected ('adam_step' or 'rollback').
timeout_seconds (int): Maximum time to wait before throwing an error. Default is 60 seconds.
"""
start_time = time.time()
while True:
result = self.superoffload_cpu_optimizer.get_result(expected_event_type=event_type)
if result is not None:
self._reassign_or_swap_out_partitioned_parameters_async(result[TaskKeys.SUB_GROUP_ID],
result[ResultKeys.UPDATED_PARAM])
break
current_time = time.time()
elapsed_time = current_time - start_time
# Throw error if we've been waiting longer than the timeout
if elapsed_time >= timeout_seconds:
raise RuntimeError(f"SuperOffload CPU optimizer timeout after {elapsed_time:.1f} seconds. "
f"This indicates a deadlock or critical performance issue in the CPU optimizer.")
time.sleep(0.001) # 1ms sleep
def _sync_cpu_optimizer_step(self,
param_group_id: int,
sub_group_id: int,
fp32_param_data,
fp32_grad_data,
rollback: bool = False,
timeout_seconds: int = 60):
event_type = EventTypes.ROLLBACK if rollback else EventTypes.ADAM_STEP
self.superoffload_cpu_optimizer.async_step(param_group_id,
sub_group_id,
fp32_param_data,
fp32_grad_data,
rollback=rollback)
# Wait for completion
self._wait_for_single_async_result(event_type, timeout_seconds)
def _handle_overflow_rollback(self):
"""Handle gradient overflow by rolling back CPU optimizer states."""
for sub_group_id, _ in enumerate(self.fp16_groups):
if self.subgroup_to_device[sub_group_id] == 'cpu':
param_group_id = self.sub_group_to_group_id[sub_group_id]
fp32_param = self.fp32_partitioned_groups_flat[sub_group_id]
# Trigger rollback
self._sync_cpu_optimizer_step(param_group_id,
sub_group_id,
fp32_param.data,
fp32_param.grad.data,
rollback=True)
def _handle_gradient_clipping(self, scaled_global_grad_norm):
"""Handle gradient clipping with CPU optimizer rollback and re-optimization."""
for sub_group_id, _ in enumerate(self.fp16_groups):
if self.subgroup_to_device[sub_group_id] == 'cpu':
param_group_id = self.sub_group_to_group_id[sub_group_id]
fp32_param = self.fp32_partitioned_groups_flat[sub_group_id]
# Rollback CPU optimizer states
self._sync_cpu_optimizer_step(param_group_id,
sub_group_id,
fp32_param.data,
fp32_param.grad.data,
rollback=True)
# Clip gradients and re-optimize
self.unscale_and_clip_grads(sub_group_id, scaled_global_grad_norm)
self._sync_cpu_optimizer_step(param_group_id,
sub_group_id,
fp32_param.data,
fp32_param.grad.data,
rollback=False)
@instrument_w_nvtx
def check_clip_grads(self, total_norm):
"""Check if gradients need to be clipped based on the global norm."""
unscaled_norm = total_norm / self.loss_scale
return self.clip_grad and unscaled_norm > self.clip_grad
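# Worked example of check_clip_grads (values illustrative): with loss_scale=1024,
# clip_grad=1.0 and a scaled global norm of 2048, the unscaled norm is 2.0 > 1.0,
# so step() takes the rollback-and-reclip path in _handle_gradient_clipping.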
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/runtime/superoffload/superoffload_stage3.py",
"license": "Apache License 2.0",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/runtime/superoffload/superoffload_utils.py | # Copyright (c) DeepSpeed Team.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
SuperOffload utilities for running CPU optimizers in separate processes.
"""
from typing import Dict, Optional, Any
import torch
import torch.multiprocessing as mp
import psutil
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.utils import logger
class TaskKeys:
PARAM_DATA = "param_data"
PARAM_GRAD = "param_grad"
PARAM_GROUP_ID = "param_group_id"
SUB_GROUP_ID = "sub_group_id"
ROLLBACK = "rollback"
class ResultKeys:
UPDATED_PARAM = "updated_param"
EVENT_TYPE = "event_type"
class EventTypes:
ADAM_STEP = "adam_step"
ROLLBACK = "rollback"
def superoffload_optimizer_worker(param_queue: mp.SimpleQueue, result_queue: mp.SimpleQueue,
optimizer_config: Dict[str, Any], max_grad_numel: int) -> None:
"""
This function runs in a separate process and continuously processes optimization
tasks from the parameter queue. It creates a DeepSpeedCPUAdam optimizer and
applies optimization steps to parameters received from the main process.
Args:
param_queue: Queue for receiving optimization tasks
result_queue: Queue for sending back optimization results
optimizer_config: Configuration dictionary for the optimizer containing
lr, betas, eps, weight_decay, and amsgrad parameters
max_grad_numel: Maximum number of elements expected in gradient tensors
"""
# Initialize dummy parameter for optimizer creation
cpu_tensor = torch.randn(1, device="cpu")
cpu_param = torch.nn.Parameter(cpu_tensor)
try:
optimizer = DeepSpeedCPUAdam([cpu_param],
lr=optimizer_config["lr"],
betas=optimizer_config["betas"],
eps=optimizer_config["eps"],
weight_decay=optimizer_config["weight_decay"],
amsgrad=optimizer_config["amsgrad"])
except KeyError as e:
error_msg = f"Missing required optimizer config key: {e}"
logger.error(error_msg)
result_queue.put({"error": error_msg})
return
# Pre-allocate reusable pinned memory buffer for gradients
pinned_grad_buffer = torch.empty(max_grad_numel, dtype=torch.float32, device='cpu', pin_memory=True)
while True:
try:
task = param_queue.get()
if task is None:
logger.debug("Received termination signal, shutting down worker")
break
param_data = task[TaskKeys.PARAM_DATA]
param_grad = task[TaskKeys.PARAM_GRAD]
param_group_id = task[TaskKeys.PARAM_GROUP_ID]
sub_group_id = task[TaskKeys.SUB_GROUP_ID]
rollback = task.get(TaskKeys.ROLLBACK, False)
logger.debug(f"Processing param_group_id: {param_group_id}, sub_group_id: {sub_group_id}")
del task[TaskKeys.PARAM_DATA]
del task[TaskKeys.PARAM_GRAD]
task.clear()
grad_numel = param_grad.numel()
if grad_numel > max_grad_numel:
error_msg = (
f"Gradient size {grad_numel} exceeds pre-allocated buffer size {max_grad_numel}. "
f"This indicates insufficient buffer allocation. Please increase max_grad_numel parameter.")
result_queue.put({"error": error_msg})
break
param_grad_cpu = pinned_grad_buffer[:grad_numel].view_as(param_grad)
param_grad_cpu.copy_(param_grad, non_blocking=True)
fp32_param = torch.nn.Parameter(param_data)
fp32_param.grad = param_grad_cpu
optimizer.param_groups[param_group_id]['params'] = [fp32_param]
if rollback:
logger.debug(f"Rolling back optimizer state for sub_group_id: {sub_group_id}")
optimizer.rollback_subgroup(sub_group_id)
else:
optimizer.step_subgroup(sub_group_id)
# Send result back to main process
event_type = EventTypes.ROLLBACK if rollback else EventTypes.ADAM_STEP
result_queue.put({
TaskKeys.PARAM_GROUP_ID: param_group_id,
TaskKeys.SUB_GROUP_ID: sub_group_id,
ResultKeys.UPDATED_PARAM: fp32_param.data,
ResultKeys.EVENT_TYPE: event_type,
})
# Clean up references to free memory
optimizer.param_groups[param_group_id]['params'] = []
del param_grad_cpu, fp32_param.grad, fp32_param, param_grad, param_data
except KeyError as e:
error_msg = f"Missing required task key: {e}"
logger.error(error_msg)
result_queue.put({"error": error_msg})
break
except Exception as e:
error_msg = f"Unexpected error in worker process: {e}"
logger.error(error_msg)
result_queue.put({"error": error_msg})
break
# Clean up pinned memory buffer
if 'pinned_grad_buffer' in locals():
del pinned_grad_buffer
logger.debug("Cleaned up pinned memory buffer")
logger.debug("Worker process terminated")
class SuperOffloadCPUOptimizer:
def __init__(self,
optimizer_config: Dict[str, Any],
cpuadam_cores_perc: float = 0.8,
max_grad_numel: int = 1000000) -> None:
if not 0 < cpuadam_cores_perc <= 1:
raise ValueError("cpuadam_cores_perc must be between 0 and 1")
self.max_grad_numel = max_grad_numel
self.mp_context = mp.get_context('spawn')
self.param_queue = self.mp_context.SimpleQueue()
self.result_queue = self.mp_context.SimpleQueue()
self.cpuadam_process = self.mp_context.Process(
target=superoffload_optimizer_worker,
args=(self.param_queue, self.result_queue, optimizer_config, max_grad_numel),
daemon=True,
)
self.cpuadam_process.start()
# Set CPU affinity for better performance isolation
self._set_cpu_affinity(cpuadam_cores_perc)
def _set_cpu_affinity(self, cpuadam_cores_perc: float) -> None:
"""
        Set CPU affinity for the main (PyTorch) process and the worker (CPU Adam) process.
Args:
cpuadam_cores_perc: Percentage of cores to allocate to the worker (CPU Adam) process
"""
try:
current_process = psutil.Process()
all_cores = current_process.cpu_affinity()
num_cores = len(all_cores)
split_idx = int((1 - cpuadam_cores_perc) * num_cores)
pt_cores = all_cores[:split_idx]
cpuadam_cores = all_cores[split_idx:]
# Set affinity for main process (PyTorch)
current_process.cpu_affinity(pt_cores)
# Set affinity for optimizer process (CPU Adam)
optimizer_process = psutil.Process(self.cpuadam_process.pid)
optimizer_process.cpu_affinity(cpuadam_cores)
logger.debug(f"Set CPU affinity - PyTorch cores: {pt_cores}, "
f"Optimizer cores: {cpuadam_cores}")
except (psutil.AccessDenied, psutil.NoSuchProcess, AttributeError) as e:
logger.debug(f"Could not set CPU affinities for superoffload optimizer process: {e}")
except Exception as e:
logger.warning(f"Unexpected error setting CPU affinity: {e}")
def async_step(self,
param_group_id: int,
sub_group_id: int,
fp32_param: torch.Tensor,
fp32_grad: torch.Tensor,
rollback: bool = False) -> None:
"""
Queue parameter for optimization in the worker process.
"""
if not self.cpuadam_process.is_alive():
raise RuntimeError("Worker process is not alive")
self.param_queue.put({
TaskKeys.PARAM_DATA: fp32_param,
TaskKeys.PARAM_GRAD: fp32_grad,
TaskKeys.PARAM_GROUP_ID: param_group_id,
TaskKeys.SUB_GROUP_ID: sub_group_id,
TaskKeys.ROLLBACK: rollback,
})
def get_result(self, expected_event_type: str = None) -> Optional[Dict[str, Any]]:
"""
Get result from worker process with optional event type validation.
Args:
expected_event_type (str, optional): Expected event type ('adam_step' or 'rollback').
If provided, validates that the result matches.
"""
if self.result_queue.empty():
return None
result = self.result_queue.get()
if "error" in result:
raise RuntimeError(f"Error in worker process: {result['error']}")
# Validate event type if expected_event_type is provided
if expected_event_type is not None:
result_event_type = result.get(ResultKeys.EVENT_TYPE)
if result_event_type != expected_event_type:
raise RuntimeError(f"Event type mismatch: expected '{expected_event_type}', got '{result_event_type}'")
return result
def close(self) -> None:
"""
Shutdown the worker process gracefully.
Sends termination signal to worker and waits for clean shutdown.
If the process doesn't terminate within the timeout, it will be forcefully killed.
"""
if not self.cpuadam_process.is_alive():
logger.debug("Worker process already terminated")
return
# Send termination signal
self.param_queue.put(None)
# Wait for graceful shutdown
self.cpuadam_process.join(timeout=5)
if self.cpuadam_process.is_alive():
logger.warning("Optimizer process did not terminate cleanly within timeout, "
"forcefully terminating")
self.cpuadam_process.terminate()
self.cpuadam_process.join(timeout=2)
# Last resort: kill the process
if self.cpuadam_process.is_alive():
logger.error("Failed to terminate optimizer process, killing it")
self.cpuadam_process.kill()
self.cpuadam_process.join()
logger.debug("SuperOffload CPU optimizer closed successfully")
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/runtime/superoffload/superoffload_utils.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/runtime/zero/muon/muon_optimizer.py | # Copyright (c) 2025 Peng Du and Zhipeng Wang
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
try:
from deepspeed.runtime.zero.muon.original_muon import MuonWithAuxAdam as BaseMuonWithAuxAdam
from deepspeed.runtime.zero.muon.original_muon import adam_update
except ImportError:
pass
class MuonWithAuxAdam(BaseMuonWithAuxAdam):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if group["use_muon"]:
                # The Muon update itself is performed in DeepSpeed's optimizer, since the
                # parameter here is a flattened view and therefore not suitable for the
                # matrix-shaped Muon update.
for p in group["params"]:
p.mul_(1 - group["lr"] * group["weight_decay"])
p.add_(p.grad.reshape(p.shape), alpha=-group["lr"])
else:
for p in group["params"]:
if p.grad is None:
# continue
p.grad = torch.zeros_like(p) # Force synchronization
state = self.state[p]
if len(state) == 0:
state["exp_avg"] = torch.zeros_like(p)
state["exp_avg_sq"] = torch.zeros_like(p)
state["step"] = 0
state["step"] += 1
update = adam_update(p.grad, state["exp_avg"], state["exp_avg_sq"], state["step"], group["betas"],
group["eps"])
p.mul_(1 - group["lr"] * group["weight_decay"])
p.add_(update, alpha=-group["lr"])
return loss
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/runtime/zero/muon/muon_optimizer.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/runtime/zero/muon/original_muon.py | # Copyright (c) 2024 Keller Jordan
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
MIT License
Copyright (c) 2024 Keller Jordan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import torch
import deepspeed.comm as dist # replace torch's distributed package with deepspeed.comm to resolve deepspeed check
from deepspeed.runtime import compiler
@compiler.compile()
def zeropower_via_newtonschulz5(G, steps: int):
"""
Newton-Schulz iteration to compute the zeroth power / orthogonalization of G. We opt to use a
quintic iteration whose coefficients are selected to maximize the slope at zero. For the purpose
of minimizing steps, it turns out to be empirically effective to keep increasing the slope at
zero even beyond the point where the iteration no longer converges all the way to one everywhere
on the interval. This iteration therefore does not produce UV^T but rather something like US'V^T
where S' is diagonal with S_{ii}' ~ Uniform(0.5, 1.5), which turns out not to hurt model
performance at all relative to UV^T, where USV^T = G is the SVD.
"""
assert G.ndim >= 2 # batched Muon implementation by @scottjmaddox, and put into practice in the record by @YouJiacheng
a, b, c = (3.4445, -4.7750, 2.0315)
X = G.bfloat16()
if G.size(-2) > G.size(-1):
X = X.mT
# Ensure spectral norm is at most 1
X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7)
# Perform the NS iterations
for _ in range(steps):
A = X @ X.mT
B = b * A + c * A @ A # quintic computation strategy adapted from suggestion by @jxbz, @leloykun, and @YouJiacheng
X = a * X + B @ X
if G.size(-2) > G.size(-1):
X = X.mT
return X
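# Illustrative sanity check (not from the original file; requires a working
# deepspeed install since the function is compiled):
#
#   G = torch.randn(8, 4)
#   X = zeropower_via_newtonschulz5(G, steps=5)
#   # torch.linalg.svdvals(X.float()) lands roughly in [0.5, 1.5] rather than
#   # exactly at 1: approximate orthogonalization, as the docstring explains.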
@compiler.compile()
def muon_update(grad, momentum, beta=0.95, ns_steps=5, nesterov=True):
momentum.lerp_(grad, 1 - beta)
update = grad.lerp_(momentum, beta) if nesterov else momentum
if update.ndim == 4: # for the case of conv filters
update = update.view(len(update), -1)
update = zeropower_via_newtonschulz5(update, steps=ns_steps)
update *= max(1, grad.size(-2) / grad.size(-1))**0.5
return update
class Muon(torch.optim.Optimizer):
"""
Muon - MomentUm Orthogonalized by Newton-schulz
https://kellerjordan.github.io/posts/muon/
Muon internally runs standard SGD-momentum, and then performs an orthogonalization post-
processing step, in which each 2D parameter's update is replaced with the nearest orthogonal
matrix. For efficient orthogonalization we use a Newton-Schulz iteration, which has the
advantage that it can be stably run in bfloat16 on the GPU.
Muon should only be used for hidden weight layers. The input embedding, final output layer,
and any internal gains or biases should be optimized using a standard method such as AdamW.
Hidden convolutional weights can be trained using Muon by viewing them as 2D and then
collapsing their last 3 dimensions.
Arguments:
lr: The learning rate, in units of spectral norm per update.
weight_decay: The AdamW-style weight decay.
momentum: The momentum. A value of 0.95 here is usually fine.
"""
def __init__(self, params, lr=0.02, weight_decay=0, momentum=0.95):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum)
assert isinstance(params, list) and len(params) >= 1 and isinstance(params[0], torch.nn.Parameter)
params = sorted(params, key=lambda x: x.size(), reverse=True)
super().__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params = group["params"]
params_pad = params + [torch.empty_like(params[-1])
] * (dist.get_world_size() - len(params) % dist.get_world_size())
for base_i in range(len(params))[::dist.get_world_size()]:
if base_i + dist.get_rank() < len(params):
p = params[base_i + dist.get_rank()]
if p.grad is None:
# continue
p.grad = torch.zeros_like(p) # Force synchronization
state = self.state[p]
if len(state) == 0:
state["momentum_buffer"] = torch.zeros_like(p)
update = muon_update(p.grad, state["momentum_buffer"], beta=group["momentum"])
p.mul_(1 - group["lr"] * group["weight_decay"])
p.add_(update.reshape(p.shape), alpha=-group["lr"])
dist.all_gather(params_pad[base_i:base_i + dist.get_world_size()],
params_pad[base_i + dist.get_rank()])
return loss
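# Hedged usage sketch (requires an initialized deepspeed.comm process group;
# `model` is a placeholder). Only >=2-D hidden weights belong in Muon; embeddings,
# heads, gains and biases should use AdamW per the docstring above:
#
#   hidden = [p for p in model.parameters() if p.ndim >= 2]
#   opt = Muon(hidden, lr=0.02, momentum=0.95)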
class SingleDeviceMuon(torch.optim.Optimizer):
"""
Muon variant for usage in non-distributed settings.
"""
def __init__(self, params, lr=0.02, weight_decay=0, momentum=0.95):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum)
super().__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
# continue
p.grad = torch.zeros_like(p) # Force synchronization
state = self.state[p]
if len(state) == 0:
state["momentum_buffer"] = torch.zeros_like(p)
update = muon_update(p.grad, state["momentum_buffer"], beta=group["momentum"])
p.mul_(1 - group["lr"] * group["weight_decay"])
p.add_(update.reshape(p.shape), alpha=-group["lr"])
return loss
def adam_update(grad, buf1, buf2, step, betas, eps):
buf1.lerp_(grad, 1 - betas[0])
buf2.lerp_(grad.square(), 1 - betas[1])
buf1c = buf1 / (1 - betas[0]**step)
buf2c = buf2 / (1 - betas[1]**step)
return buf1c / (buf2c.sqrt() + eps)
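# Worked first step of adam_update (illustrative): with betas=(0.9, 0.95) and
# step=1, buf1 = 0.1*g and buf2 = 0.05*g^2; bias correction divides by 0.1 and
# 0.05 respectively, so the returned update is g / (|g| + eps), i.e. roughly
# sign(g), matching Adam's familiar first-step behavior.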
class MuonWithAuxAdam(torch.optim.Optimizer):
"""
Distributed Muon variant that can be used for all parameters in the network, since it runs an
internal AdamW for the parameters that are not compatible with Muon. The user must manually
specify which parameters shall be optimized with Muon and which with Adam by passing in a
list of param_groups with the `use_muon` flag set.
The point of this class is to allow the user to have a single optimizer in their code, rather
than having both a Muon and an Adam which each need to be stepped.
You can see an example usage below:
https://github.com/KellerJordan/modded-nanogpt/blob/master/records/052525_MuonWithAuxAdamExample/b01550f9-03d8-4a9c-86fe-4ab434f1c5e0.txt#L470
```
hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
embed_params = [p for n, p in model.named_parameters() if "embed" in n]
scalar_params = [p for p in model.parameters() if p.ndim < 2]
head_params = [model.lm_head.weight]
from muon import MuonWithAuxAdam
adam_groups = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
adam_groups = [dict(**g, betas=(0.8, 0.95), eps=1e-10, use_muon=False) for g in adam_groups]
muon_group = dict(params=hidden_matrix_params, lr=0.05, momentum=0.95, use_muon=True)
param_groups = [*adam_groups, muon_group]
optimizer = MuonWithAuxAdam(param_groups)
```
"""
def __init__(self, param_groups):
for group in param_groups:
assert "use_muon" in group
if group["use_muon"]:
group["params"] = sorted(group["params"], key=lambda x: x.size(), reverse=True)
# defaults
group["lr"] = group.get("lr", 0.02)
group["momentum"] = group.get("momentum", 0.95)
group["weight_decay"] = group.get("weight_decay", 0)
assert set(group.keys()) == set(["params", "lr", "momentum", "weight_decay", "use_muon"])
else:
# defaults
group["lr"] = group.get("lr", 3e-4)
group["betas"] = group.get("betas", (0.9, 0.95))
group["eps"] = group.get("eps", 1e-10)
group["weight_decay"] = group.get("weight_decay", 0)
assert set(group.keys()) == set(["params", "lr", "betas", "eps", "weight_decay", "use_muon"])
super().__init__(param_groups, dict())
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if group["use_muon"]:
params = group["params"]
params_pad = params + [torch.empty_like(params[-1])
] * (dist.get_world_size() - len(params) % dist.get_world_size())
for base_i in range(len(params))[::dist.get_world_size()]:
if base_i + dist.get_rank() < len(params):
p = params[base_i + dist.get_rank()]
if p.grad is None:
# continue
p.grad = torch.zeros_like(p) # Force synchronization
state = self.state[p]
if len(state) == 0:
state["momentum_buffer"] = torch.zeros_like(p)
update = muon_update(p.grad, state["momentum_buffer"], beta=group["momentum"])
p.mul_(1 - group["lr"] * group["weight_decay"])
p.add_(update.reshape(p.shape), alpha=-group["lr"])
dist.all_gather(params_pad[base_i:base_i + dist.get_world_size()],
params_pad[base_i + dist.get_rank()])
else:
for p in group["params"]:
if p.grad is None:
# continue
p.grad = torch.zeros_like(p) # Force synchronization
state = self.state[p]
if len(state) == 0:
state["exp_avg"] = torch.zeros_like(p)
state["exp_avg_sq"] = torch.zeros_like(p)
state["step"] = 0
state["step"] += 1
update = adam_update(p.grad, state["exp_avg"], state["exp_avg_sq"], state["step"], group["betas"],
group["eps"])
p.mul_(1 - group["lr"] * group["weight_decay"])
p.add_(update, alpha=-group["lr"])
return loss
class SingleDeviceMuonWithAuxAdam(torch.optim.Optimizer):
"""
Non-distributed variant of MuonWithAuxAdam.
"""
def __init__(self, param_groups):
for group in param_groups:
assert "use_muon" in group
if group["use_muon"]:
# defaults
group["lr"] = group.get("lr", 0.02)
group["momentum"] = group.get("momentum", 0.95)
group["weight_decay"] = group.get("weight_decay", 0)
assert set(group.keys()) == set(["params", "lr", "momentum", "weight_decay", "use_muon"])
else:
# defaults
group["lr"] = group.get("lr", 3e-4)
group["betas"] = group.get("betas", (0.9, 0.95))
group["eps"] = group.get("eps", 1e-10)
group["weight_decay"] = group.get("weight_decay", 0)
assert set(group.keys()) == set(["params", "lr", "betas", "eps", "weight_decay", "use_muon"])
super().__init__(param_groups, dict())
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if group["use_muon"]:
for p in group["params"]:
                    if p.grad is None:
                        # Deliberately do not skip: use a zero grad so the momentum
                        # state and update path below run uniformly for every parameter.
                        p.grad = torch.zeros_like(p)
state = self.state[p]
if len(state) == 0:
state["momentum_buffer"] = torch.zeros_like(p)
update = muon_update(p.grad, state["momentum_buffer"], beta=group["momentum"])
p.mul_(1 - group["lr"] * group["weight_decay"])
p.add_(update.reshape(p.shape), alpha=-group["lr"])
else:
for p in group["params"]:
                    if p.grad is None:
                        # Deliberately do not skip: use a zero grad so the Adam
                        # state and update path below run uniformly for every parameter.
                        p.grad = torch.zeros_like(p)
state = self.state[p]
if len(state) == 0:
state["exp_avg"] = torch.zeros_like(p)
state["exp_avg_sq"] = torch.zeros_like(p)
state["step"] = 0
state["step"] += 1
update = adam_update(p.grad, state["exp_avg"], state["exp_avg_sq"], state["step"], group["betas"],
group["eps"])
p.mul_(1 - group["lr"] * group["weight_decay"])
p.add_(update, alpha=-group["lr"])
return loss
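# Hedged sketch mirroring the distributed example in MuonWithAuxAdam's
# docstring, adapted for a single device; "model" is a hypothetical name.
def _example_single_device_aux_adam(model):
    hidden = [p for p in model.parameters() if p.ndim >= 2]
    other = [p for p in model.parameters() if p.ndim < 2]
    param_groups = [
        dict(params=hidden, use_muon=True),   # Muon defaults: lr=0.02, momentum=0.95
        dict(params=other, use_muon=False),   # Adam defaults: lr=3e-4, betas=(0.9, 0.95)
    ]
    return SingleDeviceMuonWithAuxAdam(param_groups)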
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/runtime/zero/muon/original_muon.py",
"license": "Apache License 2.0",
"lines": 275,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:tests/unit/ops/muon/test_muon.py | # Copyright (c) 2025 Peng Du and Zhipeng Wang
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
import torch
import pytest
from unit.common import DistributedTest
from unit.simple_model import SimpleModel
from deepspeed.accelerator import get_accelerator
if torch.half not in get_accelerator().supported_dtypes():
pytest.skip(f"fp16 not supported, valid dtype: {get_accelerator().supported_dtypes()}", allow_module_level=True)
# 'optimizer_type, zero_stage, lr, hidden_dim, nlayer'
muon_configs = []
for optimizer_name in ['muon', 'adam']:
for stage in [1, 2]:
for lr in [0.01, 0.05]:
for model_dim in [32, 128]:
for nlayer in [5, 10]:
muon_configs.append([optimizer_name, stage, lr, model_dim, nlayer])
@pytest.mark.parametrize('optimizer_type, zero_stage, lr, hidden_dim, nlayer', muon_configs)
class TestMuonConfigs(DistributedTest):
def test(self, optimizer_type, zero_stage, lr, hidden_dim, nlayer):
optimizer_params = {"lr": lr}
batch_size = 8
config_dict = {
"train_batch_size": batch_size,
"optimizer": {
"type": optimizer_type,
"params": optimizer_params
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": zero_stage,
}
}
# Perform a few training steps to ensure the optimizer works correctly
model = SimpleModel(hidden_dim=hidden_dim, nlayers=nlayer)
initial_params = [p.clone().cpu() for p in model.parameters()]
engine, optimizer, _, _ = deepspeed.initialize(
config=config_dict,
model=model,
model_parameters=model.parameters(),
dist_init_required=False,
)
assert optimizer_type in optimizer.optimizer.__class__.__name__.lower(
), f"Expected optimizer type {optimizer_type}, got {optimizer.optimizer.__class__.__name__}"
steps = 5
for _ in range(steps):
# Random inputs: (batch_size, hidden_dim)
x = torch.randn(batch_size, hidden_dim, device=engine.device, dtype=torch.half)
# Random class labels: (batch_size,)
y = torch.randint(0, hidden_dim, (batch_size, ), device=engine.device)
# Forward + loss
loss = engine(x, y)
# Backward
engine.backward(loss)
engine.step()
# Verify that parameters have been updated
after_training = [p.clone().cpu() for p in model.parameters()]
for initial, final in zip(initial_params, after_training):
assert not torch.equal(initial.cpu(), final.cpu()), "Parameters should have been updated during training"
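# To run just this suite (assuming a DeepSpeed development checkout with a
# supported accelerator available), the usual pytest invocation applies, e.g.:
#   pytest tests/unit/ops/muon/test_muon.py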
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "tests/unit/ops/muon/test_muon.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepspeedai/DeepSpeed:deepspeed/ops/adam/zenflow_cpu_adam.py | # Copyright (c) DeepSpeed Team.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.ops.adam import DeepSpeedCPUAdam
import torch
class ZenFlowCPUAdam(DeepSpeedCPUAdam):
def __init__(self, *args, overlap_step=False, **kwargs):
super(ZenFlowCPUAdam, self).__init__(*args, **kwargs)
self.overlap_step = overlap_step
if not self.overlap_step:
print("ZenFlowCPUAdam initialized with normal step.")
self.step = self._sequential_step
else:
print("ZenFlowCPUAdam initialized with overlap step.")
self.step = self._parallel_step
@torch.no_grad()
def _sequential_step(self, step_id, closure=None):
"""Update the model parameters.
.. note::
This method will be called internally by ZeRO-Offload. DeepSpeed
users should still use ``engine.step()`` as shown in the
`Getting Started
<https://www.deepspeed.ai/getting-started/#training>`_ guide.
Args:
closure (callable, optional): closure to compute the loss.
Defaults to ``None``.
Returns:
loss: if ``closure`` is provided. Otherwise ``None``.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# intended device for step
device = torch.device('cpu')
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group['params']):
if p.grad is None:
continue
assert p.device == device, f"CPUAdam param is on {p.device} and must be 'cpu', make " \
"sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."
state = self.state[p]
# State initialization
if len(state) == 0:
#print(f'group {group_id} param {param_id} = {p.numel()}')
state['step'] = 0
#use full precision by default unless self.fp32_optimizer_states is off
state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
# gradient momentums
state['exp_avg'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
#memory_format=torch.preserve_format)
# gradient variances
state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=state_dtype, device=device)
#memory_format=torch.preserve_format)
state['step'] = step_id
beta1, beta2 = group['betas']
self.ds_opt_adam.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'],
group['weight_decay'], group['bias_correction'], p.data, p.grad.data,
state['exp_avg'], state['exp_avg_sq'])
return loss
@torch.no_grad()
def _parallel_step(self, step_id, now_state, group_info, closure=None):
"""Update the model parameters.
.. note::
This method will be called internally by ZeRO-Offload. DeepSpeed
users should still use ``engine.step()`` as shown in the
`Getting Started
<https://www.deepspeed.ai/getting-started/#training>`_ guide.
Args:
closure (callable, optional): closure to compute the loss.
Defaults to ``None``.
Returns:
loss: if ``closure`` is provided. Otherwise ``None``.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# intended device for step
device = torch.device('cpu')
stale_param = None
for group_id, group in enumerate(self.param_groups):
for param_id, p in enumerate(group['params']):
assert p.data.is_shared(), "param.data must be in shared memory"
if not hasattr(p, 'overlap_grad'):
continue
assert p.device == device, f"CPUAdam param is on {p.device} and must be 'cpu', make " \
"sure you enabled 'offload_optimizer': 'cpu' in your ZeRO config."
state = self.state[p]
# State initialization
if len(state) == 0:
#print(f'group {group_id} param {param_id} = {p.numel()}')
# print("creating", flush=True)
state['step'] = 0
#use full precision by default unless self.fp32_optimizer_states is off
state_dtype = torch.float if self.fp32_optimizer_states else p.dtype
exp_avg = torch.zeros_like(p.data, dtype=state_dtype, device=device)
exp_avg_sq = torch.zeros_like(p.data, dtype=state_dtype, device=device)
state['exp_avg'] = [exp_avg, exp_avg.clone()]
state['exp_avg_sq'] = [exp_avg_sq, exp_avg_sq.clone()]
state['step'] = step_id
beta1, beta2 = group_info['betas']
self.ds_opt_adam.adam_update(self.opt_id, state['step'], group_info['lr'], beta1, beta2,
group_info['eps'], group_info['weight_decay'],
group_info['bias_correction'], p.data, p.overlap_grad[now_state].data,
state['exp_avg'][now_state], state['exp_avg_sq'][now_state])
p.stale_param.data.copy_(p.data.clone())
return loss
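# Conceptual sketch (not part of the original file): _parallel_step keeps two
# copies of each moment buffer and alternates between them with a 0/1 index so
# the CPU step on one copy can overlap work that still reads the other copy.
# The index flip itself is just an XOR:
def _example_toggle_state(now_state: int) -> int:
    return now_state ^ 1  # 0 -> 1, 1 -> 0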
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/ops/adam/zenflow_cpu_adam.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/ops/adam/zenflow_torch_adam.py | # Copyright (c) DeepSpeed Team.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from typing import cast, List, Optional, Tuple, Union
from torch import Tensor
from deepspeed.utils.torch import required_torch_version
# Check if we have PyTorch >= 2.1 for ZenFlow features
_ZENFLOW_AVAILABLE = required_torch_version(min_version=2.1)
if _ZENFLOW_AVAILABLE:
try:
from torch.optim.optimizer import (
_default_to_fused_or_foreach,
_disable_dynamo_if_unsupported,
_get_capturable_supported_devices,
_get_value,
_stack_if_compiling,
_view_as_real,
DeviceDict,
Optimizer,
)
except ImportError as e:
# print(f"[WARNING] ZenFlow disabled: torch internal optimizer symbols could not be imported: {e}")
_ZENFLOW_AVAILABLE = False
if not _ZENFLOW_AVAILABLE:
    # Fallback: a no-op decorator so this module still imports when the torch
    # internal optimizer helpers above cannot be imported.
    def _disable_dynamo_if_unsupported(**kwargs):  # noqa
        def wrapper(fn):
            return fn
        return wrapper
class ZenFlowSelectiveAdamW(torch.optim.AdamW):
def __init__(self, *args, offload=False, bucket_size=5e8, **kwargs):
if not _ZENFLOW_AVAILABLE:
raise RuntimeError("ZenFlow features are not available with PyTorch < 2.0. "
"Please upgrade to PyTorch 2.0+ to use ZenFlow, or omit 'zenflow' "
"from your DeepSpeed configuration to use the default ZeRO-Offload optimizer.")
super(ZenFlowSelectiveAdamW, self).__init__(*args, **kwargs)
self.offload = offload
if offload:
self.step = self._step_with_offload
self.bucket_size = bucket_size
else:
self.step = self._step_without_offload
def temp_copy_param(self, group_to_paramlist):
for group_id, params in group_to_paramlist.items():
for param in params:
if hasattr(param, "selected_grad"):
temp_selected_param = param.data[:, param.selected_indices].clone().detach() if len(
param.shape) != 1 else param.data.clone().detach()
if self.offload:
param.temp_selected_param = temp_selected_param.cpu()
else:
param.temp_selected_param = temp_selected_param
def copy_mv_from_cpu(self, params):
for param in params:
param.exp_avg = param.exp_avg_cpu_data.to(param.device, non_blocking=True)
param.exp_avg_sq = param.exp_avg_sq_cpu_data.to(param.device, non_blocking=True)
def copy_mv_to_cpu(self, params):
for param in params:
param.exp_avg_cpu_data.copy_(param.exp_avg.data, non_blocking=True)
param.exp_avg_sq_cpu_data.copy_(param.exp_avg_sq.data, non_blocking=True)
param.exp_avg = None
param.exp_avg_sq = None
def clear_selected_mv(self):
print("Zenflow: clearing selective optimizer states...")
for group in self.param_groups:
for param in group['params']:
state = self.state.setdefault(param, {})
if len(state) == 0:
continue
if self.offload:
param.exp_avg_cpu_data.zero_()
param.exp_avg_sq_cpu_data.zero_()
else:
state["exp_avg"].zero_()
state["exp_avg_sq"].zero_()
@torch.no_grad()
def _step_without_offload(self):
for group in self.param_groups:
params_with_grad: List[Tensor] = []
grads: List[Tensor] = []
exp_avgs: List[Tensor] = []
exp_avg_sqs: List[Tensor] = []
max_exp_avg_sqs: List[Tensor] = []
state_steps: List[Tensor] = []
amsgrad: bool = group["amsgrad"]
beta1, beta2 = cast(Tuple[float, float], group["betas"])
for param in group["params"]:
if hasattr(param, "selected_grad"):
selected_param = param.data[:, param.selected_indices] if len(param.shape) != 1 else param.data
if hasattr(param, 'temp_selected_param') and param.temp_selected_param is not None:
selected_param.copy_(param.temp_selected_param)
state = self.state.setdefault(param, {})
if len(state) == 0:
state["step"] = torch.zeros((), dtype=param.dtype, device=selected_param.device)
state["exp_avg"] = torch.zeros_like(selected_param)
state["exp_avg_sq"] = torch.zeros_like(selected_param)
if amsgrad:
state["max_exp_avg_sq"] = torch.zeros_like(selected_param)
params_with_grad.append(selected_param)
grads.append(param.selected_grad)
exp_avgs.append(state["exp_avg"])
exp_avg_sqs.append(state["exp_avg_sq"])
if amsgrad:
max_exp_avg_sqs.append(state["max_exp_avg_sq"])
state_steps.append(state["step"])
adamw(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=amsgrad,
beta1=beta1,
beta2=beta2,
lr=group["lr"],
weight_decay=group["weight_decay"],
eps=group["eps"],
maximize=False,
)
for i, param in enumerate(group["params"]):
if hasattr(param, "selected_grad"):
if len(param.shape) != 1:
param.data[:, param.selected_indices] = params_with_grad[i]
for param in group["params"]:
if hasattr(param, "temp_selected_param"):
param.temp_selected_param = None
param.selected_grad = None
@torch.no_grad()
def _step_with_offload(self):
"""
Performs parameter updates in offload mode.
In this mode, group_step() calls adamw() on each pre-partitioned param bucket,
so memory can be released after each bucket update to reduce GPU overhead.
Without offload, adamw() is called directly for speed.
"""
for group_id, group in enumerate(self.param_groups):
params = group["params"]
bucket = []
bucket_numel = 0
def flush_bucket():
if not bucket:
return
for param in bucket:
if hasattr(param, "temp_selected_param") and param.temp_selected_param is not None:
selected_param = param.data[:, param.selected_indices] if len(param.shape) != 1 else param.data
temp_selected_param = param.temp_selected_param.to(param.device, non_blocking=True)
selected_param.copy_(temp_selected_param)
param.temp_selected_param = None
self.group_step({group_id: bucket})
bucket.clear()
for param in params:
if hasattr(param, "selected_grad"):
bucket.append(param)
bucket_numel += param.numel()
if bucket_numel >= self.bucket_size:
flush_bucket()
bucket_numel = 0
flush_bucket()
@torch.no_grad()
def group_step(self, group_to_paramlist):
for group_id, params in group_to_paramlist.items():
group = self.param_groups[group_id]
if self.offload:
self.copy_mv_from_cpu(params)
params_with_grad: List[Tensor] = []
grads: List[Tensor] = []
exp_avgs: List[Tensor] = []
exp_avg_sqs: List[Tensor] = []
max_exp_avg_sqs: List[Tensor] = []
state_steps: List[Tensor] = []
amsgrad: bool = group["amsgrad"]
beta1, beta2 = cast(Tuple[float, float], group["betas"])
for param in params:
if hasattr(param, "selected_grad"):
is_2d = (len(param.shape) != 1)
selected_param = param.data[:, param.selected_indices] if is_2d else param.data
state = self.state.setdefault(param, {})
if len(state) == 0:
state["step"] = torch.zeros((), dtype=param.dtype, device=selected_param.device)
if amsgrad:
state["max_exp_avg_sq"] = torch.zeros_like(selected_param)
if not self.offload:
state["exp_avg"] = torch.zeros_like(selected_param)
state["exp_avg_sq"] = torch.zeros_like(selected_param)
if self.offload:
exp_avg_t = param.exp_avg.view_as(selected_param)
exp_avg_sq_t = param.exp_avg_sq.view_as(selected_param)
else:
exp_avg_t = state["exp_avg"]
exp_avg_sq_t = state["exp_avg_sq"]
params_with_grad.append(selected_param)
grads.append(param.selected_grad)
exp_avgs.append(exp_avg_t)
exp_avg_sqs.append(exp_avg_sq_t)
if amsgrad:
max_exp_avg_sqs.append(state["max_exp_avg_sq"])
state_steps.append(state["step"])
adamw(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=amsgrad,
beta1=beta1,
beta2=beta2,
lr=group["lr"],
weight_decay=group["weight_decay"],
eps=group["eps"],
maximize=False,
)
for i, param in enumerate(params):
if hasattr(param, "selected_grad") and len(param.shape) != 1:
param.data[:, param.selected_indices] = params_with_grad[i]
if self.offload:
self.copy_mv_to_cpu(params)
for param in params:
param.selected_grad = None
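# Conceptual sketch (illustrative, standalone): the selective optimizer above
# touches only the chosen columns of each 2D weight. The indexing pattern is
# equivalent to the following, shown with plain SGD for clarity rather than
# the actual AdamW update.
def _example_selective_column_update(weight, grad, selected_indices, lr=1e-3):
    weight[:, selected_indices] -= lr * grad[:, selected_indices]
    return weight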
class ZenFlowSelectiveAdamW_stage3(torch.optim.AdamW):
def __init__(self, *args, offload=False, bucket_size=5e8, **kwargs):
super(ZenFlowSelectiveAdamW_stage3, self).__init__(*args, **kwargs)
self.offload = offload
if offload:
self.step = self._step_with_offload
self.bucket_size = bucket_size
else:
self.step = self._step_without_offload
@torch.no_grad()
def temp_copy_param(self, paramlist):
for param in paramlist:
if hasattr(param, "selected_grad"):
num_column, num_row = param.ds_shape if len(param.ds_shape) != 1 else (param.ds_shape[0], 1)
if num_row != 1:
param_2d = param.ds_tensor.data.narrow(0, param.complete_column_offset, param.complete_numel).view(
param.complete_numel // num_row, num_row)
temp_selected_param = param_2d[param.selected_indices, :].clone().detach()
else:
temp_selected_param = param.ds_tensor.data.clone().detach()
if self.offload:
param.temp_selected_param = temp_selected_param.cpu()
else:
param.temp_selected_param = temp_selected_param
def clear_selected_mv(self):
print("Zenflow: clearing selective optimizer states...")
for group in self.param_groups:
for param in group['params']:
state = self.state.setdefault(param, {})
if len(state) == 0:
continue
if self.offload:
param.exp_avg_cpu_data.zero_()
param.exp_avg_sq_cpu_data.zero_()
else:
state["exp_avg"].zero_()
state["exp_avg_sq"].zero_()
@torch.no_grad()
def _step_without_offload(self):
for group in self.param_groups:
params_with_grad: List[Tensor] = []
grads: List[Tensor] = []
exp_avgs: List[Tensor] = []
exp_avg_sqs: List[Tensor] = []
max_exp_avg_sqs: List[Tensor] = []
state_steps: List[Tensor] = []
amsgrad: bool = group["amsgrad"]
beta1, beta2 = cast(Tuple[float, float], group["betas"])
for param in group["params"]:
if hasattr(param, "selected_grad"):
num_column, num_row = param.ds_shape if len(param.ds_shape) != 1 else (param.ds_shape[0], 1)
if num_row != 1:
param_2d = param.ds_tensor.data.narrow(0, param.complete_column_offset,
param.complete_numel).view(
param.complete_numel // num_row, num_row)
selected_param = param_2d[param.selected_indices, :]
else:
selected_param = param.ds_tensor.data
if hasattr(param, 'temp_selected_param') and param.temp_selected_param is not None:
selected_param.copy_(param.temp_selected_param)
state = self.state.setdefault(param, {})
if len(state) == 0:
state["step"] = torch.zeros((), dtype=param.dtype, device=selected_param.device)
state["exp_avg"] = torch.zeros_like(selected_param)
state["exp_avg_sq"] = torch.zeros_like(selected_param)
if amsgrad:
state["max_exp_avg_sq"] = torch.zeros_like(selected_param)
params_with_grad.append(selected_param)
grads.append(param.selected_grad)
exp_avgs.append(state["exp_avg"])
exp_avg_sqs.append(state["exp_avg_sq"])
if amsgrad:
max_exp_avg_sqs.append(state["max_exp_avg_sq"])
state_steps.append(state["step"])
adamw(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=amsgrad,
beta1=beta1,
beta2=beta2,
lr=group["lr"],
weight_decay=group["weight_decay"],
eps=group["eps"],
maximize=False,
)
for i, param in enumerate(group["params"]):
if hasattr(param, "selected_grad"):
num_column, num_row = param.ds_shape if len(param.ds_shape) != 1 else (param.ds_shape[0], 1)
if num_row != 1:
param_2d = param.ds_tensor.data.narrow(0, param.complete_column_offset,
param.complete_numel).view(
param.complete_numel // num_row, num_row)
param_2d[param.selected_indices, :] = params_with_grad[i]
for param in group["params"]:
if hasattr(param, "temp_selected_param"):
param.temp_selected_param = None
param.selected_grad = None
def copy_mv_from_cpu(self, params):
for param in params:
param.exp_avg = param.exp_avg_cpu_data.to(param.device, non_blocking=True)
param.exp_avg_sq = param.exp_avg_sq_cpu_data.to(param.device, non_blocking=True)
def copy_mv_to_cpu(self, params):
for param in params:
param.exp_avg_cpu_data.copy_(param.exp_avg.data, non_blocking=True)
param.exp_avg_sq_cpu_data.copy_(param.exp_avg_sq.data, non_blocking=True)
param.exp_avg = None
param.exp_avg_sq = None
@torch.no_grad()
def group_step(self, paramlist):
group_to_paramlist = {}
for param in paramlist:
group_id = param.group_id
if group_id not in group_to_paramlist:
group_to_paramlist[group_id] = []
group_to_paramlist[group_id].append(param)
for group_id in sorted(group_to_paramlist.keys()):
params = group_to_paramlist[group_id]
group = self.param_groups[group_id]
if self.offload:
self.copy_mv_from_cpu(params)
params_with_grad: List[Tensor] = []
grads: List[Tensor] = []
exp_avgs: List[Tensor] = []
exp_avg_sqs: List[Tensor] = []
max_exp_avg_sqs: List[Tensor] = []
state_steps: List[Tensor] = []
amsgrad: bool = group["amsgrad"]
beta1, beta2 = cast(Tuple[float, float], group["betas"])
for param in params:
if hasattr(param, "selected_grad"):
num_column, num_row = param.ds_shape if len(param.ds_shape) != 1 else (param.ds_shape[0], 1)
if num_row != 1:
param_2d = param.ds_tensor.data.narrow(0, param.complete_column_offset,
param.complete_numel).view(
param.complete_numel // num_row, num_row)
selected_param = param_2d[param.selected_indices, :]
else:
selected_param = param.ds_tensor.data
state = self.state.setdefault(param, {})
if len(state) == 0:
state["step"] = torch.zeros((), dtype=param.dtype, device=selected_param.device)
if amsgrad:
state["max_exp_avg_sq"] = torch.zeros_like(selected_param)
if not self.offload:
state["exp_avg"] = torch.zeros_like(selected_param)
state["exp_avg_sq"] = torch.zeros_like(selected_param)
if self.offload:
exp_avg_t = param.exp_avg.view_as(selected_param)
exp_avg_sq_t = param.exp_avg_sq.view_as(selected_param)
else:
exp_avg_t = state["exp_avg"]
exp_avg_sq_t = state["exp_avg_sq"]
params_with_grad.append(selected_param)
grads.append(param.selected_grad)
exp_avgs.append(exp_avg_t)
exp_avg_sqs.append(exp_avg_sq_t)
if amsgrad:
max_exp_avg_sqs.append(state["max_exp_avg_sq"])
state_steps.append(state["step"])
adamw(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=amsgrad,
beta1=beta1,
beta2=beta2,
lr=group["lr"],
weight_decay=group["weight_decay"],
eps=group["eps"],
maximize=False,
)
for i, param in enumerate(params):
if hasattr(param, "selected_grad"):
num_column, num_row = param.ds_shape if len(param.ds_shape) != 1 else (param.ds_shape[0], 1)
if num_row != 1:
param_2d = param.ds_tensor.data.narrow(0, param.complete_column_offset,
param.complete_numel).view(
param.complete_numel // num_row, num_row)
param_2d[param.selected_indices, :] = params_with_grad[i]
if self.offload:
self.copy_mv_to_cpu(params)
for param in params:
param.selected_grad = None
@torch.no_grad()
def _step_with_offload(self):
"""
Performs parameter updates in offload mode.
In this mode, group_step() calls adamw() on each pre-partitioned param bucket,
so memory can be released after each bucket update to reduce GPU overhead.
Without offload, adamw() is called directly for speed.
"""
for group_id, group in enumerate(self.param_groups):
params = group["params"]
bucket = []
bucket_numel = 0
def flush_bucket():
if not bucket:
return
for param in bucket:
if hasattr(param, "temp_selected_param") and param.temp_selected_param is not None:
temp_selected_param = param.temp_selected_param.to(param.device, non_blocking=True)
num_column, num_row = param.ds_shape if len(param.ds_shape) != 1 else (param.ds_shape[0], 1)
if num_row != 1:
param_2d = param.ds_tensor.data.narrow(0, param.complete_column_offset,
param.complete_numel).view(
param.complete_numel // num_row, num_row)
param_2d[param.selected_indices, :] = temp_selected_param
else:
param.ds_tensor.data.copy_(temp_selected_param)
param.temp_selected_param = None
self.group_step(bucket)
bucket.clear()
for param in params:
if hasattr(param, "selected_grad"):
bucket.append(param)
bucket_numel += param.numel()
if bucket_numel >= self.bucket_size:
flush_bucket()
bucket_numel = 0
flush_bucket()
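# Illustrative helper (hypothetical, mirrors the narrow(...).view(...) pattern
# used throughout the stage-3 class above): re-view a flat ZeRO-3 shard slice
# as 2D so row-indexed column selection can be applied to it.
def _example_shard_as_2d(flat, offset, numel, num_row):
    return flat.narrow(0, offset, numel).view(numel // num_row, num_row)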
def _single_tensor_adamw(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
grad_scale: Optional[Tensor],
found_inf: Optional[Tensor],
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: Union[Tensor, float],
weight_decay: float,
eps: float,
maximize: bool,
capturable: bool,
differentiable: bool,
has_complex: bool,
):
assert grad_scale is None and found_inf is None
if torch.jit.is_scripting():
# this assert is due to JIT being dumb and not realizing that the ops below
# have overloads to handle both float and Tensor lrs, so we just assert it's
# a float since most people using JIT are using floats
assert isinstance(lr, float)
for i, param in enumerate(params):
grad = grads[i] if not maximize else -grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step_t = state_steps[i]
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
capturable_supported_devices = _get_capturable_supported_devices()
assert (
param.device.type == step_t.device.type and param.device.type in capturable_supported_devices
), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
if torch.is_complex(param):
grad = torch.view_as_real(grad)
exp_avg = torch.view_as_real(exp_avg)
exp_avg_sq = torch.view_as_real(exp_avg_sq)
if amsgrad:
max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i])
param = torch.view_as_real(param)
# update step
step_t += 1
# Perform stepweight decay
param.mul_(1 - lr * weight_decay)
# Decay the first and second moment running average coefficient
exp_avg.lerp_(grad, 1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if capturable or differentiable:
step = step_t
bias_correction1 = 1 - beta1**step
bias_correction2 = 1 - beta2**step
step_size = lr / bias_correction1
step_size_neg = step_size.neg()
bias_correction2_sqrt = bias_correction2.sqrt()
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
if differentiable:
max_exp_avg_sq = max_exp_avg_sqs[i].clone()
else:
max_exp_avg_sq = max_exp_avg_sqs[i]
max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq))
# Uses the max. for normalizing running avg. of gradient
# Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write
# (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor)
denom = (max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)
else:
denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)
param.addcdiv_(exp_avg, denom)
else:
step = _get_value(step_t)
bias_correction1 = 1 - beta1**step
bias_correction2 = 1 - beta2**step
step_size = lr / bias_correction1
bias_correction2_sqrt = bias_correction2**0.5
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
else:
denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)
param.addcdiv_(exp_avg, denom, value=-step_size)
# Lastly, switch back to complex view
if amsgrad and torch.is_complex(params[i]):
max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])
def _multi_tensor_adamw(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
grad_scale: Optional[Tensor],
found_inf: Optional[Tensor],
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: Union[Tensor, float],
weight_decay: float,
eps: float,
maximize: bool,
capturable: bool,
differentiable: bool,
has_complex: bool,
):
if len(params) == 0:
return
if isinstance(lr, Tensor) and not capturable:
raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True")
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if not torch._utils.is_compiling() and capturable:
capturable_supported_devices = _get_capturable_supported_devices(supports_xla=False)
assert all(
p.device.type == step.device.type and p.device.type in capturable_supported_devices
for p, step in zip(params, state_steps)
), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
assert not differentiable, "_foreach ops don't support autograd"
assert grad_scale is None and found_inf is None
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
[params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps] # type: ignore[list-item]
)
for (
device_params_,
device_grads_,
device_exp_avgs_,
device_exp_avg_sqs_,
device_max_exp_avg_sqs_,
device_state_steps_,
), _ in grouped_tensors.values():
device_params = cast(List[Tensor], device_params_)
device_grads = cast(List[Tensor], device_grads_)
device_exp_avgs = cast(List[Tensor], device_exp_avgs_)
device_exp_avg_sqs = cast(List[Tensor], device_exp_avg_sqs_)
device_state_steps = cast(List[Tensor], device_state_steps_)
if has_complex:
if amsgrad:
device_max_exp_avg_sqs = cast(List[Tensor], device_max_exp_avg_sqs_)
_view_as_real(
device_params,
device_grads,
device_exp_avgs,
device_exp_avg_sqs,
device_max_exp_avg_sqs,
)
else:
_view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs)
if maximize:
device_grads = torch._foreach_neg(device_grads) # type: ignore[assignment]
# Update steps
# If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if not torch._utils.is_compiling() and device_state_steps[0].is_cpu:
torch._foreach_add_(device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0)
else:
torch._foreach_add_(device_state_steps, 1)
# Perform stepweight decay
if weight_decay != 0:
torch._foreach_mul_(device_params, 1 - lr * weight_decay)
# Decay the first and second moment running average coefficient
torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)
torch._foreach_mul_(device_exp_avg_sqs, beta2)
torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, 1 - beta2)
# Delete the local intermediate since it won't be used anymore to save on peak memory
del device_grads
bias_correction1: Union[Tuple[Tensor, ...], List[Tensor]]
bias_correction2: Union[Tuple[Tensor, ...], List[Tensor]]
bias_correction2_sqrt: Union[Tuple[Tensor, ...], List[Tensor]]
if capturable:
bias_correction1 = torch._foreach_pow(beta1, device_state_steps)
bias_correction2 = torch._foreach_pow(beta2, device_state_steps)
# foreach_sub doesn't allow a scalar as the first arg
torch._foreach_sub_(bias_correction1, 1)
torch._foreach_sub_(bias_correction2, 1)
# we do not negate bias_correction1 as it'll need to be negated later anyway
torch._foreach_neg_(bias_correction2)
# foreach_div doesn't allow a scalar as the first arg
torch._foreach_div_(bias_correction1, lr)
torch._foreach_reciprocal_(bias_correction1)
torch._foreach_sqrt_(bias_correction2)
# Re-assign for clarity as we maintain minimal intermediates: we'll have
# step_size = - lr / (1 - beta1 ^ t) where t = num_steps
# bias_correction2_sqrt = sqrt(1 - beta2 ^ t)
step_size = bias_correction1
bias_correction2_sqrt = bias_correction2
if amsgrad:
device_max_exp_avg_sqs = cast(List[Tensor], device_max_exp_avg_sqs_)
# Maintains the maximum of all 2nd moment running avg. till now
torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
# Use the max. for normalizing running avg. of gradient
exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
else:
exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)
torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
torch._foreach_add_(exp_avg_sq_sqrt, eps)
torch._foreach_div_(exp_avg_sq_sqrt, step_size)
# at this point, exp_avg_sq_sqrt = - (1 - beta^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr
torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
else:
bias_correction1 = [1 - beta1**_get_value(step) for step in device_state_steps]
bias_correction2 = [1 - beta2**_get_value(step) for step in device_state_steps]
step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])
bias_correction2_sqrt = [
bc**0.5 for bc in bias_correction2 # type: ignore[arg-type]
]
if amsgrad:
device_max_exp_avg_sqs = cast(List[Tensor], device_max_exp_avg_sqs_)
# Maintains the maximum of all 2nd moment running avg. till now
torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
# Use the max. for normalizing running avg. of gradient
exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
else:
exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)
torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
torch._foreach_add_(exp_avg_sq_sqrt, eps)
torch._foreach_addcdiv_(
device_params,
device_exp_avgs,
exp_avg_sq_sqrt,
step_size, # type: ignore[arg-type]
)
def _fused_adamw(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
grad_scale: Optional[Tensor],
found_inf: Optional[Tensor],
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: Union[Tensor, float],
weight_decay: float,
eps: float,
maximize: bool,
capturable: bool, # Needed for consistency.
differentiable: bool,
has_complex: bool, # Needed for consistency.
) -> None:
if not params:
return
if differentiable:
raise RuntimeError("Adam with fused=True does not support differentiable=True")
grad_scale_dict: DeviceDict = ({grad_scale.device: grad_scale} if grad_scale is not None else {})
found_inf_dict: DeviceDict = ({found_inf.device: found_inf} if found_inf is not None else {})
# We only shuffle around the lr when it is a Tensor and on CUDA, otherwise, we prefer
# treating it as a scalar.
lr_dict: Optional[DeviceDict] = ({lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None)
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
[params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps] # type: ignore[list-item]
)
for (device, _), (
(
device_params_,
device_grads_,
device_exp_avgs_,
device_exp_avg_sqs_,
device_max_exp_avg_sqs,
device_state_steps_,
),
_,
) in grouped_tensors.items():
device_params = cast(List[Tensor], device_params_)
device_grads = cast(List[Tensor], device_grads_)
device_exp_avgs = cast(List[Tensor], device_exp_avgs_)
device_exp_avg_sqs = cast(List[Tensor], device_exp_avg_sqs_)
device_state_steps = cast(List[Tensor], device_state_steps_)
if device.type == "mps": # type: ignore[union-attr]
assert found_inf is None and grad_scale is None
device_grad_scale, device_found_inf = None, None
if grad_scale is not None:
device_grad_scale = grad_scale_dict.setdefault(device, grad_scale.to(device, non_blocking=True))
if found_inf is not None:
device_found_inf = found_inf_dict.setdefault(device, found_inf.to(device, non_blocking=True))
if lr_dict is not None and device not in lr_dict:
lr = lr_dict.setdefault(
device,
lr.to(device=device, non_blocking=True) # type: ignore[union-attr]
)
torch._foreach_add_(device_state_steps, 1)
torch._fused_adamw_(
device_params,
device_grads,
device_exp_avgs,
device_exp_avg_sqs,
device_max_exp_avg_sqs, # type: ignore[arg-type]
device_state_steps,
amsgrad=amsgrad,
lr=lr, # type: ignore[arg-type]
beta1=beta1,
beta2=beta2,
weight_decay=weight_decay,
eps=eps,
maximize=maximize,
grad_scale=device_grad_scale,
found_inf=device_found_inf,
)
if device_found_inf is not None:
torch._foreach_sub_(device_state_steps, [device_found_inf] * len(device_state_steps))
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adamw)
def adamw(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
foreach: Optional[bool] = None,
capturable: bool = False,
differentiable: bool = False,
fused: Optional[bool] = None,
grad_scale: Optional[Tensor] = None,
found_inf: Optional[Tensor] = None,
has_complex: bool = False,
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: Union[float, Tensor],
weight_decay: float,
eps: float,
maximize: bool,
):
r"""Functional API that performs AdamW algorithm computation.
See :class:`~torch.optim.AdamW` for details.
"""
if not _ZENFLOW_AVAILABLE:
raise RuntimeError("ZenFlow adamw function is not available with PyTorch < 2.0. "
"Please upgrade to PyTorch 2.0+ to use ZenFlow, or omit 'zenflow' "
"from your DeepSpeed configuration to use the default ZeRO-Offload optimizer.")
if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps):
raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")
# Respect when the user inputs False/True for foreach or fused. We only want to change
# the default when neither have been user-specified. Note that we default to foreach
# and pass False to use_fused. This is not a mistake--we want to give the fused impl
# bake-in time before making it the default, even if it is typically faster.
if fused is None and foreach is None:
_, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
# Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False.
if foreach and isinstance(lr, Tensor) and not capturable:
foreach = False
if fused is None:
fused = False
if foreach is None:
foreach = False
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
if fused and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with fused optimizers")
if fused and not torch.jit.is_scripting():
func = _fused_adamw
elif foreach and not torch.jit.is_scripting():
func = _multi_tensor_adamw
else:
func = _single_tensor_adamw
func(
params,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=amsgrad,
beta1=beta1,
beta2=beta2,
lr=lr,
weight_decay=weight_decay,
eps=eps,
maximize=maximize,
capturable=capturable,
differentiable=differentiable,
grad_scale=grad_scale,
found_inf=found_inf,
has_complex=has_complex,
)
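# Minimal CPU sketch (illustrative) of the functional entry point above: one
# bare parameter, one step. On CPU this takes the single-tensor path, and the
# parameter moves by roughly -lr after the first update.
def _example_functional_adamw():
    p, g = torch.zeros(3), torch.ones(3)
    m, v = torch.zeros(3), torch.zeros(3)
    step = torch.tensor(0.0)
    adamw([p], [g], [m], [v], [], [step], amsgrad=False, beta1=0.9, beta2=0.999,
          lr=1e-3, weight_decay=0.0, eps=1e-8, maximize=False)
    return p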
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/ops/adam/zenflow_torch_adam.py",
"license": "Apache License 2.0",
"lines": 835,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/runtime/zenflow/engine.py | # Copyright (c) DeepSpeed Team.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed import comm as dist
from typing import TYPE_CHECKING
from deepspeed.utils.torch import required_torch_version
if TYPE_CHECKING:
from deepspeed.runtime.engine import DeepSpeedEngine
def configure_zenflow(engine: "DeepSpeedEngine") -> None:
"""Configure ZenFlow-related scheduling parameters on the engine.
This function initializes ZenFlow flags (e.g., `zenflow`, `auto_update`,
`select_interval`, etc.) based on the `zenflow_config` object. It handles
selection/update strategy resolution and performs basic validation.
Args:
engine (DeepSpeedEngine): The DeepSpeed engine to configure.
"""
zenflow_config = engine.zenflow_config()
    if zenflow_config is None:
engine.zenflow = False
return
if not required_torch_version(min_version=2.1):
raise ValueError(
"Please use PyTorch 2.1 or later to enable ZenFlow. Alternatively, omit `zenflow` config in the config file to fall back to the default ZeRO-Offload optimizer."
)
engine.zenflow = True
select_strategy = zenflow_config.select_strategy
if select_strategy == 'auto':
select_strategy = "epoch"
if isinstance(zenflow_config.select_interval, int):
            import warnings
            warnings.warn(
                "With select_strategy='auto', select_strategy is forced to 'epoch' and "
                "select_interval is reset to 1; the configured select_interval will be overwritten.")
engine.select_interval = 1
else:
if isinstance(zenflow_config.select_interval, str):
raise ValueError("If don't use auto select strategy, select_interval must be a number.")
engine.select_interval = zenflow_config.select_interval
if isinstance(zenflow_config.update_interval, str):
engine.auto_update = True
engine.update_interval = 0
else:
engine.auto_update = False
engine.update_interval = int(zenflow_config.update_interval)
if select_strategy == 'epoch':
if engine.training_dataloader is not None:
zenflow_config.steps_per_epoch = len(engine.training_dataloader)
engine.select_interval = engine.select_interval * len(engine.training_dataloader)
else:
engine.select_interval = 0
if not engine.auto_update and engine.select_interval != 0 and engine.select_interval < engine.update_interval:
raise ValueError("Select interval must be greater or equal to update interval")
engine.overlap_step = zenflow_config.overlap_step
engine.full_warm_up_rounds = zenflow_config.full_warm_up_rounds
engine._config.gradient_accumulation_steps = engine.update_interval
def is_zenflow_update_boundary(engine: "DeepSpeedEngine"):
"""Determine whether the current step is an update boundary for ZenFlow.
This function checks whether the engine should trigger an optimizer update
based on gradient accumulation, warmup phase, and selection/update intervals.
Returns:
bool: True if this step is an update boundary, otherwise False.
"""
if engine.auto_update:
if (engine.micro_steps + 1) <= engine.full_warm_up_rounds:
return True
return (engine.optimizer.zenflow_need_update[engine.optimizer.zenflow_state ^ 1]
or (engine.select_interval != 0 and (engine.micro_steps + 1) % engine.select_interval == 0))
else:
if (engine.micro_steps + 1) < engine.full_warm_up_rounds:
return True
return ((engine.micro_steps + 1 - engine.full_warm_up_rounds) % engine.gradient_accumulation_steps() == 0
or (engine.select_interval != 0 and (engine.micro_steps + 1) % engine.select_interval == 0))
def zenflow_step(engine: "DeepSpeedEngine", lr_kwargs):
"""Main step logic for ZenFlow update scheduling.
This function performs either:
- a selective optimizer update (if at accumulation boundary),
- or just a learning rate scheduler step and logging (if at accumulation iteration).
Args:
engine (DeepSpeedEngine): The engine managing training state.
lr_kwargs (dict): Optional kwargs passed to the LR scheduler step.
"""
if engine.is_gradient_accumulation_boundary():
if engine.micro_steps + 1 >= engine.full_warm_up_rounds:
_take_selective_parameter_step(engine)
if engine.auto_update:
if dist.get_rank() == 0:
print(f"Zenflow: This is an update iter. update_interval: {engine.update_interval}")
engine.update_interval = 0
else:
_take_lr_scheduler_step(engine, lr_kwargs)
_log_selective_optimizer_timers(engine)
def _take_selective_parameter_step(engine: "DeepSpeedEngine"):
"""
Trigger a step on the selective optimizer.
"""
engine.optimizer.selective_optimizer_step()
def _take_lr_scheduler_step(engine: "DeepSpeedEngine", lr_kwargs):
"""
Take a step on the learning rate scheduler.
"""
if engine.lr_scheduler is not None:
try:
engine.lr_scheduler.step(**(lr_kwargs or {}))
except TypeError:
# XXX Hack to work with Megatron 2.0 and DeepSpeed pipelines.
# We don't currently have a way to specify lr_kwargs from
# pipe_engine.train_batch()
engine.lr_scheduler.step(engine.train_batch_size())
def _log_selective_optimizer_timers(engine):
"""
Log the selective optimizer timers.
"""
engine.optimizer.log_selective_optimizer_timers()
def sync_zenflow_optimizer_lr(engine: "DeepSpeedEngine"):
"""
Synchronize the learning rate of the selective optimizer.
If auto_update is enabled, increment the update interval.
"""
engine.optimizer._sync_selective_optimizer_lr()
if engine.auto_update:
engine.update_interval += 1
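# Hedged restatement (illustrative only): the non-auto branch of
# is_zenflow_update_boundary above, with the engine state passed explicitly so
# the scheduling arithmetic can be read in isolation.
def _example_is_update_boundary(micro_step, warm_up_rounds, accum_steps, select_interval):
    if (micro_step + 1) < warm_up_rounds:
        return True
    return ((micro_step + 1 - warm_up_rounds) % accum_steps == 0
            or (select_interval != 0 and (micro_step + 1) % select_interval == 0))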
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/runtime/zenflow/engine.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/runtime/zenflow/zenflow_config.py | # Copyright (c) DeepSpeed Team.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from pydantic import Field, model_validator
from typing import Optional, Union
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
class ZenFlowConfig(DeepSpeedConfigModel):
"""Configuration options for ZenFlow optimization module."""
topk_ratio: float = Field(0.1, ge=0.0, le=1.0)
"""Ratio of top-k important gradient columns to retain (range: 0.0 to 1.0)."""
select_strategy: str = "auto"
"""Strategy for selecting important gradient indices.
Options: "auto", "step", or "epoch"."""
select_interval: Union[str, int] = "auto"
"""Interval at which to reselect important gradient indices.
Can be "auto" or a fixed integer step/epoch interval."""
update_interval: Union[str, int] = "auto"
"""Interval for applying accumulated unimportant gradients to model parameters.
Can be "auto" or a fixed integer step interval."""
overlap_step: bool = False
"""Whether to overlap CPU-side optimizer steps with forward/backward computation."""
offload: bool = False
"""Whether to offload selective optimizer states to CPU to save memory."""
auto_ratio: float = Field(0.99, ge=0.0, le=1.0)
"""Threshold used in the "auto" strategy to determine update_interval."""
full_warm_up_rounds: int = 0
"""Number of initial rounds during which all gradients are fully updated (no selection)."""
pt_reserved_cores_perc: float = Field(0.5, ge=0.0, le=1.0)
"""Number of cores reserved for pytorch threads,
the remaining cores will be used by zenflow optimizer workers"""
steps_per_epoch: Optional[int] = Field(
default=None,
description=
"Number of steps per epoch. This field is initialized during execution and should not be set by users.",
exclude=True)
@model_validator(mode="after")
def validate_fields(self):
if self.select_strategy not in ["auto", "step", "epoch"]:
raise ValueError('select_strategy must be one of "auto", "step", or "epoch"')
if isinstance(self.select_interval, str) and self.select_interval != "auto":
raise ValueError('If select_interval is a string, it must be "auto"')
if isinstance(self.update_interval, str) and self.update_interval != "auto":
raise ValueError('If update_interval is a string, it must be "auto"')
if not isinstance(self.full_warm_up_rounds, int):
raise ValueError('full_warm_up_rounds must be an integer')
if not isinstance(self.pt_reserved_cores_perc, float):
raise ValueError('pt_reserved_cores_perc must be a float')
return self
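# Illustrative construction (values are arbitrary, not recommended defaults):
# the validator above accepts "auto" or concrete integers for both intervals.
def _example_zenflow_config():
    return ZenFlowConfig(topk_ratio=0.2, select_strategy="epoch",
                         select_interval=1, update_interval=4, offload=True)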
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/runtime/zenflow/zenflow_config.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/runtime/zenflow/zenflow_stage_1_and_2.py | # Copyright (c) DeepSpeed Team.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed import comm as dist
from deepspeed.runtime.zero.stage_1_and_2 import DeepSpeedZeroOptimizer
from deepspeed.runtime.zenflow.zenflow_utils import start_optimizer_process
from deepspeed.runtime.utils import (see_memory_usage)
from deepspeed.ops.adam import ZenFlowSelectiveAdamW
from deepspeed.moe.utils import is_moe_param
from deepspeed.accelerator import get_accelerator
from deepspeed.runtime.utils import all_gather_dp_groups
# Toggle this to true to enable correctness test
# with gradient partitioning and without
pg_correctness_test = False
OPTIMIZER_ALLGATHER_TIMER = 'optimizer_allgather'
OPTIMIZER_GRADIENTS_TIMER = 'optimizer_gradients'
OPTIMIZER_STEP_TIMER = 'optimizer_step'
OPTIMIZER_TRANSMIT_TIMER = 'optimizer_transmit_time'
OPTIMIZER_CALC_TIMER = 'optimizer_calc_time'
OPTIMIZER_RECV_PARAMS_TIMER = 'optimizer_receive_params_time'
OPTIMIZER_UPDATE_MODEL_TIMER = 'optimizer_update_model_time'
OPTIMIZER_TIMERS = [
OPTIMIZER_ALLGATHER_TIMER, OPTIMIZER_GRADIENTS_TIMER, OPTIMIZER_STEP_TIMER, OPTIMIZER_TRANSMIT_TIMER,
OPTIMIZER_CALC_TIMER, OPTIMIZER_RECV_PARAMS_TIMER, OPTIMIZER_UPDATE_MODEL_TIMER
]
INITIAL_MICRO_STEP_ID = -1
SELECTIVE_OPTIMIZER_UPDATE_TIMER = 'selective_optimizer_update'
SELECTIVE_OPTIMIZER_PROCESS_TIMER = 'selective_optimizer_process'
SELECTIVE_OPTIMIZER_STEP_TIMER = 'selective_optimizer_step'
SELECTIVE_OPTIMIZER_SYNC_TIMER = 'selective_optimizer_sync'
SELECTIVE_OPTIMIZER_TIMERS = [
SELECTIVE_OPTIMIZER_UPDATE_TIMER, SELECTIVE_OPTIMIZER_PROCESS_TIMER, SELECTIVE_OPTIMIZER_STEP_TIMER,
SELECTIVE_OPTIMIZER_SYNC_TIMER
]
class ZenFlowZeroOptimizer(DeepSpeedZeroOptimizer):
def __init__(
self,
init_optimizer,
param_names,
timers,
optimizer_params,
**kwargs,
):
super().__init__(init_optimizer, param_names, timers, optimizer_params, **kwargs)
zenflow_config = kwargs.get("zenflow_config", None)
self.micro_step = -1
self.full_warm_up_rounds = zenflow_config.full_warm_up_rounds
self.offload_selective_optimizer = zenflow_config.offload
self.pt_reserved_cores_perc = zenflow_config.pt_reserved_cores_perc
self.start_optimizer_process = lambda: start_optimizer_process(self)
self.zf_stage3 = False
if self.offload_selective_optimizer:
assert kwargs.get("overlap_comm", False), "offload selective optimizer should be used with overlap_comm"
self._configure_zenflow(zenflow_config)
self.selective_optimizer = ZenFlowSelectiveAdamW([{"params": group} for group in self.bit16_groups], \
offload=zenflow_config.offload,
bucket_size=self.allgather_bucket_size,
**optimizer_params)
self.num_total_param = sum(sum(1 for param in group if len(param.shape) != 1) for group in self.bit16_groups)
@classmethod
def create(cls, zenflow_config):
if zenflow_config.overlap_step:
return ZenFlowZeroOptimizerParallel
else:
return ZenFlowZeroOptimizerSequential
def _configure_zenflow(self, zenflow_config):
"""
Configure ZenFlow optimizer
"""
if not self.cpu_offload:
raise ValueError("Zenflow must be used with cpu offload")
self.select_strategy = zenflow_config.select_strategy
if self.select_strategy == 'auto':
self.select_strategy = "epoch"
if isinstance(zenflow_config.select_interval, int):
                import warnings
                warnings.warn(
                    "With select_strategy='auto', select_strategy is forced to 'epoch' and "
                    "select_interval is reset to 1; the configured select_interval will be overwritten.")
self.select_interval = 1
else:
if isinstance(zenflow_config.select_interval, str):
raise ValueError("If don't use auto select strategy, select_interval must be a number.")
self.select_interval = int(zenflow_config.select_interval)
if isinstance(zenflow_config.update_interval, str):
self.auto_update = True
self.update_interval = 0
else:
self.auto_update = False
self.update_interval = int(zenflow_config.update_interval)
if self.select_strategy == 'epoch':
if zenflow_config.steps_per_epoch is not None:
self.select_interval = self.select_interval * zenflow_config.steps_per_epoch
else:
self.select_interval = 0
if not self.auto_update and self.select_interval != 0 and self.select_interval < self.update_interval:
raise ValueError("Select interval must be greater or equal to update interval")
self.topk_ratio = zenflow_config.topk_ratio
self.param_id_index_buffer_offset = {}
self.param_id_grad_buffer_offset = {}
if self.auto_update:
self.param_id_sum_buffer_offset = {}
self.auto_ratio = zenflow_config.auto_ratio
self.zenflow_need_update = [False, False]
self.zenflow_state = 0
self.num_need_update = 0
def is_zenflow_select_boundary(self):
return self.zenflow and (self.micro_step - self.full_warm_up_rounds) >= 0 and (
(self.micro_step - self.full_warm_up_rounds) == 0 or
(self.select_interval != 0 and self.micro_step % self.select_interval == 0))
def sync_fp32_param_from_gpu(self):
if self.micro_step == 0:
return
for i, group in enumerate(self.bit16_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
bit16_partitions = self.parallel_partitioned_bit16_groups[i]
fp32_partition = self.single_partition_of_fp32_groups[i]
with torch.no_grad():
fp32_partition.copy_(bit16_partitions[partition_id].to(dtype=fp32_partition.dtype,
device=fp32_partition.device))
def update_selected_channels(self, tensor, total_size, communication_data_type):
curr_size = 0
curr_index_buffer_size = 0
rank_and_offsets = []
prev_id, prev_process_group = -1, None
process_group = self.dp_process_group
rank = dist.get_rank(process_group)
self.index_buffer = torch.empty(total_size, dtype=torch.int32, device=get_accelerator().current_device_name())
bucket = self.ipg_buckets[communication_data_type]
for i, param_idx_in_group, param_id in bucket.params:
param = self.bit16_groups[i][param_idx_in_group]
if len(param.shape) == 1:
continue
if not hasattr(param, 'selected_indices'):
param.selected_indices = None
partition_ids = self.param_to_partition_ids[i][param_id]
# Get all partition ids + their offsets
partition_ids_w_offsets = []
for partition_id in partition_ids:
offset = self.grad_start_offset[i][partition_id][param_id]
partition_ids_w_offsets.append((partition_id, offset))
partition_ids_w_offsets.sort(key=lambda t: t[1])
# Calculate rank and offsets for grad slices
for idx in range(len(partition_ids_w_offsets)):
partition_id, offset = partition_ids_w_offsets[idx]
if idx == len(partition_ids_w_offsets) - 1:
numel = param.numel() - offset
else:
numel = partition_ids_w_offsets[idx + 1][1] - offset
num_row, num_col = param.shape if len(param.shape) == 2 else (1, param.shape[0])
start_column = 0 if not offset else int((offset - 1) / num_row) + 1
end_column = int((offset + numel) / num_row)
num_select = int(self.topk_ratio * (end_column - start_column))
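                # Worked example (illustrative): for a parameter with num_row=4,
                # offset=6 and numel=8, start_column = int(5 / 4) + 1 = 2 and
                # end_column = int(14 / 4) = 3, i.e. this slice owns one full column.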
if partition_id == rank:
start_idx = int(curr_size + start_column * num_row - offset)
num_elements = (end_column - start_column) * num_row
sum_per_column = tensor.narrow(0, start_idx, num_elements)
sum_per_column = sum_per_column.view(end_column - start_column, num_row)
sum_array = sum_per_column.abs().sum(dim=1)
_, top_indices = torch.topk(sum_array, num_select)
top_indices += start_column
self.index_buffer.narrow(0, curr_index_buffer_size, num_select).copy_(top_indices)
if partition_id == prev_id and process_group == prev_process_group:
prev_pid, prev_size, prev_numel = rank_and_offsets[-1]
rank_and_offsets[-1] = (prev_pid, prev_size, prev_numel + num_select)
else:
rank_and_offsets.append((partition_id, curr_index_buffer_size, num_select))
if param_id not in self.param_id_index_buffer_offset:
self.param_id_index_buffer_offset[param_id] = []
self.param_id_index_buffer_offset[param_id].append((curr_index_buffer_size, num_select))
curr_size += numel
curr_index_buffer_size += num_select
for src_rank, offset, num_select in rank_and_offsets:
index_slice = self.index_buffer.narrow(0, offset, num_select)
dist.broadcast(index_slice, src=src_rank, group=process_group)
for i, param_idx_in_group, param_id in bucket.params:
param = self.bit16_groups[i][param_idx_in_group]
if len(param.shape) == 1:
continue
param.selected_indices = None
param.partition_selected_indices = []
for offset, num_select in self.param_id_index_buffer_offset[param_id]:
selected = self.index_buffer.narrow(0, offset, num_select).clone().sort()[0]
if param.selected_indices is None:
param.selected_indices = selected
else:
param.selected_indices = torch.cat([param.selected_indices, selected])
param.partition_selected_indices.append(selected)
self.param_id_index_buffer_offset[param_id] = []
num_row, num_col = param.shape if len(param.shape) == 2 else (1, param.shape[0])
            # Tensor.sort() is not in-place; keep the sorted values explicitly.
            param.selected_indices = param.selected_indices.sort()[0]
param.selected_shape = (param.selected_indices.shape[0],
num_row) if num_row != 1 else (param.selected_indices.shape[0], )
self.index_buffer = None
def _process_selected_fp32_groups_grad(self, tensor, total_size, communication_data_type):
"""
Process gradients for selected columns in FP32 groups
Args:
param: The parameter to process
param_id: ID of the parameter
"""
curr_size = 0
curr_grad_buffer_size = 0
curr_sum_buffer_size = 0
rank_and_offsets = []
prev_id, prev_process_group = -1, None
process_group = self.dp_process_group
rank = dist.get_rank(process_group)
self.grad_buffer = torch.empty(total_size, dtype=self.dtype, device=get_accelerator().current_device_name())
bucket = self.ipg_buckets[communication_data_type]
if self.auto_update:
self.sum_buffer = torch.empty(len(bucket.params) + dist.get_world_size(group=process_group),
dtype=torch.bfloat16,
device=get_accelerator().current_device_name())
group_to_paramlist = {}
for i, param_idx_in_group, param_id in bucket.params:
param = self.bit16_groups[i][param_idx_in_group]
if not hasattr(param, 'selected_indices'):
param.selected_indices = None
partition_ids = self.param_to_partition_ids[i][param_id]
# Get all partition ids + their offsets
partition_ids_w_offsets = []
for partition_id in partition_ids:
offset = self.grad_start_offset[i][partition_id][param_id]
partition_ids_w_offsets.append((partition_id, offset))
partition_ids_w_offsets.sort(key=lambda t: t[1])
# Calculate rank and offsets for grad slices
for idx in range(len(partition_ids_w_offsets)):
partition_id, offset = partition_ids_w_offsets[idx]
if idx == len(partition_ids_w_offsets) - 1:
numel = param.numel() - offset
else:
numel = partition_ids_w_offsets[idx + 1][1] - offset
num_row, num_col = param.shape if len(param.shape) == 2 else (1, param.shape[0])
start_column = 0 if not offset else int((offset - 1) / num_row) + 1
end_column = int((offset + numel) / num_row)
num_select = int(self.topk_ratio * (end_column - start_column)) if len(param.shape) == 2 else numel
grad_size = num_select * num_row
if partition_id == rank:
                    if num_row != 1:
                        selected_grad = param.grad[param.partition_selected_indices[idx], :]
                    else:
                        selected_grad = param.grad[offset:offset + numel]
self.grad_buffer.narrow(0, curr_grad_buffer_size, grad_size).copy_(selected_grad.view(-1))
if self.auto_update:
self.sum_buffer[curr_sum_buffer_size] = tensor.narrow(0, int(curr_size),
int(numel)).abs().sum()
if partition_id == prev_id and process_group == prev_process_group:
if self.auto_update:
prev_pid, prev_size, prev_numel, prev_sum_size, prev_sum_num = rank_and_offsets[-1]
rank_and_offsets[-1] = (prev_pid, prev_size, prev_numel + grad_size, prev_sum_size,
prev_sum_num + 1)
else:
prev_pid, prev_size, prev_numel = rank_and_offsets[-1]
rank_and_offsets[-1] = (prev_pid, prev_size, prev_numel + grad_size)
else:
if self.auto_update:
rank_and_offsets.append(
(partition_id, curr_grad_buffer_size, grad_size, curr_sum_buffer_size, 1))
else:
rank_and_offsets.append((partition_id, curr_grad_buffer_size, grad_size))
if param_id not in self.param_id_grad_buffer_offset:
self.param_id_grad_buffer_offset[param_id] = []
if self.auto_update and param_id not in self.param_id_sum_buffer_offset:
self.param_id_sum_buffer_offset[param_id] = []
self.param_id_grad_buffer_offset[param_id].append((curr_grad_buffer_size, grad_size))
if self.auto_update:
self.param_id_sum_buffer_offset[param_id].append(curr_sum_buffer_size)
curr_size += numel
curr_grad_buffer_size += grad_size
curr_sum_buffer_size += 1
for item in rank_and_offsets:
if self.auto_update:
src_rank, offset, grad_size, sum_offset, sum_num = item
else:
src_rank, offset, grad_size = item
grad_slice = self.grad_buffer.narrow(0, offset, grad_size)
dist.broadcast(grad_slice, src=src_rank, group=process_group)
if self.auto_update:
sum_slice = self.sum_buffer.narrow(0, sum_offset, sum_num)
dist.broadcast(sum_slice, src=src_rank, group=process_group)
for i, param_idx_in_group, param_id in bucket.params:
param = self.bit16_groups[i][param_idx_in_group]
selected_grad = None
for offset, grad_size in self.param_id_grad_buffer_offset[param_id]:
selected_grad_buffer = self.grad_buffer.narrow(0, offset, grad_size).clone().detach()
if selected_grad is None:
selected_grad = selected_grad_buffer
else:
selected_grad = torch.cat([selected_grad, selected_grad_buffer])
            param.selected_grad = (selected_grad.view(param.selected_shape).t()
                                   if len(param.shape) != 1 else selected_grad)
if self.offload_selective_optimizer and not hasattr(param, 'exp_avg_cpu_data'):
buffer = torch.zeros(param.selected_grad.numel(), dtype=param.dtype, device=self.device)
param.exp_avg_cpu_data = get_accelerator().pin_memory(
buffer) if self.cpu_offload_pin_memory else buffer
param.exp_avg_sq_cpu_data = get_accelerator().pin_memory(
buffer.clone()) if self.cpu_offload_pin_memory else buffer.clone()
param_list = group_to_paramlist.setdefault(i, [])
param_list.append(param)
self.param_id_grad_buffer_offset[param_id] = []
if self.auto_update:
grad_total_sum = 0
num_row, num_col = param.shape if len(param.shape) == 2 else (1, param.shape[0])
if num_row == 1:
continue
for offset in self.param_id_sum_buffer_offset[param_id]:
grad_total_sum += self.sum_buffer.narrow(0, offset, 1)
grad_critic_sum = param.selected_grad.abs().sum()
if not hasattr(param, 'non_critic_sum'):
param.non_critic_sum = 0
if not hasattr(param, 'avg_critic_sum'):
param.avg_critic_sum = 0
param.avg_critic_sum = (param.avg_critic_sum * (self.update_interval - 1) +
grad_critic_sum) / self.update_interval / (self.topk_ratio * 10)
param.non_critic_sum += (grad_total_sum - grad_critic_sum) / ((1 - self.topk_ratio) * 10)
if param.non_critic_sum >= param.avg_critic_sum:
self.num_need_update += 1
if self.num_need_update >= int(self.auto_ratio * self.num_total_param):
self.zenflow_need_update[self.zenflow_state] = True
self.param_id_sum_buffer_offset[param_id] = []
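        # Within gradient accumulation, step the selective optimizer immediately;
        # at the accumulation boundary only snapshot the selected params, since the
        # full optimizer step takes over from there.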
if not self.is_gradient_accumulation_boundary:
self.selective_optimizer.group_step(group_to_paramlist)
else:
self.selective_optimizer.temp_copy_param(group_to_paramlist)
self.grad_buffer = None
if self.auto_update:
self.sum_buffer = None
def average_tensor(self, tensor: torch.Tensor, communication_data_type: torch.dtype):
if self.overlap_comm:
stream = self.reduction_stream
if not get_accelerator().resolves_data_dependency():
stream.wait_stream(get_accelerator().current_stream())
get_accelerator().current_stream().wait_stream(stream)
else:
stream = get_accelerator().current_stream()
with get_accelerator().stream(stream):
if not self.reduce_scatter:
self.gradient_reduction_w_predivide(tensor)
return
# Accumulate destination ranks and bucket offsets for each gradient slice.
# Note: potential future optimization, record access pattern of parameters
# in backward pass and partition gradients w.r.t. access pattern so that our
# bucket is guaranteed to be contiguous w.r.t. ranks
rank_and_offsets = []
real_dp_process_group = []
curr_size = 0
prev_id, prev_process_group = -1, None
curr_column_size = 0
curr_selected_reduce_size = 0
process_group = self.dp_process_group
bucket = self.ipg_buckets[communication_data_type]
for i, param_idx_in_group, param_id in bucket.params:
param = self.bit16_groups[i][param_idx_in_group]
process_group = self.dp_process_group
if bucket.has_moe_params:
process_group = self.expert_dp_process_group[param.group_name] if is_moe_param(
param) else self.dp_process_group
partition_ids = self.param_to_partition_ids[i][param_id]
assert all([p_id < dist.get_world_size(group=process_group) for p_id in partition_ids
]), f"world size {dist.get_world_size(group=process_group)} and p_ids: {partition_ids}"
partition_size = self.partition_size[i]
# Get all partition ids + their offsets
partition_ids_w_offsets = []
for partition_id in partition_ids:
offset = self.grad_start_offset[i][partition_id][param_id]
partition_ids_w_offsets.append((partition_id, offset))
partition_ids_w_offsets.sort(key=lambda t: t[1])
num_row, num_col = param.shape if len(param.shape) == 2 else (1, param.shape[0])
curr_column_size += int(num_col * self.topk_ratio) if num_row != 1 else 0
# Calculate rank and offsets for grad slices
for idx in range(len(partition_ids_w_offsets)):
partition_id, offset = partition_ids_w_offsets[idx]
# Calculate numel for grad slice depending on partition location
if idx == len(partition_ids_w_offsets) - 1:
# Last partition_id uses its own offset
numel = param.numel() - offset
else:
# Set numel to next partition's offset
numel = partition_ids_w_offsets[idx + 1][1] - offset
# Merge bucket ranges if they belong to the same rank
if partition_id == prev_id and process_group == prev_process_group:
prev_pid, prev_size, prev_numel = rank_and_offsets[-1]
rank_and_offsets[-1] = (prev_pid, prev_size, prev_numel + numel)
else:
rank_and_offsets.append((partition_id, curr_size, numel))
real_dp_process_group.append(process_group)
curr_size += numel
curr_selected_reduce_size += int(numel * self.topk_ratio) if num_row != 1 else numel
prev_id, prev_process_group = partition_id, process_group
tensor.div_(dist.get_world_size(group=self.dp_process_group) / float(self.sequence_parallel_size))
buckets = {}
for i, (dst, bucket_offset, numel) in enumerate(rank_and_offsets):
grad_slice = tensor.narrow(0, int(bucket_offset), int(numel))
bucket_key = real_dp_process_group[i] if self.use_multi_rank_bucket_allreduce else (
dst, real_dp_process_group[i])
if bucket_key not in buckets:
buckets[bucket_key] = []
if self.use_multi_rank_bucket_allreduce:
buckets[bucket_key].append((dst, grad_slice))
else:
buckets[bucket_key].append(grad_slice)
for bucket_key in buckets:
if self.use_multi_rank_bucket_allreduce:
self.allreduce_and_scatter(buckets[bucket_key],
communication_data_type,
numel_per_bucket=self.reduce_bucket_size,
divide=False,
process_group=bucket_key)
else:
dst, process_group = bucket_key
self.allreduce_no_retain(buckets[bucket_key],
communication_data_type,
numel_per_bucket=self.reduce_bucket_size,
rank=dst,
divide=False,
process_group=process_group)
if self.is_zenflow_select_boundary():
self.timers(SELECTIVE_OPTIMIZER_UPDATE_TIMER).start()
self.update_selected_channels(tensor, curr_column_size, communication_data_type)
self.timers(SELECTIVE_OPTIMIZER_UPDATE_TIMER).stop()
elif self.zenflow:
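                # No selection on this step: start/stop the timer with no work so the
                # selective-optimizer timer logs stay aligned across steps.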
self.timers(SELECTIVE_OPTIMIZER_UPDATE_TIMER).start()
self.timers(SELECTIVE_OPTIMIZER_UPDATE_TIMER).stop()
if self.zenflow and self.micro_step >= self.full_warm_up_rounds:
self.timers(SELECTIVE_OPTIMIZER_PROCESS_TIMER).start()
self._process_selected_fp32_groups_grad(tensor, curr_selected_reduce_size, communication_data_type)
self.timers(SELECTIVE_OPTIMIZER_PROCESS_TIMER).stop()
def backward(self, loss, retain_graph=False):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
self.backward_prologue()
self.micro_step += 1
if self.auto_update:
self.zenflow_need_update[self.zenflow_state] = False
self.num_need_update = 0
if self.zenflow_need_update[self.zenflow_state ^ 1]:
self.update_interval = 0
for group in self.bit16_groups:
for p in group:
p.non_critic_sum = 0
self.update_interval += 1
if self.is_zenflow_select_boundary():
self.timers(SELECTIVE_OPTIMIZER_SYNC_TIMER).start()
self.sync_fp32_param_from_gpu()
self.selective_optimizer.clear_selected_mv()
self.timers(SELECTIVE_OPTIMIZER_SYNC_TIMER).stop()
self.enter_backward()
if self.custom_loss_scaler:
scaled_loss = self.external_loss_scale * loss
scaled_loss.backward(retain_graph=retain_graph)
else:
self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
self.backward_epilogue()
self.exit_backward()
def log_selective_optimizer_timers(self):
self.timers.log(SELECTIVE_OPTIMIZER_TIMERS)
def _sync_selective_optimizer_lr(self):
for group_selected, group in zip(self.selective_optimizer.param_groups, self.optimizer.param_groups):
group_selected["lr"] = group["lr"]
def _selective_optimizer_step(self, group_no):
original_param_groups = self.selective_optimizer.param_groups
self.selective_optimizer.param_groups = [original_param_groups[group_no]]
self.selective_optimizer.step()
self.selective_optimizer.param_groups = original_param_groups
def selective_optimizer_step(self, closure=None):
for i, group in enumerate(self.bit16_groups):
self.timers(SELECTIVE_OPTIMIZER_STEP_TIMER).start()
self._selective_optimizer_step(i)
self.timers(SELECTIVE_OPTIMIZER_STEP_TIMER).stop()
self.timers.log(SELECTIVE_OPTIMIZER_TIMERS)
class ZenFlowZeroOptimizerSequential(ZenFlowZeroOptimizer):
def __init__(self, *args, **kwargs):
super(ZenFlowZeroOptimizerSequential, self).__init__(*args, **kwargs)
def zenflow_cpu_optimizer_step(self, group_no):
self.optimizer.step(step_id=self.micro_step + 1)
class ZenFlowZeroOptimizerParallel(ZenFlowZeroOptimizer):
def __init__(self, *args, **kwargs):
super(ZenFlowZeroOptimizerParallel, self).__init__(*args, **kwargs)
self.process_optimizer_established = False
self.first_update_round_after_warmup = True
def initialize_optimizer_states(self):
for i, group in enumerate(self.bit16_groups):
single_grad_partition = torch.zeros(int(self.partition_size[i]),
dtype=self.single_partition_of_fp32_groups[i].dtype,
device=self.device)
self.single_partition_of_fp32_groups[i].grad = None
buffer = get_accelerator().pin_memory(
single_grad_partition) if self.cpu_offload_pin_memory else single_grad_partition
self.single_partition_of_fp32_groups[i].overlap_grad = [buffer, buffer.clone()]
# Initialize the optimizer states with the flattened fp32 partition.
# State initialization for the Adagrad optimizer occurs at construction as opposed to other optimizers
# which do lazy initialization of the state at the first call to step.
if isinstance(self.optimizer, torch.optim.Adagrad):
self.optimizer = torch.optim.Adagrad(self.single_partition_of_fp32_groups, **self.optimizer.defaults)
if not self.cpu_offload:
for group in self.single_partition_of_fp32_groups:
group.grad = None #class init
return
def _get_offload_gradient_dict(self):
for param_group_index, _ in enumerate(self.optimizer.param_groups):
self.offload_gradient_dict[param_group_index] = []
for lp_param in self.params_in_partition[param_group_index]:
param_id = self.get_param_id(lp_param)
[_, _, dest_offset, num_elements] = self.grad_position[param_id]
dest_tensor = self.single_partition_of_fp32_groups[param_group_index].overlap_grad[0].view(-1).narrow(
0, dest_offset, num_elements)
self.offload_gradient_dict[param_group_index].append(dest_tensor)
def get_overlap_step_state(self):
if self.micro_step < self.full_warm_up_rounds:
return self.micro_step & 1
else:
if not self.auto_update:
return (self.micro_step // self.update_interval) & 1
else:
return self.zenflow_state
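    # Illustrative parity of the double-buffered overlap state during warmup:
    #
    #   micro_step:  0  1  2  3 ...
    #   state:       0  1  0  1 ...
    #
    # so one overlap_grad buffer can be filled while the optimizer process is
    # still consuming the other.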
def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param):
param_id = self.get_param_id(param)
now_state = self.get_overlap_step_state()
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
dest_tensor = self.single_partition_of_fp32_groups[i].overlap_grad[now_state].view(-1).narrow(
0, dest_offset, num_elements)
        grad_accum = self.get_param_gradient_attribute(param)
        # Fall back to param.grad when no separate gradient-accumulation attribute exists.
        if grad_accum is None:
            src_tensor = param.grad.view(-1).narrow(0, source_offset, num_elements)
        else:
            src_tensor = grad_accum.view(-1).narrow(0, source_offset, num_elements)
if src_tensor.dtype != self.master_weights_and_grads_dtype:
src_tensor = src_tensor.to(self.master_weights_and_grads_dtype)
dest_tensor.copy_(src_tensor, non_blocking=True)
param.grad = None #offload only
def wait_last_update_and_copy(self):
if not hasattr(self, 'parent_conn'):
return
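        # The first update round after warmup has no overlapped optimizer step in
        # flight yet, so there is nothing to wait for or copy back.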
if self.micro_step + 1 > self.full_warm_up_rounds and self.first_update_round_after_warmup:
self.first_update_round_after_warmup = False
return
self.timers(OPTIMIZER_RECV_PARAMS_TIMER).start()
msg = self.parent_conn.recv()
assert msg["type"] == "done", "Optimizer process did not finish stepping correctly."
self.timers(OPTIMIZER_RECV_PARAMS_TIMER).stop()
for i, group in enumerate(self.bit16_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
bit16_partitions = self.parallel_partitioned_bit16_groups[i]
fp32_partition = self.optimizer.param_groups[i]['params'][0].stale_param.data
self.timers(OPTIMIZER_TRANSMIT_TIMER).start()
bit16_partitions[partition_id].data.copy_(fp32_partition.to(get_accelerator().current_device_name()).data)
self.timers(OPTIMIZER_TRANSMIT_TIMER).stop()
see_memory_usage('After optimizer before all-gather')
if self.cpu_offload:
self.reset_cpu_buffers()
self.timers(OPTIMIZER_ALLGATHER_TIMER).start()
# Gather the updated weights from everyone.
# Then all partitions of the model parameters are updated and ready for next round forward.
all_gather_dp_groups(groups_flat=self.bit16_groups_flat,
partitioned_param_groups=self.parallel_partitioned_bit16_groups,
dp_process_group=self.real_dp_process_group,
start_alignment_factor=self.nccl_start_alignment_factor,
allgather_bucket_size=self.allgather_bucket_size)
self.timers(OPTIMIZER_ALLGATHER_TIMER).stop()
self.timers(OPTIMIZER_UPDATE_MODEL_TIMER).start()
# TODO: we probably don't need this? just to be safe
for i in range(len(self.bit16_groups)):
self._update_model_bit16_weights(i)
self.timers(OPTIMIZER_UPDATE_MODEL_TIMER).stop()
self.timers.log(OPTIMIZER_TIMERS)
see_memory_usage('After zero_optimizer step')
def zenflow_cpu_optimizer_step(self, now_state, scaled_global_grad_norm):
if not self.process_optimizer_established:
self.start_optimizer_process()
group_infos = []
for group_no, group in enumerate(self.bit16_groups):
single_grad_partition = self.single_partition_of_fp32_groups[group_no].overlap_grad[now_state]
self.unscale_and_clip_grads([single_grad_partition], scaled_global_grad_norm)
group_info = {
"lr": self.optimizer.param_groups[group_no]["lr"],
"betas": self.optimizer.param_groups[group_no]["betas"],
"eps": self.optimizer.param_groups[group_no]["eps"],
"weight_decay": self.optimizer.param_groups[group_no]["weight_decay"],
"bias_correction": self.optimizer.param_groups[group_no]["bias_correction"],
}
group_infos.append(group_info)
self.parent_conn.send({
"type": "step",
"now_state": now_state,
"micro_step": self.micro_step,
"group_infos": group_infos
})
def step(self, closure=None):
"""
Not supporting closure.
"""
self.micro_step_id = INITIAL_MICRO_STEP_ID
see_memory_usage(f"In step before checking overflow")
# First compute norm for all group so we know if there is overflow
if self.dtype == torch.float16:
self.check_overflow()
self._update_scale(self.overflow)
if self.overflow:
see_memory_usage('After overflow before clearing gradients')
self.zero_grad(set_to_none=True)
if self.cpu_offload:
self.reset_cpu_buffers()
else:
self.averaged_gradients = {}
see_memory_usage('After overflow after clearing gradients')
for timer in OPTIMIZER_TIMERS:
self.timers(timer).start()
self.timers(timer).stop()
return
prev_scale = self.loss_scale
# Step 1:- Calculate gradient norm using bit-16 grads
see_memory_usage('Before norm calculation')
scaled_global_grad_norm = self.scaled_global_norm()
self._global_grad_norm = scaled_global_grad_norm / prev_scale
see_memory_usage('After norm before optimizer')
if self.micro_step < self.full_warm_up_rounds:
self.zenflow_cpu_optimizer_step(self.get_overlap_step_state(), scaled_global_grad_norm)
self.wait_last_update_and_copy()
if self.micro_step >= self.full_warm_up_rounds:
self.zenflow_cpu_optimizer_step(self.get_overlap_step_state(), scaled_global_grad_norm)
return
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/runtime/zenflow/zenflow_stage_1_and_2.py",
"license": "Apache License 2.0",
"lines": 625,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:deepspeed/runtime/zenflow/zenflow_utils.py | # Copyright (c) DeepSpeed Team.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import math
import torch
import psutil
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator
def _flatten_dense_tensors(tensors):
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Args:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A contiguous 1D buffer containing input tensors.
"""
transposed_tensors = [t.transpose(0, 1).contiguous() if t.dim() == 2 else t for t in tensors]
return torch._C._nn.flatten_dense_tensors(transposed_tensors)
def _unflatten_dense_tensors(flat, tensors):
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by _flatten_dense_tensors.
Args:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
transposed_tensors = [t.transpose(0, 1) if t.dim() == 2 else t for t in tensors]
unflat = torch._C._nn.unflatten_dense_tensors(flat, transposed_tensors)
return [t.transpose(0, 1) if t.dim() == 2 else t for t in unflat]
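# A minimal, illustrative round-trip check for the transpose-aware flatten helpers
# above (assumes small CPU tensors; not part of the ZenFlow runtime path).
def _example_flatten_roundtrip():  # pragma: no cover
    a = torch.randn(2, 3)
    b = torch.randn(4)
    flat = _flatten_dense_tensors([a, b])
    # 2-D tensors are stored transposed inside the flat buffer; unflattening
    # restores the original orientation and values.
    a2, b2 = _unflatten_dense_tensors(flat, [a, b])
    assert torch.equal(a, a2) and torch.equal(b, b2)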
def disable_accelerator():
accelerator = get_accelerator()
accelerator.is_available = lambda: False
accelerator.device_count = lambda: 0
accelerator.current_device = lambda: -1
# Optionally mark it as initialized if needed
if hasattr(accelerator, "_initialized"):
accelerator._initialized = True
def zenflow_optimizer_process(pipe, param_groups, shared_overlap_grad_map, shared_stale_param_map, zf_affinity):
disable_accelerator()
current_process = psutil.Process()
current_process.cpu_affinity(zf_affinity)
os.environ['OMP_NUM_THREADS'] = str(len(zf_affinity))
from deepspeed.ops.adam import ZenFlowCPUAdam
optimizer = ZenFlowCPUAdam(param_groups, overlap_step=True)
pipe.send({"type": "ready"})
# TODO: replace this with rpc
while True:
cmd = pipe.recv()
if cmd["type"] == "step":
now_state = cmd["now_state"]
micro_step = cmd["micro_step"]
group_infos = cmd["group_infos"]
for group_no, group_info in enumerate(group_infos):
original_param_groups = optimizer.param_groups
optimizer.param_groups = [original_param_groups[group_no]]
group = optimizer.param_groups[0]
for param_idx, param in enumerate(group["params"]):
key = (group_no, param_idx)
if key in shared_overlap_grad_map:
param.overlap_grad = shared_overlap_grad_map[key]
if key in shared_stale_param_map:
param.stale_param = shared_stale_param_map[key]
optimizer.step(step_id=micro_step + 1, now_state=now_state, group_info=group_info)
optimizer.param_groups = original_param_groups
pipe.send({"type": "done"})
elif cmd["type"] == "exit":
break
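# Pipe protocol used by the worker above (messages are plain dicts):
#   parent -> child: {"type": "step", "now_state": ..., "micro_step": ..., "group_infos": [...]}
#   parent -> child: {"type": "exit"}
#   child -> parent: {"type": "ready"} once at startup, then {"type": "done"} after each step.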
def all_tensors_equal(tensor_list):
first_tensor = tensor_list[0]
for tensor in tensor_list[1:]:
if not torch.equal(first_tensor, tensor):
return False
return True
def start_optimizer_process(zf_optimizer):
from multiprocessing import Pipe, get_context, Manager
ctx = get_context("spawn")
zf_optimizer.parent_conn, zf_optimizer.child_conn = Pipe()
manager = Manager()
zf_optimizer.shared_overlap_grad_map = manager.dict()
zf_optimizer.shared_stale_param_map = manager.dict()
if zf_optimizer.zf_stage3:
params_iter = [((group_no, 0), param)
for group_no, param in enumerate(zf_optimizer.fp32_partitioned_groups_flat)]
else:
params_iter = [((group_no, param_idx), param)
for group_no, group in enumerate(zf_optimizer.optimizer.param_groups)
for param_idx, param in enumerate(group["params"])]
for key, param in params_iter:
param.data.share_memory_()
if not hasattr(param, "stale_param"):
param.stale_param = torch.zeros_like(param.data, dtype=param.dtype, device=param.device)
param.stale_param.data.share_memory_()
zf_optimizer.shared_stale_param_map[key] = param.stale_param
if getattr(param, "overlap_grad", None) is not None:
param.overlap_grad[0].data.share_memory_()
param.overlap_grad[1].data.share_memory_()
zf_optimizer.shared_overlap_grad_map[key] = param.overlap_grad
param_groups_data = ([{
"params": [param]
} for param in zf_optimizer.fp32_partitioned_groups_flat]
if zf_optimizer.zf_stage3 else zf_optimizer.optimizer.param_groups)
curr_rank = dist.get_rank()
total_rank = dist.get_world_size()
current_process = psutil.Process()
current_affinity = current_process.cpu_affinity()
all_affinities = [
torch.zeros(len(current_affinity),
dtype=type(current_affinity[0]),
device=get_accelerator().current_device_name()) for _ in range(total_rank)
]
dist.all_gather(
all_affinities,
torch.tensor(current_affinity, dtype=type(current_affinity[0]),
device=get_accelerator().current_device_name()))
    # When the affinity is identical across all ranks, the workers are not bound
    # to distinct cores; do a soft bind here.
if all_tensors_equal(all_affinities):
num_phy_cores = psutil.cpu_count(logical=False)
available_phy_cores = [i for i in current_affinity if i < num_phy_cores]
num_available_phy_cores = len(available_phy_cores)
my_rank = curr_rank
my_size = total_rank
cores_per_rank = num_available_phy_cores // my_size
current_affinity = available_phy_cores[my_rank * cores_per_rank:(my_rank + 1) * cores_per_rank]
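        # Worked example (illustrative): with 16 physical cores and 2 ranks,
        # cores_per_rank = 8, so rank 0 is soft-bound to cores 0-7 and rank 1 to 8-15.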
pt_num_cores = math.ceil(zf_optimizer.pt_reserved_cores_perc * len(current_affinity))
if pt_num_cores > 0 and pt_num_cores < len(current_affinity):
zf_affinity = current_affinity[pt_num_cores:]
pt_affinity = current_affinity[:pt_num_cores]
else:
zf_affinity = current_affinity
pt_affinity = current_affinity
zf_optimizer.process = ctx.Process(
target=zenflow_optimizer_process,
args=(zf_optimizer.child_conn, param_groups_data, zf_optimizer.shared_overlap_grad_map,
zf_optimizer.shared_stale_param_map, zf_affinity),
)
zf_optimizer.process.daemon = True
zf_optimizer.process.start()
current_process.cpu_affinity(pt_affinity)
os.environ['OMP_NUM_THREADS'] = str(len(pt_affinity))
msg = zf_optimizer.parent_conn.recv()
assert msg["type"] == "ready", "Optimizer process did not initialize correctly."
zf_optimizer.process_optimizer_established = True
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "deepspeed/runtime/zenflow/zenflow_utils.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:tests/unit/ops/adam/test_zf_torch_adam.py | # Copyright (c) DeepSpeed Team.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import numpy as np
from torch.nn import Parameter
from deepspeed.ops.adam import ZenFlowSelectiveAdamW, ZenFlowSelectiveAdamW_stage3
def make_param(Opt, shape, selected_indices=None):
param = Parameter(torch.randn(*shape))
if Opt is ZenFlowSelectiveAdamW_stage3:
if param.dim() == 2:
param.ds_shape = (param.shape[1], param.shape[0])
param.ds_tensor = param.clone().T.contiguous().view(-1)
else:
param.ds_shape = tuple(param.shape)
param.ds_tensor = param.clone()
param.complete_column_offset = 0
param.complete_numel = param.numel()
param.group_id = 0
if selected_indices is not None:
param.selected_indices = selected_indices
if param.dim() == 2:
            if Opt is not ZenFlowSelectiveAdamW_stage3:
                param.selected_grad = torch.randn(param.shape[0], len(selected_indices))
                param.temp_selected_param = param.data[:, selected_indices].clone()
            else:
                param.selected_grad = torch.randn(len(selected_indices), param.ds_shape[1])
                param.temp_selected_param = param.ds_tensor.view(param.ds_shape)[selected_indices, :].clone()
else:
param.selected_grad = torch.randn_like(param.data)
param.temp_selected_param = param.data.clone()
return param
@pytest.mark.parametrize("Opt", [ZenFlowSelectiveAdamW, ZenFlowSelectiveAdamW_stage3])
def test_init_methods(Opt):
opt1 = Opt([torch.nn.Parameter(torch.randn(2, 4))], lr=1e-3, offload=False)
assert opt1.step == opt1._step_without_offload
opt2 = Opt([torch.nn.Parameter(torch.randn(2, 4))], lr=1e-3, offload=True)
assert opt2.step == opt2._step_with_offload
@pytest.mark.parametrize("Opt", [ZenFlowSelectiveAdamW, ZenFlowSelectiveAdamW_stage3])
def test_step_without_offload(Opt):
param = make_param(Opt, (4, 6), torch.tensor([1, 3, 4]))
param.requires_grad_(True)
opt = Opt([param], lr=1e-3, offload=False)
    old_selected = (param.data[:, param.selected_indices].clone()
                    if Opt is not ZenFlowSelectiveAdamW_stage3 else
                    param.ds_tensor.view(param.ds_shape)[param.selected_indices, :].clone())
    opt.step()
    new_selected = (param.data[:, param.selected_indices]
                    if Opt is not ZenFlowSelectiveAdamW_stage3 else
                    param.ds_tensor.view(param.ds_shape)[param.selected_indices, :])
diff_norm = (old_selected - new_selected).abs().sum().item()
assert diff_norm > 1e-5, "param was not updated"
assert param.temp_selected_param is None
assert param.selected_grad is None
@pytest.mark.parametrize("Opt", [ZenFlowSelectiveAdamW, ZenFlowSelectiveAdamW_stage3])
def test_step_with_offload_bucket_flush(Opt):
param1 = make_param(Opt, (2, 4), torch.tensor([1, 2]))
param2 = make_param(Opt, (2, 4), torch.tensor([0, 3]))
param1.exp_avg = torch.zeros_like(param1.temp_selected_param)
param1.exp_avg_sq = torch.zeros_like(param1.temp_selected_param)
param1.exp_avg_cpu_data = param1.exp_avg.clone().cpu()
param1.exp_avg_sq_cpu_data = param1.exp_avg_sq.clone().cpu()
param2.exp_avg = torch.zeros_like(param2.temp_selected_param)
param2.exp_avg_sq = torch.zeros_like(param2.temp_selected_param)
param2.exp_avg_cpu_data = param2.exp_avg.clone().cpu()
param2.exp_avg_sq_cpu_data = param2.exp_avg_sq.clone().cpu()
opt = Opt([param1, param2], lr=1e-3, offload=True, bucket_size=1)
opt.step()
assert param1.temp_selected_param is None
assert param2.temp_selected_param is None
@pytest.mark.parametrize("Opt", [ZenFlowSelectiveAdamW, ZenFlowSelectiveAdamW_stage3])
def test_clear_selected_mv(Opt):
param = make_param(Opt, (2, 4), torch.tensor([0, 2]))
opt = Opt([param], lr=1e-3, offload=False)
opt.step()
state = opt.state[param]
assert "exp_avg" in state
opt.clear_selected_mv()
assert state["exp_avg"].abs().sum() == 0
@pytest.mark.parametrize("Opt", [ZenFlowSelectiveAdamW, ZenFlowSelectiveAdamW_stage3])
def test_group_step_without_offload(Opt):
param = make_param(Opt, (2, 6), torch.tensor([0, 1, 3]))
opt = Opt([param], lr=1e-3, offload=False)
    group_to_paramlist = {0: [param]} if Opt is not ZenFlowSelectiveAdamW_stage3 else [param]
opt.group_step(group_to_paramlist)
assert param.selected_grad is None
@pytest.mark.parametrize("Opt", [ZenFlowSelectiveAdamW, ZenFlowSelectiveAdamW_stage3])
def test_group_step_with_offload(Opt):
param = make_param(Opt, (2, 6), torch.tensor([0, 1, 3]))
opt = Opt([param], lr=1e-3, offload=True)
state = opt.state.setdefault(param, {})
state["step"] = torch.zeros((), dtype=param.dtype, device=param.device)
param.exp_avg = torch.zeros_like(param.data[:, param.selected_indices])
param.exp_avg_sq = torch.zeros_like(param.data[:, param.selected_indices])
param.exp_avg_cpu_data = param.exp_avg.clone().cpu()
param.exp_avg_sq_cpu_data = param.exp_avg_sq.clone().cpu()
group_to_paramlist = {0: [param]} if Opt is not ZenFlowSelectiveAdamW_stage3 else [param]
opt.group_step(group_to_paramlist)
assert param.selected_grad is None
@pytest.mark.parametrize("Opt", [ZenFlowSelectiveAdamW, ZenFlowSelectiveAdamW_stage3])
def test_1d_param_support(Opt):
param = make_param(Opt, (10, ), torch.arange(10))
opt = Opt([param], lr=1e-3, offload=False)
opt.step()
assert param.temp_selected_param is None
assert param.selected_grad is None
@pytest.mark.parametrize("Opt", [ZenFlowSelectiveAdamW, ZenFlowSelectiveAdamW_stage3])
def test_state_increment(Opt):
param = make_param(Opt, (2, 4), torch.arange(4))
opt = Opt([param], lr=1e-3, offload=False)
opt.step()
step1 = opt.state[param]['step'].item()
param.selected_grad = torch.randn(2, 4) if Opt is not ZenFlowSelectiveAdamW_stage3 else torch.randn(4, 2)
param.temp_selected_param = param.data.clone() if Opt is not ZenFlowSelectiveAdamW_stage3 else torch.randn(4, 2)
param.selected_indices = torch.arange(4)
opt.step()
step2 = opt.state[param]['step'].item()
assert step2 == step1 + 1
def _compare_with_torch_adamw(param, zenflow_opt, atol=1e-4):
torch_param = torch.nn.Parameter(param.detach().clone())
torch_opt = torch.optim.AdamW([torch_param], lr=zenflow_opt.param_groups[0]['lr'])
for _ in range(10):
grad = torch.randn_like(param)
param.selected_indices = torch.arange(param.shape[1])
param.selected_grad = grad if not isinstance(zenflow_opt, ZenFlowSelectiveAdamW_stage3) else grad.T
param.temp_selected_param = param.data.clone() if not isinstance(
zenflow_opt, ZenFlowSelectiveAdamW_stage3) else param.ds_tensor.view(param.ds_shape).clone()
torch_param.grad = grad.clone()
zenflow_opt.step()
torch_opt.step()
if not isinstance(zenflow_opt, ZenFlowSelectiveAdamW_stage3):
np.testing.assert_allclose(torch_param.data.cpu().numpy(),
param.data.cpu().numpy(),
atol=atol,
err_msg="Mismatch with torch.AdamW")
else:
np.testing.assert_allclose(torch_param.data.cpu().numpy(),
param.ds_tensor.view(param.ds_shape).T.clone().data.cpu().numpy(),
atol=atol,
err_msg="Mismatch with torch.AdamW")
@pytest.mark.parametrize("Opt", [ZenFlowSelectiveAdamW, ZenFlowSelectiveAdamW_stage3])
def test_against_torch_adamw(Opt):
param = make_param(Opt, (2, 4), torch.arange(4))
opt = Opt([param], lr=1e-3, offload=False)
_compare_with_torch_adamw(param, opt)
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "tests/unit/ops/adam/test_zf_torch_adam.py",
"license": "Apache License 2.0",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepspeedai/DeepSpeed:tests/unit/runtime/zenflow/test_zf.py | # Copyright (c) DeepSpeed Team.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import deepspeed.comm as dist
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
from unit.simple_model import SimpleModel, random_dataloader
import deepspeed
class BaseZenFlowTest:
hidden_dim = 10
batch_size = 4
grad_acc_steps = 1
def get_config_dict(self, stage, offload_selective_optimizer, select_strategy, select_interval, update_interval,
full_warm_up_rounds):
config = {
"train_batch_size": self.batch_size,
"gradient_accumulation_steps": self.grad_acc_steps,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 1e-4
}
},
"zero_optimization": {
"stage": stage,
"offload_optimizer": {
"device": "cpu"
},
"overlap_comm": True,
"zenflow": {
"topk_ratio": 0.2,
"select_strategy": select_strategy,
"select_interval": select_interval,
"update_interval": update_interval,
"overlap_step": False,
"offload": offload_selective_optimizer,
"auto_ratio": 0.99,
"full_warm_up_rounds": full_warm_up_rounds,
}
},
"zero_allow_untested_optimizer": True,
}
if get_accelerator().is_bf16_supported():
config["bf16"] = {"enabled": True}
return config
def run_training_distributed(self, config_dict):
if get_accelerator().device_name() == "cpu":
return
model = SimpleModel(self.hidden_dim)
model, _, _, _ = deepspeed.initialize(model=model, model_parameters=model.parameters(), config=config_dict)
train_dataloader = random_dataloader(model=model,
total_samples=20,
hidden_dim=self.hidden_dim,
device=model.device)
dist.barrier()
for step, batch in enumerate(train_dataloader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
model.destroy()
@pytest.mark.parametrize("stage", [1, 2, 3])
@pytest.mark.parametrize("full_warm_up_rounds", [0, 3])
@pytest.mark.parametrize("offload_selective_optimizer", [True, False])
@pytest.mark.parametrize("select_strategy,select_interval,update_interval", [
("auto", "auto", "auto"),
("step", 10, 3),
("epoch", 1, 4),
])
class TestZenFlowSingleGPU(DistributedTest, BaseZenFlowTest):
world_size = 1
def test_zenflow_single_gpu(self, stage, offload_selective_optimizer, select_strategy, select_interval,
update_interval, full_warm_up_rounds):
        config_dict = self.get_config_dict(stage, offload_selective_optimizer, select_strategy, select_interval,
                                           update_interval, full_warm_up_rounds)
        self.run_training_distributed(config_dict)
@pytest.mark.parametrize("stage", [1, 2, 3])
@pytest.mark.parametrize("full_warm_up_rounds", [0, 3])
@pytest.mark.parametrize("offload_selective_optimizer", [True, False])
@pytest.mark.parametrize("select_strategy,select_interval,update_interval", [
("auto", "auto", "auto"),
("step", 10, 3),
("epoch", 1, 4),
])
class TestZenFlowDistributed(DistributedTest, BaseZenFlowTest):
world_size = 2
def test_zenflow_distributed(self, stage, offload_selective_optimizer, select_strategy, select_interval,
update_interval, full_warm_up_rounds):
config_dict = self.get_config_dict(stage, offload_selective_optimizer, select_strategy, select_interval,
update_interval, full_warm_up_rounds)
self.run_training_distributed(config_dict)
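# Note: these tests are driven by pytest; DistributedTest (unit.common) launches the
# configured world_size processes, so no manual torchrun invocation is assumed here.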
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "tests/unit/runtime/zenflow/test_zf.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepspeedai/DeepSpeed:tests/unit/runtime/zenflow/test_zf_config.py | # Copyright (c) DeepSpeed Team.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
from pydantic import ValidationError
from deepspeed.runtime.zero.config import DeepSpeedZeroConfig, ZeroStageEnum
from deepspeed.runtime.zenflow.zenflow_config import ZenFlowConfig
from deepspeed.runtime.zero.offload_config import DeepSpeedZeroOffloadOptimizerConfig
def test_stage_enum_accepts_int_and_enum():
"""`stage` can be passed as either an int or the ZeroStageEnum."""
c1 = DeepSpeedZeroConfig(stage=2)
assert c1.stage == ZeroStageEnum.gradients
c2 = DeepSpeedZeroConfig(stage=ZeroStageEnum.weights)
assert c2.stage == ZeroStageEnum.weights
def test_offload_optimizer_config_from_dict():
"""A dict for offload_optimizer should be coerced into DeepSpeedZeroOffloadOptimizerConfig."""
cfg = DeepSpeedZeroConfig(offload_optimizer={"device": "cpu", "pin_memory": True})
assert isinstance(cfg.offload_optimizer, DeepSpeedZeroOffloadOptimizerConfig)
assert cfg.offload_optimizer.device == "cpu"
assert cfg.offload_optimizer.pin_memory is True
def test_invalid_offload_optimizer_type_raises():
"""Passing a non-dict to offload_optimizer must error out."""
with pytest.raises(ValidationError):
DeepSpeedZeroConfig(offload_optimizer="not a dict")
def test_zenflow_config_from_dict():
"""A dict for zenflow should be coerced into ZenFlowConfig."""
zenflow_payload = {
"topk_ratio": 0.25,
"select_strategy": "auto",
"select_interval": 4,
"update_interval": 8,
"full_warm_up_rounds": 1,
"overlap_step": True
}
cfg = DeepSpeedZeroConfig(zenflow=zenflow_payload)
assert isinstance(cfg.zenflow, ZenFlowConfig)
assert cfg.zenflow.topk_ratio == 0.25
assert cfg.zenflow.select_strategy == "auto"
assert cfg.zenflow.select_interval == 4
assert cfg.zenflow.update_interval == 8
assert cfg.zenflow.full_warm_up_rounds == 1
assert cfg.zenflow.overlap_step is True
def test_invalid_zenflow_type_raises():
"""Passing a non-dict to zenflow must error out."""
with pytest.raises(ValidationError):
DeepSpeedZeroConfig(zenflow=123)
def test_offload_and_zenflow_combined():
"""
offload_optimizer and zenflow can be used together under stage 2
without validation errors.
"""
payload = {
"stage": 2,
"offload_optimizer": {
"device": "cpu",
"pin_memory": True
},
"zenflow": {
"topk_ratio": 0.3,
"select_strategy": "epoch",
"select_interval": 3,
"update_interval": 6,
"full_warm_up_rounds": 0,
"overlap_step": False
}
}
cfg = DeepSpeedZeroConfig(**payload)
assert isinstance(cfg.offload_optimizer, DeepSpeedZeroOffloadOptimizerConfig)
assert cfg.offload_optimizer.device == "cpu"
assert isinstance(cfg.zenflow, ZenFlowConfig)
assert cfg.zenflow.select_strategy == "epoch"
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "tests/unit/runtime/zenflow/test_zf_config.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
deepspeedai/DeepSpeed:ci/accelerate.py | # Copyright (c) Snowflake.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from pathlib import Path
import modal
ROOT_PATH = Path(__file__).parents[1]
# yapf: disable
image = (modal.Image
.from_registry("pytorch/pytorch:2.9.1-cuda12.8-cudnn9-devel", add_python="3.10")
.apt_install("git")
.pip_install("uv")
# uv_pip_install already includes --compile-bytecode
.uv_pip_install("datasets==3.6.0", extra_options="--system")
.pip_install_from_requirements(ROOT_PATH / "requirements/requirements.txt", gpu="any")
.pip_install_from_requirements(ROOT_PATH / "requirements/requirements-dev.txt", gpu="any")
.add_local_dir(ROOT_PATH , remote_path="/root/", copy=True)
.run_commands("pip install /root")
.add_local_dir(ROOT_PATH / "accelerator", remote_path="/root/deepspeed/accelerator")
.add_local_dir(ROOT_PATH / "csrc", remote_path="/root/deepspeed/ops/csrc")
.add_local_dir(ROOT_PATH / "op_builder", remote_path="/root/deepspeed/ops/op_builder")
)
app = modal.App("deepspeedai-accelerate-ci", image=image)
@app.function(
gpu="l40s:1",
timeout=1800,
)
def pytest():
import subprocess
cmd = "git clone https://github.com/huggingface/accelerate"
print(f"running: {cmd}")
subprocess.run(
cmd.split(),
check=True,
cwd=ROOT_PATH / ".",
)
cmd = "uv pip install --system --compile-bytecode ./accelerate[testing]"
print(f"running: {cmd}")
subprocess.run(
cmd.split(),
check=True,
cwd=ROOT_PATH / ".",
)
cmd = "pytest ./accelerate/tests/deepspeed"
print(f"running: {cmd}")
subprocess.run(
cmd.split(),
check=True,
cwd=ROOT_PATH / ".",
)
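# Hypothetical local invocation (assumes the Modal CLI is installed and authenticated):
#   modal run ci/accelerate.py::pytest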
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "ci/accelerate.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
deepspeedai/DeepSpeed:ci/torch_latest.py | # Copyright (c) Snowflake.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from pathlib import Path
import modal
ROOT_PATH = Path(__file__).parents[1]
# yapf: disable
image = (modal.Image
.from_registry("pytorch/pytorch:2.9.1-cuda12.8-cudnn9-devel", add_python="3.10")
.run_commands("apt update && apt install -y libaio-dev")
.pip_install_from_requirements(ROOT_PATH / "requirements/requirements.txt", gpu="any")
.pip_install_from_requirements(ROOT_PATH / "requirements/requirements-dev.txt", gpu="any")
.pip_install_from_requirements(ROOT_PATH / "requirements/requirements-deepcompile.txt", gpu="any")
.add_local_dir(ROOT_PATH , remote_path="/root/", copy=True)
.run_commands("pip install /root")
.add_local_dir(ROOT_PATH / "accelerator", remote_path="/root/deepspeed/accelerator")
.add_local_dir(ROOT_PATH / "csrc", remote_path="/root/deepspeed/ops/csrc")
.add_local_dir(ROOT_PATH / "op_builder", remote_path="/root/deepspeed/ops/op_builder")
)
app = modal.App("deepspeedai-torch-latest-ci", image=image)
@app.function(
gpu="l40s:2",
timeout=1800,
)
def pytest():
import subprocess
subprocess.run(
"pytest -n 4 --verbose tests/unit/v1/ --torch_ver=2.9 --cuda_ver=12.8".split(),
check=True,
cwd=ROOT_PATH / ".",
)
| {
"repo_id": "deepspeedai/DeepSpeed",
"file_path": "ci/torch_latest.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |