# Provenance (scrape artifact, preserved as comments so the file parses):
# navred61's picture
# copied from private space
# 99a41ea
"""
Main function with multiple helper functions for testing function analysis.
"""
import os
import json
from datetime import datetime
from typing import Dict, List, Any
def read_config_file(config_path):
    """Read configuration from a JSON file.

    Args:
        config_path: Path to the JSON configuration file.

    Returns:
        The parsed configuration dict, or {} when the file is missing
        or contains invalid JSON (a message is printed in either case).
    """
    try:
        # Explicit encoding so the config parses identically on every
        # platform instead of depending on the locale default.
        with open(config_path, 'r', encoding='utf-8') as file:
            return json.load(file)
    except FileNotFoundError:
        print(f"Config file not found: {config_path}")
        return {}
    except json.JSONDecodeError:
        print(f"Invalid JSON in config file: {config_path}")
        return {}
def validate_config(config):
    """Validate configuration parameters.

    Checks that all required keys are present and that the configured
    input directory exists on disk; prints a message for the first
    problem found.

    Returns:
        True when the configuration is usable, False otherwise.
    """
    # All of these must be present before anything else is checked.
    missing = [key for key in ('input_directory', 'output_directory', 'file_extensions')
               if key not in config]
    if missing:
        print(f"Missing required config key: {missing[0]}")
        return False

    # The input directory must already exist; output is created later.
    input_dir = config['input_directory']
    if not os.path.exists(input_dir):
        print(f"Input directory does not exist: {input_dir}")
        return False

    return True
def create_output_directory(output_path):
    """Create the output directory if it doesn't already exist.

    Returns:
        True when the directory exists afterwards, False when the
        process lacks permission to create it.
    """
    try:
        # exist_ok makes re-runs against an existing directory a no-op.
        os.makedirs(output_path, exist_ok=True)
    except PermissionError:
        print(f"Permission denied creating directory: {output_path}")
        return False
    print(f"Output directory ready: {output_path}")
    return True
def scan_files(directory, extensions):
    """Recursively scan *directory* for files with the given extensions.

    Args:
        directory: Root directory to walk.
        extensions: Iterable of extensions including the dot (e.g. '.py').
            Matching is case-insensitive.

    Returns:
        List of full paths of all matching files.
    """
    # Normalize once so a configured '.PY' still matches 'script.py'
    # (file extensions were lowered but the configured list was not),
    # and use a set for O(1) membership tests.
    wanted = {ext.lower() for ext in extensions}
    found_files = []
    for root, _dirs, files in os.walk(directory):
        for name in files:
            if os.path.splitext(name)[1].lower() in wanted:
                found_files.append(os.path.join(root, name))
    return found_files
def analyze_file(file_path):
    """Analyze a single text file and return basic statistics.

    Args:
        file_path: Path of the file to analyze (assumed UTF-8 text).

    Returns:
        A dict with the path, size in bytes, line/character/word counts
        and the last-modified timestamp (ISO 8601), or None when the
        file cannot be read or decoded.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            content = file.read()
        stats = {
            'file_path': file_path,
            'file_size': os.path.getsize(file_path),
            'line_count': len(content.splitlines()),
            'character_count': len(content),
            'word_count': len(content.split()),
            'last_modified': datetime.fromtimestamp(os.path.getmtime(file_path)).isoformat()
        }
        return stats
    # Narrowed from bare `Exception`: filesystem errors and non-UTF-8
    # content are the expected failure modes; genuine bugs should surface.
    except (OSError, UnicodeDecodeError) as e:
        print(f"Error analyzing file {file_path}: {e}")
        return None
def process_files(file_list):
    """Analyze every file in *file_list* and aggregate the results.

    Files that fail analysis are skipped (analyze_file returns None for
    them) and excluded from all totals and averages.

    Returns:
        A (results, summary) tuple: the per-file stats dicts, and a
        summary dict with totals and per-file averages.
    """
    print(f"Processing {len(file_list)} files...")
    results = []
    for path in file_list:
        print(f"  Analyzing: {os.path.basename(path)}")
        stats = analyze_file(path)
        if stats:
            results.append(stats)

    # Aggregate over the successfully analyzed files only.
    size_sum = sum(s['file_size'] for s in results)
    line_sum = sum(s['line_count'] for s in results)
    count = len(results)
    summary = {
        'total_files': count,
        'total_size_bytes': size_sum,
        'total_lines': line_sum,
        'average_file_size': size_sum / count if count else 0,
        'average_lines_per_file': line_sum / count if count else 0,
    }
    return results, summary
def generate_report(results, summary, output_file):
    """Write a JSON report of the analysis to *output_file*.

    Args:
        results: Per-file statistics dicts.
        summary: Aggregate summary dict.
        output_file: Destination path for the JSON report.

    Returns:
        True on success, False when the report could not be written.
    """
    payload = {
        'generated_at': datetime.now().isoformat(),
        'summary': summary,
        'file_details': results,
    }
    try:
        with open(output_file, 'w') as fh:
            json.dump(payload, fh, indent=2)
    except Exception as e:
        print(f"Error generating report: {e}")
        return False
    print(f"Report generated: {output_file}")
    return True
def print_summary(summary):
    """Print a human-readable summary of the analysis to stdout."""
    divider = "=" * 50
    print("\n" + divider)
    print("FILE ANALYSIS SUMMARY")
    print(divider)
    print(f"Total files processed: {summary['total_files']}")
    print(f"Total size: {summary['total_size_bytes']:,} bytes")
    print(f"Total lines: {summary['total_lines']:,}")
    print(f"Average file size: {summary['average_file_size']:.1f} bytes")
    print(f"Average lines per file: {summary['average_lines_per_file']:.1f}")
    print(divider)
def cleanup_temp_files(temp_directory):
    """Remove files whose names start with 'temp_' from *temp_directory*.

    Silently returns when the directory does not exist; any error while
    listing or deleting is reported but not raised.
    """
    if not os.path.exists(temp_directory):
        return
    try:
        for entry in os.listdir(temp_directory):
            if not entry.startswith('temp_'):
                continue
            os.remove(os.path.join(temp_directory, entry))
            print(f"Removed temp file: {entry}")
    except Exception as e:
        print(f"Error during cleanup: {e}")
def main():
    """Orchestrate the file analysis: config, scan, analyze, report.

    Returns:
        True when the run completed, False when any setup step failed
        (bad config, unusable output directory, or no matching files).
    """
    print("Starting File Analysis Tool")
    print("-" * 30)

    # Step 1: read configuration, falling back to sane defaults when
    # config.json is missing or invalid (read_config_file returns {}).
    config = read_config_file("config.json")
    if not config:
        print("Using default configuration...")
        config = {
            'input_directory': '.',
            'output_directory': './output',
            'file_extensions': ['.py', '.txt', '.md'],
            'generate_report': True,
        }

    # Step 2: bail out early on an unusable configuration.
    if not validate_config(config):
        print("Configuration validation failed. Exiting.")
        return False

    # Step 3: make sure there is somewhere to write results.
    if not create_output_directory(config['output_directory']):
        print("Failed to create output directory. Exiting.")
        return False

    # Step 4: discover the files to analyze.
    print(f"Scanning for files in: {config['input_directory']}")
    file_list = scan_files(config['input_directory'], config['file_extensions'])
    if not file_list:
        print("No files found matching criteria.")
        return False
    print(f"Found {len(file_list)} files to process")

    # Step 5: per-file analysis plus aggregation.
    results, summary = process_files(file_list)

    # Step 6: persist the JSON report unless explicitly disabled.
    if config.get('generate_report', True):
        report_file = os.path.join(config['output_directory'], 'analysis_report.json')
        generate_report(results, summary, report_file)

    # Step 7: human-readable recap on stdout.
    print_summary(summary)

    # Step 8: optional temp-file cleanup (only when configured).
    temp_dir = config.get('temp_directory')
    if temp_dir:
        cleanup_temp_files(temp_dir)

    print("\nFile analysis completed successfully!")
    return True
if __name__ == "__main__":
    # Propagate success/failure to the shell as the process exit status.
    raise SystemExit(0 if main() else 1)