{
  "model_config": {
    "model_name": "meta-llama/Llama-2-7b-hf",
    "target_layers": [
      "model.layers.28",
      "model.layers.29",
      "model.layers.30",
      "model.layers.31"
    ],
    "target_component": "mlp.down_proj",
    "layer_selection_reason": "Last 4 layers chosen for semantic richness and memory optimization"
  },
  "coupling_analysis": {
    "method": "gradient_cosine_similarity",
    "gradient_computation": "∇_θ log P(answer|question)",
    "normalization": "L2 normalization",
    "high_coupling_threshold": 0.4,
    "batch_size": 2000,
    "memory_optimization": true
  },
  "dataset_processing": {
    "source_dataset": "hotpotqa",
    "total_samples": 97852,
    "format": "cloze_style_questions",
    "question_template": "Given the context: {context}, the answer to '{question}' is [MASK]."
  },
  "hardware_specs": {
    "gpu": "NVIDIA A40",
    "vram": "46GB",
    "gpu_memory_allocated": "~21GB during analysis",
    "gpu_memory_reserved": "~43GB during analysis"
  }
}