{"repo_name": "claude-task-master", "file_name": "/claude-task-master/scripts/modules/task-manager/set-task-status.js", "inference_info": {"prefix_code": "import path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\n\nimport {\n\tlog,\n\treadJSON,\n\twriteJSON,\n\tfindTaskById,\n\tensureTagMetadata\n} from '../utils.js';\nimport { displayBanner } from '../ui.js';\nimport { validateTaskDependencies } from '../dependency-manager.js';\nimport { getDebugFlag } from '../config-manager.js';\nimport updateSingleTaskStatus from './update-single-task-status.js';\nimport generateTaskFiles from './generate-task-files.js';\nimport {\n\tisValidTaskStatus,\n\tTASK_STATUS_OPTIONS\n} from '../../../src/constants/task-status.js';\n\n/**\n * Set the status of a task\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} taskIdInput - Task ID(s) to update\n * @param {string} newStatus - New status\n * @param {Object} options - Additional options (mcpLog for MCP mode, projectRoot for tag resolution)\n * @param {string} [options.projectRoot] - Project root path\n * @param {string} [options.tag] - Optional tag to override current tag resolution\n * @param {string} [options.mcpLog] - MCP logger object\n * @returns {Object|undefined} Result object in MCP mode, undefined in CLI mode\n */\nasync function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {\n\tconst { projectRoot, tag } = options;\n\ttry {\n\t\tif (!isValidTaskStatus(newStatus)) {\n\t\t\tthrow new Error(\n\t\t\t\t`Error: Invalid status value: ${newStatus}. 
Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`\n\t\t\t);\n\t\t}\n\t\t// Determine if we're in MCP mode by checking for mcpLog\n\t\tconst isMcpMode = !!options?.mcpLog;\n\n\t\t// Only display UI elements if not in MCP mode\n\t\tif (!isMcpMode) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(chalk.white.bold(`Updating Task Status to: ${newStatus}`), {\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 'round'\n\t\t\t\t})\n\t\t\t);\n\t\t}\n\n\t\tlog('info', `Reading tasks from ${tasksPath}...`);\n\n\t\t// Read the raw data without tag resolution to preserve tagged structure\n\t\tlet rawData = readJSON(tasksPath, projectRoot, tag); // No tag parameter\n\n\t\t// Handle the case where readJSON returns resolved data with _rawTaggedData\n\t\tif (rawData && rawData._rawTaggedData) {\n\t\t\t// Use the raw tagged data and discard the resolved view\n\t\t\trawData = rawData._rawTaggedData;\n\t\t}\n\n\t\t// Ensure the tag exists in the raw data\n\t\tif (!rawData || !rawData[tag] || !Array.isArray(rawData[tag].tasks)) {\n\t\t\tthrow new Error(\n\t\t\t\t`Invalid tasks file or tag \"${tag}\" not found at ${tasksPath}`\n\t\t\t);\n\t\t}\n\n\t\t// Get the tasks for the current tag\n\t\tconst data = {\n\t\t\ttasks: rawData[tag].tasks,\n\t\t\ttag,\n\t\t\t_rawTaggedData: rawData\n\t\t};\n\n\t\tif (!data || !data.tasks) {\n\t\t\tthrow new Error(`No valid tasks found in ${tasksPath}`);\n\t\t}\n\n\t\t// Handle multiple task IDs (comma-separated)\n\t\tconst taskIds = taskIdInput.split(',').map((id) => id.trim());\n\t\tconst updatedTasks = [];\n\n\t\t// Update each task and capture old status for display\n\t\t", "suffix_code": "\n\n\t\t// Update the raw data structure with the modified tasks\n\t\trawData[tag].tasks = data.tasks;\n\n\t\t// Ensure the tag has proper metadata\n\t\tensureTagMetadata(rawData[tag], {\n\t\t\tdescription: `Tasks for ${tag} context`\n\t\t});\n\n\t\t// Write the updated raw data back to the file\n\t\t// The writeJSON function will automatically filter 
out _rawTaggedData\n\t\twriteJSON(tasksPath, rawData, projectRoot, tag);\n\n\t\t// Validate dependencies after status update\n\t\tlog('info', 'Validating dependencies after status update...');\n\t\tvalidateTaskDependencies(data.tasks);\n\n\t\t// Generate individual task files\n\t\t// log('info', 'Regenerating task files...');\n\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath), {\n\t\t// \tmcpLog: options.mcpLog\n\t\t// });\n\n\t\t// Display success message - only in CLI mode\n\t\tif (!isMcpMode) {\n\t\t\tfor (const updateInfo of updatedTasks) {\n\t\t\t\tconst { id, oldStatus, newStatus: updatedStatus } = updateInfo;\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.white.bold(`Successfully updated task ${id} status:`) +\n\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t`From: ${chalk.yellow(oldStatus)}\\n` +\n\t\t\t\t\t\t\t`To: ${chalk.green(updatedStatus)}`,\n\t\t\t\t\t\t{ padding: 1, borderColor: 'green', borderStyle: 'round' }\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\t// Return success value for programmatic use\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tupdatedTasks: updatedTasks.map(({ id, oldStatus, newStatus }) => ({\n\t\t\t\tid,\n\t\t\t\toldStatus,\n\t\t\t\tnewStatus\n\t\t\t}))\n\t\t};\n\t} catch (error) {\n\t\tlog('error', `Error setting task status: ${error.message}`);\n\n\t\t// Only show error UI in CLI mode\n\t\tif (!options?.mcpLog) {\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\n\t\t\t// Pass session to getDebugFlag\n\t\t\tif (getDebugFlag(options?.session)) {\n\t\t\t\t// Use getter\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\n\t\t\tprocess.exit(1);\n\t\t} else {\n\t\t\t// In MCP mode, throw the error for the caller to handle\n\t\t\tthrow error;\n\t\t}\n\t}\n}\n\nexport default setTaskStatus;\n", "middle_code": "for (const id of taskIds) {\n\t\t\tlet oldStatus = 'unknown';\n\t\t\tif (id.includes('.')) {\n\t\t\t\tconst [parentId, subtaskId] = id\n\t\t\t\t\t.split('.')\n\t\t\t\t\t.map((id) => parseInt(id, 
10));\n\t\t\t\tconst parentTask = data.tasks.find((t) => t.id === parentId);\n\t\t\t\tif (parentTask?.subtasks) {\n\t\t\t\t\tconst subtask = parentTask.subtasks.find((st) => st.id === subtaskId);\n\t\t\t\t\toldStatus = subtask?.status || 'pending';\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tconst taskId = parseInt(id, 10);\n\t\t\t\tconst task = data.tasks.find((t) => t.id === taskId);\n\t\t\t\toldStatus = task?.status || 'pending';\n\t\t\t}\n\t\t\tawait updateSingleTaskStatus(tasksPath, id, newStatus, data, !isMcpMode);\n\t\t\tupdatedTasks.push({ id, oldStatus, newStatus });\n\t\t}", "code_description": null, "fill_type": "BLOCK_TYPE", "language_type": "javascript", "sub_task_type": "for_statement"}, "context_code": [["/claude-task-master/scripts/modules/task-manager/add-task.js", "import path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport Table from 'cli-table3';\nimport { z } from 'zod';\nimport Fuse from 'fuse.js'; // Import Fuse.js for advanced fuzzy search\n\nimport {\n\tdisplayBanner,\n\tgetStatusWithColor,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tsucceedLoadingIndicator,\n\tfailLoadingIndicator,\n\tdisplayAiUsageSummary,\n\tdisplayContextAnalysis\n} from '../ui.js';\nimport {\n\treadJSON,\n\twriteJSON,\n\tlog as consoleLog,\n\ttruncate,\n\tensureTagMetadata,\n\tperformCompleteTagMigration,\n\tmarkMigrationForNotice\n} from '../utils.js';\nimport { generateObjectService } from '../ai-services-unified.js';\nimport { getDefaultPriority } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport ContextGatherer from '../utils/contextGatherer.js';\nimport generateTaskFiles from './generate-task-files.js';\nimport {\n\tTASK_PRIORITY_OPTIONS,\n\tDEFAULT_TASK_PRIORITY,\n\tisValidTaskPriority,\n\tnormalizeTaskPriority\n} from '../../../src/constants/task-priority.js';\n\n// Define Zod schema for the expected AI output object\nconst AiTaskDataSchema = z.object({\n\ttitle: 
z.string().describe('Clear, concise title for the task'),\n\tdescription: z\n\t\t.string()\n\t\t.describe('A one or two sentence description of the task'),\n\tdetails: z\n\t\t.string()\n\t\t.describe('In-depth implementation details, considerations, and guidance'),\n\ttestStrategy: z\n\t\t.string()\n\t\t.describe('Detailed approach for verifying task completion'),\n\tdependencies: z\n\t\t.array(z.number())\n\t\t.nullable()\n\t\t.describe(\n\t\t\t'Array of task IDs that this task depends on (must be completed before this task can start)'\n\t\t)\n});\n\n/**\n * Get all tasks from all tags\n * @param {Object} rawData - The raw tagged data object\n * @returns {Array} A flat array of all task objects\n */\nfunction getAllTasks(rawData) {\n\tlet allTasks = [];\n\tfor (const tagName in rawData) {\n\t\tif (\n\t\t\tObject.prototype.hasOwnProperty.call(rawData, tagName) &&\n\t\t\trawData[tagName] &&\n\t\t\tArray.isArray(rawData[tagName].tasks)\n\t\t) {\n\t\t\tallTasks = allTasks.concat(rawData[tagName].tasks);\n\t\t}\n\t}\n\treturn allTasks;\n}\n\n/**\n * Add a new task using AI\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} prompt - Description of the task to add (required for AI-driven creation)\n * @param {Array} dependencies - Task dependencies\n * @param {string} priority - Task priority\n * @param {function} reportProgress - Function to report progress to MCP server (optional)\n * @param {Object} mcpLog - MCP logger object (optional)\n * @param {Object} session - Session object from MCP server (optional)\n * @param {string} outputFormat - Output format (text or json)\n * @param {Object} customEnv - Custom environment variables (optional) - Note: AI params override deprecated\n * @param {Object} manualTaskData - Manual task data (optional, for direct task creation without AI)\n * @param {boolean} useResearch - Whether to use the research model (passed to unified service)\n * @param {Object} context - Context object containing session and 
potentially projectRoot\n * @param {string} [context.projectRoot] - Project root path (for MCP/env fallback)\n * @param {string} [context.commandName] - The name of the command being executed (for telemetry)\n * @param {string} [context.outputType] - The output type ('cli' or 'mcp', for telemetry)\n * @param {string} [context.tag] - Tag for the task (optional)\n * @returns {Promise} An object containing newTaskId and telemetryData\n */\nasync function addTask(\n\ttasksPath,\n\tprompt,\n\tdependencies = [],\n\tpriority = null,\n\tcontext = {},\n\toutputFormat = 'text', // Default to text for CLI\n\tmanualTaskData = null,\n\tuseResearch = false\n) {\n\tconst { session, mcpLog, projectRoot, commandName, outputType, tag } =\n\t\tcontext;\n\tconst isMCP = !!mcpLog;\n\n\t// Create a consistent logFn object regardless of context\n\tconst logFn = isMCP\n\t\t? mcpLog // Use MCP logger if provided\n\t\t: {\n\t\t\t\t// Create a wrapper around consoleLog for CLI\n\t\t\t\tinfo: (...args) => consoleLog('info', ...args),\n\t\t\t\twarn: (...args) => consoleLog('warn', ...args),\n\t\t\t\terror: (...args) => consoleLog('error', ...args),\n\t\t\t\tdebug: (...args) => consoleLog('debug', ...args),\n\t\t\t\tsuccess: (...args) => consoleLog('success', ...args)\n\t\t\t};\n\n\t// Validate priority - only accept high, medium, or low\n\tlet effectivePriority =\n\t\tpriority || getDefaultPriority(projectRoot) || DEFAULT_TASK_PRIORITY;\n\n\t// If priority is provided, validate and normalize it\n\tif (priority) {\n\t\tconst normalizedPriority = normalizeTaskPriority(priority);\n\t\tif (normalizedPriority) {\n\t\t\teffectivePriority = normalizedPriority;\n\t\t} else {\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsoleLog(\n\t\t\t\t\t'warn',\n\t\t\t\t\t`Invalid priority \"${priority}\". 
Using default priority \"${DEFAULT_TASK_PRIORITY}\".`\n\t\t\t\t);\n\t\t\t}\n\t\t\teffectivePriority = DEFAULT_TASK_PRIORITY;\n\t\t}\n\t}\n\n\tlogFn.info(\n\t\t`Adding new task with prompt: \"${prompt}\", Priority: ${effectivePriority}, Dependencies: ${dependencies.join(', ') || 'None'}, Research: ${useResearch}, ProjectRoot: ${projectRoot}`\n\t);\n\tif (tag) {\n\t\tlogFn.info(`Using tag context: ${tag}`);\n\t}\n\n\tlet loadingIndicator = null;\n\tlet aiServiceResponse = null; // To store the full response from AI service\n\n\t// Create custom reporter that checks for MCP log\n\tconst report = (message, level = 'info') => {\n\t\tif (mcpLog) {\n\t\t\tmcpLog[level](message);\n\t\t} else if (outputFormat === 'text') {\n\t\t\tconsoleLog(level, message);\n\t\t}\n\t};\n\n\t/**\n\t * Recursively builds a dependency graph for a given task\n\t * @param {Array} tasks - All tasks from tasks.json\n\t * @param {number} taskId - ID of the task to analyze\n\t * @param {Set} visited - Set of already visited task IDs\n\t * @param {Map} depthMap - Map of task ID to its depth in the graph\n\t * @param {number} depth - Current depth in the recursion\n\t * @return {Object} Dependency graph data\n\t */\n\tfunction buildDependencyGraph(\n\t\ttasks,\n\t\ttaskId,\n\t\tvisited = new Set(),\n\t\tdepthMap = new Map(),\n\t\tdepth = 0\n\t) {\n\t\t// Skip if we've already visited this task or it doesn't exist\n\t\tif (visited.has(taskId)) {\n\t\t\treturn null;\n\t\t}\n\n\t\t// Find the task\n\t\tconst task = tasks.find((t) => t.id === taskId);\n\t\tif (!task) {\n\t\t\treturn null;\n\t\t}\n\n\t\t// Mark as visited\n\t\tvisited.add(taskId);\n\n\t\t// Update depth if this is a deeper path to this task\n\t\tif (!depthMap.has(taskId) || depth < depthMap.get(taskId)) {\n\t\t\tdepthMap.set(taskId, depth);\n\t\t}\n\n\t\t// Process dependencies\n\t\tconst dependencyData = [];\n\t\tif (task.dependencies && task.dependencies.length > 0) {\n\t\t\tfor (const depId of task.dependencies) {\n\t\t\t\tconst 
depData = buildDependencyGraph(\n\t\t\t\t\ttasks,\n\t\t\t\t\tdepId,\n\t\t\t\t\tvisited,\n\t\t\t\t\tdepthMap,\n\t\t\t\t\tdepth + 1\n\t\t\t\t);\n\t\t\t\tif (depData) {\n\t\t\t\t\tdependencyData.push(depData);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn {\n\t\t\tid: task.id,\n\t\t\ttitle: task.title,\n\t\t\tdescription: task.description,\n\t\t\tstatus: task.status,\n\t\t\tdependencies: dependencyData\n\t\t};\n\t}\n\n\ttry {\n\t\t// Read the existing tasks - IMPORTANT: Read the raw data without tag resolution\n\t\tlet rawData = readJSON(tasksPath, projectRoot, tag); // No tag parameter\n\n\t\t// Handle the case where readJSON returns resolved data with _rawTaggedData\n\t\tif (rawData && rawData._rawTaggedData) {\n\t\t\t// Use the raw tagged data and discard the resolved view\n\t\t\trawData = rawData._rawTaggedData;\n\t\t}\n\n\t\t// If file doesn't exist or is invalid, create a new structure in memory\n\t\tif (!rawData) {\n\t\t\treport(\n\t\t\t\t'tasks.json not found or invalid. Initializing new structure.',\n\t\t\t\t'info'\n\t\t\t);\n\t\t\trawData = {\n\t\t\t\tmaster: {\n\t\t\t\t\ttasks: [],\n\t\t\t\t\tmetadata: {\n\t\t\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\t\t\tdescription: 'Default tasks context'\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\t\t\t// Do not write the file here; it will be written later with the new task.\n\t\t}\n\n\t\t// Handle legacy format migration using utilities\n\t\tif (rawData && Array.isArray(rawData.tasks) && !rawData._rawTaggedData) {\n\t\t\treport('Legacy format detected. 
Migrating to tagged format...', 'info');\n\n\t\t\t// This is legacy format - migrate it to tagged format\n\t\t\trawData = {\n\t\t\t\tmaster: {\n\t\t\t\t\ttasks: rawData.tasks,\n\t\t\t\t\tmetadata: rawData.metadata || {\n\t\t\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\t\t\tdescription: 'Tasks for master context'\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\t\t\t// Ensure proper metadata using utility\n\t\t\tensureTagMetadata(rawData.master, {\n\t\t\t\tdescription: 'Tasks for master context'\n\t\t\t});\n\t\t\t// Do not write the file here; it will be written later with the new task.\n\n\t\t\t// Perform complete migration (config.json, state.json)\n\t\t\tperformCompleteTagMigration(tasksPath);\n\t\t\tmarkMigrationForNotice(tasksPath);\n\n\t\t\treport('Successfully migrated to tagged format.', 'success');\n\t\t}\n\n\t\t// Use the provided tag, or the current active tag, or default to 'master'\n\t\tconst targetTag = tag;\n\n\t\t// Ensure the target tag exists\n\t\tif (!rawData[targetTag]) {\n\t\t\treport(\n\t\t\t\t`Tag \"${targetTag}\" does not exist. Please create it first using the 'add-tag' command.`,\n\t\t\t\t'error'\n\t\t\t);\n\t\t\tthrow new Error(`Tag \"${targetTag}\" not found.`);\n\t\t}\n\n\t\t// Ensure the target tag has a tasks array and metadata object\n\t\tif (!rawData[targetTag].tasks) {\n\t\t\trawData[targetTag].tasks = [];\n\t\t}\n\t\tif (!rawData[targetTag].metadata) {\n\t\t\trawData[targetTag].metadata = {\n\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\tdescription: ``\n\t\t\t};\n\t\t}\n\n\t\t// Get a flat list of ALL tasks across ALL tags to validate dependencies\n\t\tconst allTasks = getAllTasks(rawData);\n\n\t\t// Find the highest task ID *within the target tag* to determine the next ID\n\t\tconst tasksInTargetTag = rawData[targetTag].tasks;\n\t\tconst highestId =\n\t\t\ttasksInTargetTag.length > 0\n\t\t\t\t? 
Math.max(...tasksInTargetTag.map((t) => t.id))\n\t\t\t\t: 0;\n\t\tconst newTaskId = highestId + 1;\n\n\t\t// Only show UI box for CLI mode\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.log(\n\t\t\t\tboxen(chalk.white.bold(`Creating New Task #${newTaskId}`), {\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t})\n\t\t\t);\n\t\t}\n\n\t\t// Validate dependencies before proceeding\n\t\tconst invalidDeps = dependencies.filter((depId) => {\n\t\t\t// Ensure depId is parsed as a number for comparison\n\t\t\tconst numDepId = parseInt(depId, 10);\n\t\t\treturn Number.isNaN(numDepId) || !allTasks.some((t) => t.id === numDepId);\n\t\t});\n\n\t\tif (invalidDeps.length > 0) {\n\t\t\treport(\n\t\t\t\t`The following dependencies do not exist or are invalid: ${invalidDeps.join(', ')}`,\n\t\t\t\t'warn'\n\t\t\t);\n\t\t\treport('Removing invalid dependencies...', 'info');\n\t\t\tdependencies = dependencies.filter(\n\t\t\t\t(depId) => !invalidDeps.includes(depId)\n\t\t\t);\n\t\t}\n\t\t// Ensure dependencies are numbers\n\t\tconst numericDependencies = dependencies.map((dep) => parseInt(dep, 10));\n\n\t\t// Build dependency graphs for explicitly specified dependencies\n\t\tconst dependencyGraphs = [];\n\t\tconst allRelatedTaskIds = new Set();\n\t\tconst depthMap = new Map();\n\n\t\t// First pass: build a complete dependency graph for each specified dependency\n\t\tfor (const depId of numericDependencies) {\n\t\t\tconst graph = buildDependencyGraph(allTasks, depId, new Set(), depthMap);\n\t\t\tif (graph) {\n\t\t\t\tdependencyGraphs.push(graph);\n\t\t\t}\n\t\t}\n\n\t\t// Second pass: build a set of all related task IDs for flat analysis\n\t\tfor (const [taskId, depth] of depthMap.entries()) {\n\t\t\tallRelatedTaskIds.add(taskId);\n\t\t}\n\n\t\tlet taskData;\n\n\t\t// Check if manual task data is provided\n\t\tif (manualTaskData) {\n\t\t\treport('Using manually provided task data', 
'info');\n\t\t\ttaskData = manualTaskData;\n\t\t\treport('DEBUG: Taking MANUAL task data path.', 'debug');\n\n\t\t\t// Basic validation for manual data\n\t\t\tif (\n\t\t\t\t!taskData.title ||\n\t\t\t\ttypeof taskData.title !== 'string' ||\n\t\t\t\t!taskData.description ||\n\t\t\t\ttypeof taskData.description !== 'string'\n\t\t\t) {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t'Manual task data must include at least a title and description.'\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\treport('DEBUG: Taking AI task generation path.', 'debug');\n\t\t\t// --- Refactored AI Interaction ---\n\t\t\treport(`Generating task data with AI with prompt:\\n${prompt}`, 'info');\n\n\t\t\t// --- Use the new ContextGatherer ---\n\t\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\t\t\tconst gatherResult = await contextGatherer.gather({\n\t\t\t\tsemanticQuery: prompt,\n\t\t\t\tdependencyTasks: numericDependencies,\n\t\t\t\tformat: 'research'\n\t\t\t});\n\n\t\t\tconst gatheredContext = gatherResult.context;\n\t\t\tconst analysisData = gatherResult.analysisData;\n\n\t\t\t// Display context analysis if not in silent mode\n\t\t\tif (outputFormat === 'text' && analysisData) {\n\t\t\t\tdisplayContextAnalysis(analysisData, prompt, gatheredContext.length);\n\t\t\t}\n\n\t\t\t// Add any manually provided details to the prompt for context\n\t\t\tlet contextFromArgs = '';\n\t\t\tif (manualTaskData?.title)\n\t\t\t\tcontextFromArgs += `\\n- Suggested Title: \"${manualTaskData.title}\"`;\n\t\t\tif (manualTaskData?.description)\n\t\t\t\tcontextFromArgs += `\\n- Suggested Description: \"${manualTaskData.description}\"`;\n\t\t\tif (manualTaskData?.details)\n\t\t\t\tcontextFromArgs += `\\n- Additional Details Context: \"${manualTaskData.details}\"`;\n\t\t\tif (manualTaskData?.testStrategy)\n\t\t\t\tcontextFromArgs += `\\n- Additional Test Strategy Context: \"${manualTaskData.testStrategy}\"`;\n\n\t\t\t// Load prompts using PromptManager\n\t\t\tconst promptManager = 
getPromptManager();\n\t\t\tconst { systemPrompt, userPrompt } = await promptManager.loadPrompt(\n\t\t\t\t'add-task',\n\t\t\t\t{\n\t\t\t\t\tprompt,\n\t\t\t\t\tnewTaskId,\n\t\t\t\t\texistingTasks: allTasks,\n\t\t\t\t\tgatheredContext,\n\t\t\t\t\tcontextFromArgs,\n\t\t\t\t\tuseResearch,\n\t\t\t\t\tpriority: effectivePriority,\n\t\t\t\t\tdependencies: numericDependencies\n\t\t\t\t}\n\t\t\t);\n\n\t\t\t// Start the loading indicator - only for text mode\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tloadingIndicator = startLoadingIndicator(\n\t\t\t\t\t`Generating new task with ${useResearch ? 'Research' : 'Main'} AI... \\n`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\ttry {\n\t\t\t\tconst serviceRole = useResearch ? 'research' : 'main';\n\t\t\t\treport('DEBUG: Calling generateObjectService...', 'debug');\n\n\t\t\t\taiServiceResponse = await generateObjectService({\n\t\t\t\t\t// Capture the full response\n\t\t\t\t\trole: serviceRole,\n\t\t\t\t\tsession: session,\n\t\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\t\tschema: AiTaskDataSchema,\n\t\t\t\t\tobjectName: 'newTaskData',\n\t\t\t\t\tsystemPrompt: systemPrompt,\n\t\t\t\t\tprompt: userPrompt,\n\t\t\t\t\tcommandName: commandName || 'add-task', // Use passed commandName or default\n\t\t\t\t\toutputType: outputType || (isMCP ? 
'mcp' : 'cli') // Use passed outputType or derive\n\t\t\t\t});\n\t\t\t\treport('DEBUG: generateObjectService returned successfully.', 'debug');\n\n\t\t\t\tif (!aiServiceResponse || !aiServiceResponse.mainResult) {\n\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t'AI service did not return the expected object structure.'\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Prefer mainResult if it looks like a valid task object, otherwise try mainResult.object\n\t\t\t\tif (\n\t\t\t\t\taiServiceResponse.mainResult.title &&\n\t\t\t\t\taiServiceResponse.mainResult.description\n\t\t\t\t) {\n\t\t\t\t\ttaskData = aiServiceResponse.mainResult;\n\t\t\t\t} else if (\n\t\t\t\t\taiServiceResponse.mainResult.object &&\n\t\t\t\t\taiServiceResponse.mainResult.object.title &&\n\t\t\t\t\taiServiceResponse.mainResult.object.description\n\t\t\t\t) {\n\t\t\t\t\ttaskData = aiServiceResponse.mainResult.object;\n\t\t\t\t} else {\n\t\t\t\t\tthrow new Error('AI service did not return a valid task object.');\n\t\t\t\t}\n\n\t\t\t\treport('Successfully generated task data from AI.', 'success');\n\n\t\t\t\t// Success! Show checkmark\n\t\t\t\tif (loadingIndicator) {\n\t\t\t\t\tsucceedLoadingIndicator(\n\t\t\t\t\t\tloadingIndicator,\n\t\t\t\t\t\t'Task generated successfully'\n\t\t\t\t\t);\n\t\t\t\t\tloadingIndicator = null; // Clear it\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\t// Failure! 
Show X\n\t\t\t\tif (loadingIndicator) {\n\t\t\t\t\tfailLoadingIndicator(loadingIndicator, 'AI generation failed');\n\t\t\t\t\tloadingIndicator = null;\n\t\t\t\t}\n\t\t\t\treport(\n\t\t\t\t\t`DEBUG: generateObjectService caught error: ${error.message}`,\n\t\t\t\t\t'debug'\n\t\t\t\t);\n\t\t\t\treport(`Error generating task with AI: ${error.message}`, 'error');\n\t\t\t\tthrow error; // Re-throw error after logging\n\t\t\t} finally {\n\t\t\t\treport('DEBUG: generateObjectService finally block reached.', 'debug');\n\t\t\t\t// Clean up if somehow still running\n\t\t\t\tif (loadingIndicator) {\n\t\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t\t}\n\t\t\t}\n\t\t\t// --- End Refactored AI Interaction ---\n\t\t}\n\n\t\t// Create the new task object\n\t\tconst newTask = {\n\t\t\tid: newTaskId,\n\t\t\ttitle: taskData.title,\n\t\t\tdescription: taskData.description,\n\t\t\tdetails: taskData.details || '',\n\t\t\ttestStrategy: taskData.testStrategy || '',\n\t\t\tstatus: 'pending',\n\t\t\tdependencies: taskData.dependencies?.length\n\t\t\t\t? taskData.dependencies\n\t\t\t\t: numericDependencies, // Use AI-suggested dependencies if available, fallback to manually specified\n\t\t\tpriority: effectivePriority,\n\t\t\tsubtasks: [] // Initialize with empty subtasks array\n\t\t};\n\n\t\t// Additional check: validate all dependencies in the AI response\n\t\tif (taskData.dependencies?.length) {\n\t\t\tconst allValidDeps = taskData.dependencies.every((depId) => {\n\t\t\t\tconst numDepId = parseInt(depId, 10);\n\t\t\t\treturn (\n\t\t\t\t\t!Number.isNaN(numDepId) && allTasks.some((t) => t.id === numDepId)\n\t\t\t\t);\n\t\t\t});\n\n\t\t\tif (!allValidDeps) {\n\t\t\t\treport(\n\t\t\t\t\t'AI suggested invalid dependencies. 
Filtering them out...',\n\t\t\t\t\t'warn'\n\t\t\t\t);\n\t\t\t\tnewTask.dependencies = taskData.dependencies.filter((depId) => {\n\t\t\t\t\tconst numDepId = parseInt(depId, 10);\n\t\t\t\t\treturn (\n\t\t\t\t\t\t!Number.isNaN(numDepId) && allTasks.some((t) => t.id === numDepId)\n\t\t\t\t\t);\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\t// Add the task to the tasks array OF THE CORRECT TAG\n\t\trawData[targetTag].tasks.push(newTask);\n\t\t// Update the tag's metadata\n\t\tensureTagMetadata(rawData[targetTag], {\n\t\t\tdescription: `Tasks for ${targetTag} context`\n\t\t});\n\n\t\treport('DEBUG: Writing tasks.json...', 'debug');\n\t\t// Write the updated raw data back to the file\n\t\t// The writeJSON function will automatically filter out _rawTaggedData\n\t\twriteJSON(tasksPath, rawData, projectRoot, targetTag);\n\t\treport('DEBUG: tasks.json written.', 'debug');\n\n\t\t// Show success message - only for text output (CLI)\n\t\tif (outputFormat === 'text') {\n\t\t\tconst table = new Table({\n\t\t\t\thead: [\n\t\t\t\t\tchalk.cyan.bold('ID'),\n\t\t\t\t\tchalk.cyan.bold('Title'),\n\t\t\t\t\tchalk.cyan.bold('Description')\n\t\t\t\t],\n\t\t\t\tcolWidths: [5, 30, 50] // Adjust widths as needed\n\t\t\t});\n\n\t\t\ttable.push([\n\t\t\t\tnewTask.id,\n\t\t\t\ttruncate(newTask.title, 27),\n\t\t\t\ttruncate(newTask.description, 47)\n\t\t\t]);\n\n\t\t\tconsole.log(chalk.green('✓ New task created successfully:'));\n\t\t\tconsole.log(table.toString());\n\n\t\t\t// Helper to get priority color\n\t\t\tconst getPriorityColor = (p) => {\n\t\t\t\tswitch (p?.toLowerCase()) {\n\t\t\t\t\tcase 'high':\n\t\t\t\t\t\treturn 'red';\n\t\t\t\t\tcase 'low':\n\t\t\t\t\t\treturn 'gray';\n\t\t\t\t\tdefault:\n\t\t\t\t\t\treturn 'yellow';\n\t\t\t\t}\n\t\t\t};\n\n\t\t\t// Check if AI added new dependencies that weren't explicitly provided\n\t\t\tconst aiAddedDeps = newTask.dependencies.filter(\n\t\t\t\t(dep) => !numericDependencies.includes(dep)\n\t\t\t);\n\n\t\t\t// Check if AI removed any dependencies that were 
explicitly provided\n\t\t\tconst aiRemovedDeps = numericDependencies.filter(\n\t\t\t\t(dep) => !newTask.dependencies.includes(dep)\n\t\t\t);\n\n\t\t\t// Get task titles for dependencies to display\n\t\t\tconst depTitles = {};\n\t\t\tnewTask.dependencies.forEach((dep) => {\n\t\t\t\tconst depTask = allTasks.find((t) => t.id === dep);\n\t\t\t\tif (depTask) {\n\t\t\t\t\tdepTitles[dep] = truncate(depTask.title, 30);\n\t\t\t\t}\n\t\t\t});\n\n\t\t\t// Prepare dependency display string\n\t\t\tlet dependencyDisplay = '';\n\t\t\tif (newTask.dependencies.length > 0) {\n\t\t\t\tdependencyDisplay = chalk.white('Dependencies:') + '\\n';\n\t\t\t\tnewTask.dependencies.forEach((dep) => {\n\t\t\t\t\tconst isAiAdded = aiAddedDeps.includes(dep);\n\t\t\t\t\tconst depType = isAiAdded ? chalk.yellow(' (AI suggested)') : '';\n\t\t\t\t\tdependencyDisplay +=\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t` - ${dep}: ${depTitles[dep] || 'Unknown task'}${depType}`\n\t\t\t\t\t\t) + '\\n';\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tdependencyDisplay = chalk.white('Dependencies: None') + '\\n';\n\t\t\t}\n\n\t\t\t// Add info about removed dependencies if any\n\t\t\tif (aiRemovedDeps.length > 0) {\n\t\t\t\tdependencyDisplay +=\n\t\t\t\t\tchalk.gray('\\nUser-specified dependencies that were not used:') +\n\t\t\t\t\t'\\n';\n\t\t\t\taiRemovedDeps.forEach((dep) => {\n\t\t\t\t\tconst depTask = allTasks.find((t) => t.id === dep);\n\t\t\t\t\tconst title = depTask ? 
truncate(depTask.title, 30) : 'Unknown task';\n\t\t\t\t\tdependencyDisplay += chalk.gray(` - ${dep}: ${title}`) + '\\n';\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Add dependency analysis summary\n\t\t\tlet dependencyAnalysis = '';\n\t\t\tif (aiAddedDeps.length > 0 || aiRemovedDeps.length > 0) {\n\t\t\t\tdependencyAnalysis =\n\t\t\t\t\t'\\n' + chalk.white.bold('Dependency Analysis:') + '\\n';\n\t\t\t\tif (aiAddedDeps.length > 0) {\n\t\t\t\t\tdependencyAnalysis +=\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t`AI identified ${aiAddedDeps.length} additional dependencies`\n\t\t\t\t\t\t) + '\\n';\n\t\t\t\t}\n\t\t\t\tif (aiRemovedDeps.length > 0) {\n\t\t\t\t\tdependencyAnalysis +=\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`AI excluded ${aiRemovedDeps.length} user-provided dependencies`\n\t\t\t\t\t\t) + '\\n';\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Show success message box\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.white.bold(`Task ${newTaskId} Created Successfully`) +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white(`Title: ${newTask.title}`) +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.white(`Status: ${getStatusWithColor(newTask.status)}`) +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t`Priority: ${chalk[getPriorityColor(newTask.priority)](newTask.priority)}`\n\t\t\t\t\t\t) +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tdependencyDisplay +\n\t\t\t\t\t\tdependencyAnalysis +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.white.bold('Next Steps:') +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t`1. Run ${chalk.yellow(`task-master show ${newTaskId}`)} to see complete task details`\n\t\t\t\t\t\t) +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t`2. Run ${chalk.yellow(`task-master set-status --id=${newTaskId} --status=in-progress`)} to start working on it`\n\t\t\t\t\t\t) +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t`3. 
Run ${chalk.yellow(`task-master expand --id=${newTaskId}`)} to break it down into subtasks`\n\t\t\t\t\t\t),\n\t\t\t\t\t{ padding: 1, borderColor: 'green', borderStyle: 'round' }\n\t\t\t\t)\n\t\t\t);\n\n\t\t\t// Display AI Usage Summary if telemetryData is available\n\t\t\tif (\n\t\t\t\taiServiceResponse &&\n\t\t\t\taiServiceResponse.telemetryData &&\n\t\t\t\t(outputType === 'cli' || outputType === 'text')\n\t\t\t) {\n\t\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t\t}\n\t\t}\n\n\t\treport(\n\t\t\t`DEBUG: Returning new task ID: ${newTaskId} and telemetry.`,\n\t\t\t'debug'\n\t\t);\n\t\treturn {\n\t\t\tnewTaskId: newTaskId,\n\t\t\ttelemetryData: aiServiceResponse ? aiServiceResponse.telemetryData : null,\n\t\t\ttagInfo: aiServiceResponse ? aiServiceResponse.tagInfo : null\n\t\t};\n\t} catch (error) {\n\t\t// Stop any loading indicator on error\n\t\tif (loadingIndicator) {\n\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t}\n\n\t\treport(`Error adding task: ${error.message}`, 'error');\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t}\n\t\t// In MCP mode, we let the direct function handler catch and format\n\t\tthrow error;\n\t}\n}\n\nexport default addTask;\n"], ["/claude-task-master/scripts/modules/task-manager/generate-task-files.js", "import path from 'path';\nimport fs from 'fs';\nimport chalk from 'chalk';\n\nimport { log, readJSON } from '../utils.js';\nimport { formatDependenciesWithStatus } from '../ui.js';\nimport { validateAndFixDependencies } from '../dependency-manager.js';\nimport { getDebugFlag } from '../config-manager.js';\n\n/**\n * Generate individual task files from tasks.json\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} outputDir - Output directory for task files\n * @param {Object} options - Additional options (mcpLog for MCP mode, projectRoot, tag)\n * @param {string} [options.projectRoot] - Project root path\n * @param {string} 
[options.tag] - Tag for the task\n * @param {Object} [options.mcpLog] - MCP logger object\n * @returns {Object|undefined} Result object in MCP mode, undefined in CLI mode\n */\nfunction generateTaskFiles(tasksPath, outputDir, options = {}) {\n\ttry {\n\t\tconst isMcpMode = !!options?.mcpLog;\n\t\tconst { projectRoot, tag } = options;\n\n\t\t// 1. Read the raw data structure, ensuring we have all tags.\n\t\t// We call readJSON without a specific tag to get the resolved default view,\n\t\t// which correctly contains the full structure in `_rawTaggedData`.\n\t\tconst resolvedData = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!resolvedData) {\n\t\t\tthrow new Error(`Could not read or parse tasks file: ${tasksPath}`);\n\t\t}\n\t\t// Prioritize the _rawTaggedData if it exists, otherwise use the data as is.\n\t\tconst rawData = resolvedData._rawTaggedData || resolvedData;\n\n\t\t// 2. Determine the target tag we need to generate files for.\n\t\tconst tagData = rawData[tag];\n\n\t\tif (!tagData || !tagData.tasks) {\n\t\t\tthrow new Error(`Tag '${tag}' not found or has no tasks in the data.`);\n\t\t}\n\t\tconst tasksForGeneration = tagData.tasks;\n\n\t\t// Create the output directory if it doesn't exist\n\t\tif (!fs.existsSync(outputDir)) {\n\t\t\tfs.mkdirSync(outputDir, { recursive: true });\n\t\t}\n\n\t\tlog(\n\t\t\t'info',\n\t\t\t`Preparing to regenerate ${tasksForGeneration.length} task files for tag '${tag}'`\n\t\t);\n\n\t\t// 3. 
Validate dependencies using the FULL, raw data structure to prevent data loss.\n\t\tvalidateAndFixDependencies(\n\t\t\trawData, // Pass the entire object with all tags\n\t\t\ttasksPath,\n\t\t\tprojectRoot,\n\t\t\ttag // Provide the current tag context for the operation\n\t\t);\n\n\t\tconst allTasksInTag = tagData.tasks;\n\t\tconst validTaskIds = allTasksInTag.map((task) => task.id);\n\n\t\t// Cleanup orphaned task files\n\t\tlog('info', 'Checking for orphaned task files to clean up...');\n\t\ttry {\n\t\t\tconst files = fs.readdirSync(outputDir);\n\t\t\t// Tag-aware file patterns: master -> task_001.txt, other tags -> task_001_tagname.txt\n\t\t\tconst masterFilePattern = /^task_(\\d+)\\.txt$/;\n\t\t\tconst taggedFilePattern = new RegExp(`^task_(\\\\d+)_${tag}\\\\.txt$`);\n\n\t\t\tconst orphanedFiles = files.filter((file) => {\n\t\t\t\tlet match = null;\n\t\t\t\tlet fileTaskId = null;\n\n\t\t\t\t// Check if file belongs to current tag\n\t\t\t\tif (tag === 'master') {\n\t\t\t\t\tmatch = file.match(masterFilePattern);\n\t\t\t\t\tif (match) {\n\t\t\t\t\t\tfileTaskId = parseInt(match[1], 10);\n\t\t\t\t\t\t// Only clean up master files when processing master tag\n\t\t\t\t\t\treturn !validTaskIds.includes(fileTaskId);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tmatch = file.match(taggedFilePattern);\n\t\t\t\t\tif (match) {\n\t\t\t\t\t\tfileTaskId = parseInt(match[1], 10);\n\t\t\t\t\t\t// Only clean up files for the current tag\n\t\t\t\t\t\treturn !validTaskIds.includes(fileTaskId);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn false;\n\t\t\t});\n\n\t\t\tif (orphanedFiles.length > 0) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Found ${orphanedFiles.length} orphaned task files to remove for tag '${tag}'`\n\t\t\t\t);\n\t\t\t\torphanedFiles.forEach((file) => {\n\t\t\t\t\tconst filePath = path.join(outputDir, file);\n\t\t\t\t\tfs.unlinkSync(filePath);\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tlog('info', 'No orphaned task files found.');\n\t\t\t}\n\t\t} catch (err) {\n\t\t\tlog('warn', 
`Error cleaning up orphaned task files: ${err.message}`);\n\t\t}\n\n\t\t// Generate task files for the target tag\n\t\tlog('info', `Generating individual task files for tag '${tag}'...`);\n\t\ttasksForGeneration.forEach((task) => {\n\t\t\t// Tag-aware file naming: master -> task_001.txt, other tags -> task_001_tagname.txt\n\t\t\tconst taskFileName =\n\t\t\t\ttag === 'master'\n\t\t\t\t\t? `task_${task.id.toString().padStart(3, '0')}.txt`\n\t\t\t\t\t: `task_${task.id.toString().padStart(3, '0')}_${tag}.txt`;\n\n\t\t\tconst taskPath = path.join(outputDir, taskFileName);\n\n\t\t\tlet content = `# Task ID: ${task.id}\\n`;\n\t\t\tcontent += `# Title: ${task.title}\\n`;\n\t\t\tcontent += `# Status: ${task.status || 'pending'}\\n`;\n\n\t\t\tif (task.dependencies && task.dependencies.length > 0) {\n\t\t\t\tcontent += `# Dependencies: ${formatDependenciesWithStatus(task.dependencies, allTasksInTag, false)}\\n`;\n\t\t\t} else {\n\t\t\t\tcontent += '# Dependencies: None\\n';\n\t\t\t}\n\n\t\t\tcontent += `# Priority: ${task.priority || 'medium'}\\n`;\n\t\t\tcontent += `# Description: ${task.description || ''}\\n`;\n\t\t\tcontent += '# Details:\\n';\n\t\t\tcontent += (task.details || '')\n\t\t\t\t.split('\\n')\n\t\t\t\t.map((line) => line)\n\t\t\t\t.join('\\n');\n\t\t\tcontent += '\\n\\n';\n\t\t\tcontent += '# Test Strategy:\\n';\n\t\t\tcontent += (task.testStrategy || '')\n\t\t\t\t.split('\\n')\n\t\t\t\t.map((line) => line)\n\t\t\t\t.join('\\n');\n\t\t\tcontent += '\\n';\n\n\t\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\t\tcontent += '\\n# Subtasks:\\n';\n\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\tcontent += `## ${subtask.id}. ${subtask.title} [${subtask.status || 'pending'}]\\n`;\n\t\t\t\t\tif (subtask.dependencies && subtask.dependencies.length > 0) {\n\t\t\t\t\t\tconst subtaskDeps = subtask.dependencies\n\t\t\t\t\t\t\t.map((depId) =>\n\t\t\t\t\t\t\t\ttypeof depId === 'number'\n\t\t\t\t\t\t\t\t\t? 
`${task.id}.${depId}`\n\t\t\t\t\t\t\t\t\t: depId.toString()\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t.join(', ');\n\t\t\t\t\t\tcontent += `### Dependencies: ${subtaskDeps}\\n`;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcontent += '### Dependencies: None\\n';\n\t\t\t\t\t}\n\t\t\t\t\tcontent += `### Description: ${subtask.description || ''}\\n`;\n\t\t\t\t\tcontent += '### Details:\\n';\n\t\t\t\t\tcontent += (subtask.details || '')\n\t\t\t\t\t\t.split('\\n')\n\t\t\t\t\t\t.map((line) => line)\n\t\t\t\t\t\t.join('\\n');\n\t\t\t\t\tcontent += '\\n\\n';\n\t\t\t\t});\n\t\t\t}\n\n\t\t\tfs.writeFileSync(taskPath, content);\n\t\t});\n\n\t\tlog(\n\t\t\t'success',\n\t\t\t`All ${tasksForGeneration.length} tasks for tag '${tag}' have been generated into '${outputDir}'.`\n\t\t);\n\n\t\tif (isMcpMode) {\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tcount: tasksForGeneration.length,\n\t\t\t\tdirectory: outputDir\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\tlog('error', `Error generating task files: ${error.message}`);\n\t\tif (!options?.mcpLog) {\n\t\t\tconsole.error(chalk.red(`Error generating task files: ${error.message}`));\n\t\t\tif (getDebugFlag()) {\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\t\t\tprocess.exit(1);\n\t\t} else {\n\t\t\tthrow error;\n\t\t}\n\t}\n}\n\nexport default generateTaskFiles;\n"], ["/claude-task-master/scripts/modules/task-manager/tag-management.js", "import path from 'path';\nimport fs from 'fs';\nimport inquirer from 'inquirer';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport Table from 'cli-table3';\n\nimport {\n\tlog,\n\treadJSON,\n\twriteJSON,\n\tgetCurrentTag,\n\tresolveTag,\n\tgetTasksForTag,\n\tsetTasksForTag,\n\tfindProjectRoot,\n\ttruncate\n} from '../utils.js';\nimport { displayBanner, getStatusWithColor } from '../ui.js';\nimport findNextTask from './find-next-task.js';\n\n/**\n * Create a new tag context\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} tagName - Name of the new tag to create\n * @param {Object} 
options - Options object\n * @param {boolean} [options.copyFromCurrent=false] - Whether to copy tasks from current tag\n * @param {string} [options.copyFromTag] - Specific tag to copy tasks from\n * @param {string} [options.description] - Optional description for the tag\n * @param {Object} context - Context object containing session and projectRoot\n * @param {string} [context.projectRoot] - Project root path\n * @param {Object} [context.mcpLog] - MCP logger object (optional)\n * @param {string} outputFormat - Output format (text or json)\n * @returns {Promise} Result object with tag creation details\n */\nasync function createTag(\n\ttasksPath,\n\ttagName,\n\toptions = {},\n\tcontext = {},\n\toutputFormat = 'text'\n) {\n\tconst { mcpLog, projectRoot } = context;\n\tconst { copyFromCurrent = false, copyFromTag, description } = options;\n\n\t// Create a consistent logFn object regardless of context\n\tconst logFn = mcpLog || {\n\t\tinfo: (...args) => log('info', ...args),\n\t\twarn: (...args) => log('warn', ...args),\n\t\terror: (...args) => log('error', ...args),\n\t\tdebug: (...args) => log('debug', ...args),\n\t\tsuccess: (...args) => log('success', ...args)\n\t};\n\n\ttry {\n\t\t// Validate tag name\n\t\tif (!tagName || typeof tagName !== 'string') {\n\t\t\tthrow new Error('Tag name is required and must be a string');\n\t\t}\n\n\t\t// Validate tag name format (alphanumeric, hyphens, underscores only)\n\t\tif (!/^[a-zA-Z0-9_-]+$/.test(tagName)) {\n\t\t\tthrow new Error(\n\t\t\t\t'Tag name can only contain letters, numbers, hyphens, and underscores'\n\t\t\t);\n\t\t}\n\n\t\t// Reserved tag names\n\t\tconst reservedNames = ['master', 'main', 'default'];\n\t\tif (reservedNames.includes(tagName.toLowerCase())) {\n\t\t\tthrow new Error(`\"${tagName}\" is a reserved tag name`);\n\t\t}\n\n\t\tlogFn.info(`Creating new tag: ${tagName}`);\n\n\t\t// Read current tasks data\n\t\tconst data = readJSON(tasksPath, projectRoot);\n\t\tif (!data) {\n\t\t\tthrow new Error(`Could 
not read tasks file at ${tasksPath}`);\n\t\t}\n\n\t\t// Use raw tagged data for tag operations - ensure we get the actual tagged structure\n\t\tlet rawData;\n\t\tif (data._rawTaggedData) {\n\t\t\t// If we have _rawTaggedData, use it (this is the clean tagged structure)\n\t\t\trawData = data._rawTaggedData;\n\t\t} else if (data.tasks && !data.master) {\n\t\t\t// This is legacy format - create a master tag structure\n\t\t\trawData = {\n\t\t\t\tmaster: {\n\t\t\t\t\ttasks: data.tasks,\n\t\t\t\t\tmetadata: data.metadata || {\n\t\t\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\t\t\tdescription: 'Tasks live here by default'\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// This is already in tagged format, use it directly but exclude internal fields\n\t\t\trawData = {};\n\t\t\tfor (const [key, value] of Object.entries(data)) {\n\t\t\t\tif (key !== '_rawTaggedData' && key !== 'tag') {\n\t\t\t\t\trawData[key] = value;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Check if tag already exists\n\t\tif (rawData[tagName]) {\n\t\t\tthrow new Error(`Tag \"${tagName}\" already exists`);\n\t\t}\n\n\t\t// Determine source for copying tasks (only if explicitly requested)\n\t\tlet sourceTasks = [];\n\t\tif (copyFromCurrent || copyFromTag) {\n\t\t\tconst sourceTag = copyFromTag || getCurrentTag(projectRoot);\n\t\t\tsourceTasks = getTasksForTag(rawData, sourceTag);\n\n\t\t\tif (copyFromTag && sourceTasks.length === 0) {\n\t\t\t\tlogFn.warn(`Source tag \"${copyFromTag}\" not found or has no tasks`);\n\t\t\t}\n\n\t\t\tlogFn.info(`Copying ${sourceTasks.length} tasks from tag \"${sourceTag}\"`);\n\t\t} else {\n\t\t\tlogFn.info('Creating empty tag (no tasks copied)');\n\t\t}\n\n\t\t// Create the new tag structure in raw data\n\t\trawData[tagName] = {\n\t\t\ttasks: [...sourceTasks], // Create a copy of the tasks array\n\t\t\tmetadata: {\n\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\tupdated: new 
Date().toISOString(),\n\t\t\t\tdescription:\n\t\t\t\t\tdescription || `Tag created on ${new Date().toLocaleDateString()}`\n\t\t\t}\n\t\t};\n\n\t\t// Create clean data for writing (exclude _rawTaggedData to prevent corruption)\n\t\tconst cleanData = {};\n\t\tfor (const [key, value] of Object.entries(rawData)) {\n\t\t\tif (key !== '_rawTaggedData') {\n\t\t\t\tcleanData[key] = value;\n\t\t\t}\n\t\t}\n\n\t\t// Write the clean data back to file with proper context to avoid tag corruption\n\t\twriteJSON(tasksPath, cleanData, projectRoot);\n\n\t\tlogFn.success(`Successfully created tag \"${tagName}\"`);\n\n\t\t// For JSON output, return structured data\n\t\tif (outputFormat === 'json') {\n\t\t\treturn {\n\t\t\t\ttagName,\n\t\t\t\tcreated: true,\n\t\t\t\ttasksCopied: sourceTasks.length,\n\t\t\t\tsourceTag:\n\t\t\t\t\tcopyFromCurrent || copyFromTag\n\t\t\t\t\t\t? copyFromTag || getCurrentTag(projectRoot)\n\t\t\t\t\t\t: null,\n\t\t\t\tdescription:\n\t\t\t\t\tdescription || `Tag created on ${new Date().toLocaleDateString()}`\n\t\t\t};\n\t\t}\n\n\t\t// For text output, display success message\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.green.bold('✓ Tag Created Successfully') +\n\t\t\t\t\t\t`\\n\\nTag Name: ${chalk.cyan(tagName)}` +\n\t\t\t\t\t\t`\\nTasks Copied: ${chalk.yellow(sourceTasks.length)}` +\n\t\t\t\t\t\t(copyFromCurrent || copyFromTag\n\t\t\t\t\t\t\t? `\\nSource Tag: ${chalk.cyan(copyFromTag || getCurrentTag(projectRoot))}`\n\t\t\t\t\t\t\t: '') +\n\t\t\t\t\t\t(description ? `\\nDescription: ${chalk.gray(description)}` : ''),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\treturn {\n\t\t\ttagName,\n\t\t\tcreated: true,\n\t\t\ttasksCopied: sourceTasks.length,\n\t\t\tsourceTag:\n\t\t\t\tcopyFromCurrent || copyFromTag\n\t\t\t\t\t? 
copyFromTag || getCurrentTag(projectRoot)\n\t\t\t\t\t: null,\n\t\t\tdescription:\n\t\t\t\tdescription || `Tag created on ${new Date().toLocaleDateString()}`\n\t\t};\n\t} catch (error) {\n\t\tlogFn.error(`Error creating tag: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Delete an existing tag\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} tagName - Name of the tag to delete\n * @param {Object} options - Options object\n * @param {boolean} [options.yes=false] - Skip confirmation prompts\n * @param {Object} context - Context object containing session and projectRoot\n * @param {string} [context.projectRoot] - Project root path\n * @param {Object} [context.mcpLog] - MCP logger object (optional)\n * @param {string} outputFormat - Output format (text or json)\n * @returns {Promise} Result object with deletion details\n */\nasync function deleteTag(\n\ttasksPath,\n\ttagName,\n\toptions = {},\n\tcontext = {},\n\toutputFormat = 'text'\n) {\n\tconst { mcpLog, projectRoot } = context;\n\tconst { yes = false } = options;\n\n\t// Create a consistent logFn object regardless of context\n\tconst logFn = mcpLog || {\n\t\tinfo: (...args) => log('info', ...args),\n\t\twarn: (...args) => log('warn', ...args),\n\t\terror: (...args) => log('error', ...args),\n\t\tdebug: (...args) => log('debug', ...args),\n\t\tsuccess: (...args) => log('success', ...args)\n\t};\n\n\ttry {\n\t\t// Validate tag name\n\t\tif (!tagName || typeof tagName !== 'string') {\n\t\t\tthrow new Error('Tag name is required and must be a string');\n\t\t}\n\n\t\t// Prevent deletion of master tag\n\t\tif (tagName === 'master') {\n\t\t\tthrow new Error('Cannot delete the \"master\" tag');\n\t\t}\n\n\t\tlogFn.info(`Deleting tag: ${tagName}`);\n\n\t\t// Read current tasks data\n\t\tconst data = readJSON(tasksPath, projectRoot);\n\t\tif (!data) {\n\t\t\tthrow new Error(`Could not read tasks file at ${tasksPath}`);\n\t\t}\n\n\t\t// Use raw tagged data for tag operations - ensure we 
get the actual tagged structure\n\t\tlet rawData;\n\t\tif (data._rawTaggedData) {\n\t\t\t// If we have _rawTaggedData, use it (this is the clean tagged structure)\n\t\t\trawData = data._rawTaggedData;\n\t\t} else if (data.tasks && !data.master) {\n\t\t\t// This is legacy format - create a master tag structure\n\t\t\trawData = {\n\t\t\t\tmaster: {\n\t\t\t\t\ttasks: data.tasks,\n\t\t\t\t\tmetadata: data.metadata || {\n\t\t\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\t\t\tdescription: 'Tasks live here by default'\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// This is already in tagged format, use it directly but exclude internal fields\n\t\t\trawData = {};\n\t\t\tfor (const [key, value] of Object.entries(data)) {\n\t\t\t\tif (key !== '_rawTaggedData' && key !== 'tag') {\n\t\t\t\t\trawData[key] = value;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Check if tag exists\n\t\tif (!rawData[tagName]) {\n\t\t\tthrow new Error(`Tag \"${tagName}\" does not exist`);\n\t\t}\n\n\t\t// Get current tag to check if we're deleting the active tag\n\t\tconst currentTag = getCurrentTag(projectRoot);\n\t\tconst isCurrentTag = currentTag === tagName;\n\n\t\t// Get task count for confirmation\n\t\tconst tasks = getTasksForTag(rawData, tagName);\n\t\tconst taskCount = tasks.length;\n\n\t\t// If not forced and has tasks, require confirmation (for CLI)\n\t\tif (!yes && taskCount > 0 && outputFormat === 'text') {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.yellow.bold('⚠ WARNING: Tag Deletion') +\n\t\t\t\t\t\t`\\n\\nYou are about to delete tag \"${chalk.cyan(tagName)}\"` +\n\t\t\t\t\t\t`\\nThis will permanently delete ${chalk.red.bold(taskCount)} tasks` +\n\t\t\t\t\t\t'\\n\\nThis action cannot be undone!',\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\n\t\t\t// First 
confirmation\n\t\t\tconst firstConfirm = await inquirer.prompt([\n\t\t\t\t{\n\t\t\t\t\ttype: 'confirm',\n\t\t\t\t\tname: 'proceed',\n\t\t\t\t\tmessage: `Are you sure you want to delete tag \"${tagName}\" and its ${taskCount} tasks?`,\n\t\t\t\t\tdefault: false\n\t\t\t\t}\n\t\t\t]);\n\n\t\t\tif (!firstConfirm.proceed) {\n\t\t\t\tlogFn.info('Tag deletion cancelled by user');\n\t\t\t\tthrow new Error('Tag deletion cancelled');\n\t\t\t}\n\n\t\t\t// Second confirmation (double-check)\n\t\t\tconst secondConfirm = await inquirer.prompt([\n\t\t\t\t{\n\t\t\t\t\ttype: 'input',\n\t\t\t\t\tname: 'tagNameConfirm',\n\t\t\t\t\tmessage: `To confirm deletion, please type the tag name \"${tagName}\":`,\n\t\t\t\t\tvalidate: (input) => {\n\t\t\t\t\t\tif (input === tagName) {\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn `Please type exactly \"${tagName}\" to confirm deletion`;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t]);\n\n\t\t\tif (secondConfirm.tagNameConfirm !== tagName) {\n\t\t\t\tlogFn.info('Tag deletion cancelled - incorrect tag name confirmation');\n\t\t\t\tthrow new Error('Tag deletion cancelled');\n\t\t\t}\n\n\t\t\tlogFn.info('Double confirmation received, proceeding with deletion...');\n\t\t}\n\n\t\t// Delete the tag\n\t\tdelete rawData[tagName];\n\n\t\t// If we're deleting the current tag, switch to master\n\t\tif (isCurrentTag) {\n\t\t\tawait switchCurrentTag(projectRoot, 'master');\n\t\t\tlogFn.info('Switched current tag to \"master\"');\n\t\t}\n\n\t\t// Create clean data for writing (exclude _rawTaggedData to prevent corruption)\n\t\tconst cleanData = {};\n\t\tfor (const [key, value] of Object.entries(rawData)) {\n\t\t\tif (key !== '_rawTaggedData') {\n\t\t\t\tcleanData[key] = value;\n\t\t\t}\n\t\t}\n\n\t\t// Write the clean data back to file with proper context to avoid tag corruption\n\t\twriteJSON(tasksPath, cleanData, projectRoot);\n\n\t\tlogFn.success(`Successfully deleted tag \"${tagName}\"`);\n\n\t\t// For JSON output, return structured data\n\t\tif 
(outputFormat === 'json') {\n\t\t\treturn {\n\t\t\t\ttagName,\n\t\t\t\tdeleted: true,\n\t\t\t\ttasksDeleted: taskCount,\n\t\t\t\twasCurrentTag: isCurrentTag,\n\t\t\t\tswitchedToMaster: isCurrentTag\n\t\t\t};\n\t\t}\n\n\t\t// For text output, display success message\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.red.bold('✓ Tag Deleted Successfully') +\n\t\t\t\t\t\t`\\n\\nTag Name: ${chalk.cyan(tagName)}` +\n\t\t\t\t\t\t`\\nTasks Deleted: ${chalk.yellow(taskCount)}` +\n\t\t\t\t\t\t(isCurrentTag\n\t\t\t\t\t\t\t? `\\n${chalk.yellow('⚠ Switched current tag to \"master\"')}`\n\t\t\t\t\t\t\t: ''),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'red',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\treturn {\n\t\t\ttagName,\n\t\t\tdeleted: true,\n\t\t\ttasksDeleted: taskCount,\n\t\t\twasCurrentTag: isCurrentTag,\n\t\t\tswitchedToMaster: isCurrentTag\n\t\t};\n\t} catch (error) {\n\t\tlogFn.error(`Error deleting tag: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Enhance existing tags with metadata if they don't have it\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {Object} rawData - The raw tagged data\n * @param {Object} context - Context object\n * @returns {Promise} True if any tags were enhanced\n */\nasync function enhanceTagsWithMetadata(tasksPath, rawData, context = {}) {\n\tlet enhanced = false;\n\n\ttry {\n\t\t// Get file stats for creation date fallback\n\t\tlet fileCreatedDate;\n\t\ttry {\n\t\t\tconst stats = fs.statSync(tasksPath);\n\t\t\tfileCreatedDate =\n\t\t\t\tstats.birthtime < stats.mtime ? 
stats.birthtime : stats.mtime;\n\t\t} catch (error) {\n\t\t\tfileCreatedDate = new Date();\n\t\t}\n\n\t\tfor (const [tagName, tagData] of Object.entries(rawData)) {\n\t\t\t// Skip non-tag properties\n\t\t\tif (\n\t\t\t\ttagName === 'tasks' ||\n\t\t\t\ttagName === 'tag' ||\n\t\t\t\ttagName === '_rawTaggedData' ||\n\t\t\t\t!tagData ||\n\t\t\t\ttypeof tagData !== 'object' ||\n\t\t\t\t!Array.isArray(tagData.tasks)\n\t\t\t) {\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\t// Check if tag needs metadata enhancement\n\t\t\tif (!tagData.metadata) {\n\t\t\t\ttagData.metadata = {};\n\t\t\t\tenhanced = true;\n\t\t\t}\n\n\t\t\t// Add missing metadata fields\n\t\t\tif (!tagData.metadata.created) {\n\t\t\t\ttagData.metadata.created = fileCreatedDate.toISOString();\n\t\t\t\tenhanced = true;\n\t\t\t}\n\n\t\t\tif (!tagData.metadata.description) {\n\t\t\t\tif (tagName === 'master') {\n\t\t\t\t\ttagData.metadata.description = 'Tasks live here by default';\n\t\t\t\t} else {\n\t\t\t\t\ttagData.metadata.description = `Tag created on ${new Date(tagData.metadata.created).toLocaleDateString()}`;\n\t\t\t\t}\n\t\t\t\tenhanced = true;\n\t\t\t}\n\n\t\t\t// Add updated field if missing (set to created date initially)\n\t\t\tif (!tagData.metadata.updated) {\n\t\t\t\ttagData.metadata.updated = tagData.metadata.created;\n\t\t\t\tenhanced = true;\n\t\t\t}\n\t\t}\n\n\t\t// If we enhanced any tags, write the data back\n\t\tif (enhanced) {\n\t\t\t// Create clean data for writing (exclude _rawTaggedData to prevent corruption)\n\t\t\tconst cleanData = {};\n\t\t\tfor (const [key, value] of Object.entries(rawData)) {\n\t\t\t\tif (key !== '_rawTaggedData') {\n\t\t\t\t\tcleanData[key] = value;\n\t\t\t\t}\n\t\t\t}\n\t\t\twriteJSON(tasksPath, cleanData, context.projectRoot);\n\t\t}\n\t} catch (error) {\n\t\t// Don't throw - just log and continue\n\t\tconst logFn = context.mcpLog || {\n\t\t\twarn: (...args) => log('warn', ...args)\n\t\t};\n\t\tlogFn.warn(`Could not enhance tag metadata: 
${error.message}`);\n\t}\n\n\treturn enhanced;\n}\n\n/**\n * List all available tags with metadata\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {Object} options - Options object\n * @param {boolean} [options.showTaskCounts=true] - Whether to show task counts\n * @param {boolean} [options.showMetadata=false] - Whether to show metadata\n * @param {Object} context - Context object containing session and projectRoot\n * @param {string} [context.projectRoot] - Project root path\n * @param {Object} [context.mcpLog] - MCP logger object (optional)\n * @param {string} outputFormat - Output format (text or json)\n * @returns {Promise} Result object with tags list\n */\nasync function tags(\n\ttasksPath,\n\toptions = {},\n\tcontext = {},\n\toutputFormat = 'text'\n) {\n\tconst { mcpLog, projectRoot } = context;\n\tconst { showTaskCounts = true, showMetadata = false } = options;\n\n\t// Create a consistent logFn object regardless of context\n\tconst logFn = mcpLog || {\n\t\tinfo: (...args) => log('info', ...args),\n\t\twarn: (...args) => log('warn', ...args),\n\t\terror: (...args) => log('error', ...args),\n\t\tdebug: (...args) => log('debug', ...args),\n\t\tsuccess: (...args) => log('success', ...args)\n\t};\n\n\ttry {\n\t\tlogFn.info('Listing available tags');\n\n\t\t// Read current tasks data\n\t\tconst data = readJSON(tasksPath, projectRoot);\n\t\tif (!data) {\n\t\t\tthrow new Error(`Could not read tasks file at ${tasksPath}`);\n\t\t}\n\n\t\t// Get current tag\n\t\tconst currentTag = getCurrentTag(projectRoot);\n\n\t\t// Use raw tagged data if available, otherwise use the data directly\n\t\tconst rawData = data._rawTaggedData || data;\n\n\t\t// Enhance existing tags with metadata if they don't have it\n\t\tawait enhanceTagsWithMetadata(tasksPath, rawData, context);\n\n\t\t// Extract all tags\n\t\tconst tagList = [];\n\t\tfor (const [tagName, tagData] of Object.entries(rawData)) {\n\t\t\t// Skip non-tag properties (like legacy 'tasks' array, 'tag', 
'_rawTaggedData')\n\t\t\tif (\n\t\t\t\ttagName === 'tasks' ||\n\t\t\t\ttagName === 'tag' ||\n\t\t\t\ttagName === '_rawTaggedData' ||\n\t\t\t\t!tagData ||\n\t\t\t\ttypeof tagData !== 'object' ||\n\t\t\t\t!Array.isArray(tagData.tasks)\n\t\t\t) {\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tconst tasks = tagData.tasks || [];\n\t\t\tconst metadata = tagData.metadata || {};\n\n\t\t\ttagList.push({\n\t\t\t\tname: tagName,\n\t\t\t\tisCurrent: tagName === currentTag,\n\t\t\t\tcompletedTasks: tasks.filter(\n\t\t\t\t\t(t) => t.status === 'done' || t.status === 'completed'\n\t\t\t\t).length,\n\t\t\t\ttasks: tasks || [],\n\t\t\t\tcreated: metadata.created || 'Unknown',\n\t\t\t\tdescription: metadata.description || 'No description'\n\t\t\t});\n\t\t}\n\n\t\t// Sort tags: current tag first, then alphabetically\n\t\ttagList.sort((a, b) => {\n\t\t\tif (a.isCurrent) return -1;\n\t\t\tif (b.isCurrent) return 1;\n\t\t\treturn a.name.localeCompare(b.name);\n\t\t});\n\n\t\tlogFn.success(`Found ${tagList.length} tags`);\n\n\t\t// For JSON output, return structured data\n\t\tif (outputFormat === 'json') {\n\t\t\treturn {\n\t\t\t\ttags: tagList,\n\t\t\t\tcurrentTag,\n\t\t\t\ttotalTags: tagList.length\n\t\t\t};\n\t\t}\n\n\t\t// For text output, display formatted table\n\t\tif (outputFormat === 'text') {\n\t\t\tif (tagList.length === 0) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(chalk.yellow('No tags found'), {\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t})\n\t\t\t\t);\n\t\t\t\treturn { tags: [], currentTag, totalTags: 0 };\n\t\t\t}\n\n\t\t\t// Create table headers based on options\n\t\t\tconst headers = [chalk.cyan.bold('Tag Name')];\n\t\t\tif (showTaskCounts) {\n\t\t\t\theaders.push(chalk.cyan.bold('Tasks'));\n\t\t\t\theaders.push(chalk.cyan.bold('Completed'));\n\t\t\t}\n\t\t\tif (showMetadata) 
{\n\t\t\t\theaders.push(chalk.cyan.bold('Created'));\n\t\t\t\theaders.push(chalk.cyan.bold('Description'));\n\t\t\t}\n\n\t\t\tconst table = new Table({\n\t\t\t\thead: headers,\n\t\t\t\tcolWidths: showMetadata ? [20, 10, 12, 15, 50] : [25, 10, 12]\n\t\t\t});\n\n\t\t\t// Add rows\n\t\t\ttagList.forEach((tag) => {\n\t\t\t\tconst row = [];\n\n\t\t\t\t// Tag name with current indicator\n\t\t\t\tconst tagDisplay = tag.isCurrent\n\t\t\t\t\t? `${chalk.green('●')} ${chalk.green.bold(tag.name)} ${chalk.gray('(current)')}`\n\t\t\t\t\t: ` ${tag.name}`;\n\t\t\t\trow.push(tagDisplay);\n\n\t\t\t\tif (showTaskCounts) {\n\t\t\t\t\trow.push(chalk.white(tag.tasks.length.toString()));\n\t\t\t\t\trow.push(chalk.green(tag.completedTasks.toString()));\n\t\t\t\t}\n\n\t\t\t\tif (showMetadata) {\n\t\t\t\t\tconst createdDate =\n\t\t\t\t\t\ttag.created !== 'Unknown'\n\t\t\t\t\t\t\t? new Date(tag.created).toLocaleDateString()\n\t\t\t\t\t\t\t: 'Unknown';\n\t\t\t\t\trow.push(chalk.gray(createdDate));\n\t\t\t\t\trow.push(chalk.gray(truncate(tag.description, 50)));\n\t\t\t\t}\n\n\t\t\t\ttable.push(row);\n\t\t\t});\n\n\t\t\t// console.log(\n\t\t\t// \tboxen(\n\t\t\t// \t\tchalk.white.bold('Available Tags') +\n\t\t\t// \t\t\t`\\n\\nCurrent Tag: ${chalk.green.bold(currentTag)}`,\n\t\t\t// \t\t{\n\t\t\t// \t\t\tpadding: { top: 0, bottom: 1, left: 1, right: 1 },\n\t\t\t// \t\t\tborderColor: 'blue',\n\t\t\t// \t\t\tborderStyle: 'round',\n\t\t\t// \t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t// \t\t}\n\t\t\t// \t)\n\t\t\t// );\n\n\t\t\tconsole.log(table.toString());\n\t\t}\n\n\t\treturn {\n\t\t\ttags: tagList,\n\t\t\tcurrentTag,\n\t\t\ttotalTags: tagList.length\n\t\t};\n\t} catch (error) {\n\t\tlogFn.error(`Error listing tags: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Switch to a different tag context\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} tagName - Name of the tag to switch to\n * @param {Object} options - Options object\n * @param {Object} context - 
Context object containing session and projectRoot\n * @param {string} [context.projectRoot] - Project root path\n * @param {Object} [context.mcpLog] - MCP logger object (optional)\n * @param {string} outputFormat - Output format (text or json)\n * @returns {Promise} Result object with switch details\n */\nasync function useTag(\n\ttasksPath,\n\ttagName,\n\toptions = {},\n\tcontext = {},\n\toutputFormat = 'text'\n) {\n\tconst { mcpLog, projectRoot } = context;\n\n\t// Create a consistent logFn object regardless of context\n\tconst logFn = mcpLog || {\n\t\tinfo: (...args) => log('info', ...args),\n\t\twarn: (...args) => log('warn', ...args),\n\t\terror: (...args) => log('error', ...args),\n\t\tdebug: (...args) => log('debug', ...args),\n\t\tsuccess: (...args) => log('success', ...args)\n\t};\n\n\ttry {\n\t\t// Validate tag name\n\t\tif (!tagName || typeof tagName !== 'string') {\n\t\t\tthrow new Error('Tag name is required and must be a string');\n\t\t}\n\n\t\tlogFn.info(`Switching to tag: ${tagName}`);\n\n\t\t// Read current tasks data to verify tag exists\n\t\tconst data = readJSON(tasksPath, projectRoot);\n\t\tif (!data) {\n\t\t\tthrow new Error(`Could not read tasks file at ${tasksPath}`);\n\t\t}\n\n\t\t// Use raw tagged data to check if tag exists\n\t\tconst rawData = data._rawTaggedData || data;\n\n\t\t// Check if tag exists\n\t\tif (!rawData[tagName]) {\n\t\t\tthrow new Error(`Tag \"${tagName}\" does not exist`);\n\t\t}\n\n\t\t// Get current tag\n\t\tconst previousTag = getCurrentTag(projectRoot);\n\n\t\t// Switch to the new tag\n\t\tawait switchCurrentTag(projectRoot, tagName);\n\n\t\t// Get task count for the new tag - read tasks specifically for this tag\n\t\tconst tagData = readJSON(tasksPath, projectRoot, tagName);\n\t\tconst tasks = tagData ? 
tagData.tasks || [] : [];\n\t\tconst taskCount = tasks.length;\n\n\t\t// Find the next task to work on in this tag\n\t\tconst nextTask = findNextTask(tasks);\n\n\t\tlogFn.success(`Successfully switched to tag \"${tagName}\"`);\n\n\t\t// For JSON output, return structured data\n\t\tif (outputFormat === 'json') {\n\t\t\treturn {\n\t\t\t\tpreviousTag,\n\t\t\t\tcurrentTag: tagName,\n\t\t\t\tswitched: true,\n\t\t\t\ttaskCount,\n\t\t\t\tnextTask\n\t\t\t};\n\t\t}\n\n\t\t// For text output, display success message\n\t\tif (outputFormat === 'text') {\n\t\t\tlet nextTaskInfo = '';\n\t\t\tif (nextTask) {\n\t\t\t\tnextTaskInfo = `\\nNext Task: ${chalk.cyan(`#${nextTask.id}`)} - ${chalk.white(nextTask.title)}`;\n\t\t\t} else {\n\t\t\t\tnextTaskInfo = `\\nNext Task: ${chalk.gray('No eligible tasks available')}`;\n\t\t\t}\n\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.green.bold('✓ Tag Switched Successfully') +\n\t\t\t\t\t\t`\\n\\nPrevious Tag: ${chalk.cyan(previousTag)}` +\n\t\t\t\t\t\t`\\nCurrent Tag: ${chalk.green.bold(tagName)}` +\n\t\t\t\t\t\t`\\nAvailable Tasks: ${chalk.yellow(taskCount)}` +\n\t\t\t\t\t\tnextTaskInfo,\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\treturn {\n\t\t\tpreviousTag,\n\t\t\tcurrentTag: tagName,\n\t\t\tswitched: true,\n\t\t\ttaskCount,\n\t\t\tnextTask\n\t\t};\n\t} catch (error) {\n\t\tlogFn.error(`Error switching tag: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Rename an existing tag\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} oldName - Current name of the tag\n * @param {string} newName - New name for the tag\n * @param {Object} options - Options object\n * @param {Object} context - Context object containing session and projectRoot\n * @param {string} [context.projectRoot] - Project root path\n * @param {Object} [context.mcpLog] - MCP logger 
object (optional)\n * @param {string} outputFormat - Output format (text or json)\n * @returns {Promise} Result object with rename details\n */\nasync function renameTag(\n\ttasksPath,\n\toldName,\n\tnewName,\n\toptions = {},\n\tcontext = {},\n\toutputFormat = 'text'\n) {\n\tconst { mcpLog, projectRoot } = context;\n\n\t// Create a consistent logFn object regardless of context\n\tconst logFn = mcpLog || {\n\t\tinfo: (...args) => log('info', ...args),\n\t\twarn: (...args) => log('warn', ...args),\n\t\terror: (...args) => log('error', ...args),\n\t\tdebug: (...args) => log('debug', ...args),\n\t\tsuccess: (...args) => log('success', ...args)\n\t};\n\n\ttry {\n\t\t// Validate parameters\n\t\tif (!oldName || typeof oldName !== 'string') {\n\t\t\tthrow new Error('Old tag name is required and must be a string');\n\t\t}\n\t\tif (!newName || typeof newName !== 'string') {\n\t\t\tthrow new Error('New tag name is required and must be a string');\n\t\t}\n\n\t\t// Validate new tag name format\n\t\tif (!/^[a-zA-Z0-9_-]+$/.test(newName)) {\n\t\t\tthrow new Error(\n\t\t\t\t'New tag name can only contain letters, numbers, hyphens, and underscores'\n\t\t\t);\n\t\t}\n\n\t\t// Prevent renaming master tag\n\t\tif (oldName === 'master') {\n\t\t\tthrow new Error('Cannot rename the \"master\" tag');\n\t\t}\n\n\t\t// Reserved tag names\n\t\tconst reservedNames = ['master', 'main', 'default'];\n\t\tif (reservedNames.includes(newName.toLowerCase())) {\n\t\t\tthrow new Error(`\"${newName}\" is a reserved tag name`);\n\t\t}\n\n\t\tlogFn.info(`Renaming tag from \"${oldName}\" to \"${newName}\"`);\n\n\t\t// Read current tasks data\n\t\tconst data = readJSON(tasksPath, projectRoot);\n\t\tif (!data) {\n\t\t\tthrow new Error(`Could not read tasks file at ${tasksPath}`);\n\t\t}\n\n\t\t// Use raw tagged data for tag operations\n\t\tconst rawData = data._rawTaggedData || data;\n\n\t\t// Check if old tag exists\n\t\tif (!rawData[oldName]) {\n\t\t\tthrow new Error(`Tag \"${oldName}\" does not 
exist`);\n\t\t}\n\n\t\t// Check if new tag name already exists\n\t\tif (rawData[newName]) {\n\t\t\tthrow new Error(`Tag \"${newName}\" already exists`);\n\t\t}\n\n\t\t// Get current tag to check if we're renaming the active tag\n\t\tconst currentTag = getCurrentTag(projectRoot);\n\t\tconst isCurrentTag = currentTag === oldName;\n\n\t\t// Rename the tag by copying data and deleting old\n\t\trawData[newName] = { ...rawData[oldName] };\n\n\t\t// Update metadata if it exists\n\t\tif (rawData[newName].metadata) {\n\t\t\trawData[newName].metadata.renamed = {\n\t\t\t\tfrom: oldName,\n\t\t\t\tdate: new Date().toISOString()\n\t\t\t};\n\t\t}\n\n\t\tdelete rawData[oldName];\n\n\t\t// If we're renaming the current tag, update the current tag reference\n\t\tif (isCurrentTag) {\n\t\t\tawait switchCurrentTag(projectRoot, newName);\n\t\t\tlogFn.info(`Updated current tag reference to \"${newName}\"`);\n\t\t}\n\n\t\t// Create clean data for writing (exclude _rawTaggedData to prevent corruption)\n\t\tconst cleanData = {};\n\t\tfor (const [key, value] of Object.entries(rawData)) {\n\t\t\tif (key !== '_rawTaggedData') {\n\t\t\t\tcleanData[key] = value;\n\t\t\t}\n\t\t}\n\n\t\t// Write the clean data back to file with proper context to avoid tag corruption\n\t\twriteJSON(tasksPath, cleanData, projectRoot);\n\n\t\t// Get task count\n\t\tconst tasks = getTasksForTag(rawData, newName);\n\t\tconst taskCount = tasks.length;\n\n\t\tlogFn.success(`Successfully renamed tag from \"${oldName}\" to \"${newName}\"`);\n\n\t\t// For JSON output, return structured data\n\t\tif (outputFormat === 'json') {\n\t\t\treturn {\n\t\t\t\toldName,\n\t\t\t\tnewName,\n\t\t\t\trenamed: true,\n\t\t\t\ttaskCount,\n\t\t\t\twasCurrentTag: isCurrentTag,\n\t\t\t\tisCurrentTag: isCurrentTag\n\t\t\t};\n\t\t}\n\n\t\t// For text output, display success message\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.green.bold('✓ Tag Renamed Successfully') +\n\t\t\t\t\t\t`\\n\\nOld Name: 
${chalk.cyan(oldName)}` +\n\t\t\t\t\t\t`\\nNew Name: ${chalk.green.bold(newName)}` +\n\t\t\t\t\t\t`\\nTasks: ${chalk.yellow(taskCount)}` +\n\t\t\t\t\t\t(isCurrentTag ? `\\n${chalk.green('✓ Current tag updated')}` : ''),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\treturn {\n\t\t\toldName,\n\t\t\tnewName,\n\t\t\trenamed: true,\n\t\t\ttaskCount,\n\t\t\twasCurrentTag: isCurrentTag,\n\t\t\tisCurrentTag: isCurrentTag\n\t\t};\n\t} catch (error) {\n\t\tlogFn.error(`Error renaming tag: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Copy an existing tag to create a new tag with the same tasks\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} sourceName - Name of the source tag to copy from\n * @param {string} targetName - Name of the new tag to create\n * @param {Object} options - Options object\n * @param {string} [options.description] - Optional description for the new tag\n * @param {Object} context - Context object containing session and projectRoot\n * @param {string} [context.projectRoot] - Project root path\n * @param {Object} [context.mcpLog] - MCP logger object (optional)\n * @param {string} outputFormat - Output format (text or json)\n * @returns {Promise} Result object with copy details\n */\nasync function copyTag(\n\ttasksPath,\n\tsourceName,\n\ttargetName,\n\toptions = {},\n\tcontext = {},\n\toutputFormat = 'text'\n) {\n\tconst { mcpLog, projectRoot } = context;\n\tconst { description } = options;\n\n\t// Create a consistent logFn object regardless of context\n\tconst logFn = mcpLog || {\n\t\tinfo: (...args) => log('info', ...args),\n\t\twarn: (...args) => log('warn', ...args),\n\t\terror: (...args) => log('error', ...args),\n\t\tdebug: (...args) => log('debug', ...args),\n\t\tsuccess: (...args) => log('success', ...args)\n\t};\n\n\ttry {\n\t\t// Validate 
parameters\n\t\tif (!sourceName || typeof sourceName !== 'string') {\n\t\t\tthrow new Error('Source tag name is required and must be a string');\n\t\t}\n\t\tif (!targetName || typeof targetName !== 'string') {\n\t\t\tthrow new Error('Target tag name is required and must be a string');\n\t\t}\n\n\t\t// Validate target tag name format\n\t\tif (!/^[a-zA-Z0-9_-]+$/.test(targetName)) {\n\t\t\tthrow new Error(\n\t\t\t\t'Target tag name can only contain letters, numbers, hyphens, and underscores'\n\t\t\t);\n\t\t}\n\n\t\t// Reserved tag names\n\t\tconst reservedNames = ['master', 'main', 'default'];\n\t\tif (reservedNames.includes(targetName.toLowerCase())) {\n\t\t\tthrow new Error(`\"${targetName}\" is a reserved tag name`);\n\t\t}\n\n\t\tlogFn.info(`Copying tag from \"${sourceName}\" to \"${targetName}\"`);\n\n\t\t// Read current tasks data\n\t\tconst data = readJSON(tasksPath, projectRoot);\n\t\tif (!data) {\n\t\t\tthrow new Error(`Could not read tasks file at ${tasksPath}`);\n\t\t}\n\n\t\t// Use raw tagged data for tag operations\n\t\tconst rawData = data._rawTaggedData || data;\n\n\t\t// Check if source tag exists\n\t\tif (!rawData[sourceName]) {\n\t\t\tthrow new Error(`Source tag \"${sourceName}\" does not exist`);\n\t\t}\n\n\t\t// Check if target tag already exists\n\t\tif (rawData[targetName]) {\n\t\t\tthrow new Error(`Target tag \"${targetName}\" already exists`);\n\t\t}\n\n\t\t// Get source tasks\n\t\tconst sourceTasks = getTasksForTag(rawData, sourceName);\n\n\t\t// Create deep copy of the source tag data\n\t\trawData[targetName] = {\n\t\t\ttasks: JSON.parse(JSON.stringify(sourceTasks)), // Deep copy tasks\n\t\t\tmetadata: {\n\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\tdescription:\n\t\t\t\t\tdescription ||\n\t\t\t\t\t`Copy of \"${sourceName}\" created on ${new Date().toLocaleDateString()}`,\n\t\t\t\tcopiedFrom: {\n\t\t\t\t\ttag: sourceName,\n\t\t\t\t\tdate: new 
Date().toISOString()\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\n\t\t// Create clean data for writing (exclude _rawTaggedData to prevent corruption)\n\t\tconst cleanData = {};\n\t\tfor (const [key, value] of Object.entries(rawData)) {\n\t\t\tif (key !== '_rawTaggedData') {\n\t\t\t\tcleanData[key] = value;\n\t\t\t}\n\t\t}\n\n\t\t// Write the clean data back to file with proper context to avoid tag corruption\n\t\twriteJSON(tasksPath, cleanData, projectRoot);\n\n\t\tlogFn.success(\n\t\t\t`Successfully copied tag from \"${sourceName}\" to \"${targetName}\"`\n\t\t);\n\n\t\t// For JSON output, return structured data\n\t\tif (outputFormat === 'json') {\n\t\t\treturn {\n\t\t\t\tsourceName,\n\t\t\t\ttargetName,\n\t\t\t\tcopied: true,\n\t\t\t\tdescription:\n\t\t\t\t\tdescription ||\n\t\t\t\t\t`Copy of \"${sourceName}\" created on ${new Date().toLocaleDateString()}`\n\t\t\t};\n\t\t}\n\n\t\t// For text output, display success message\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.green.bold('✓ Tag Copied Successfully') +\n\t\t\t\t\t\t`\\n\\nSource Tag: ${chalk.cyan(sourceName)}` +\n\t\t\t\t\t\t`\\nTarget Tag: ${chalk.green.bold(targetName)}` +\n\t\t\t\t\t\t`\\nTasks Copied: ${chalk.yellow(sourceTasks.length)}` +\n\t\t\t\t\t\t(description ? 
`\\nDescription: ${chalk.gray(description)}` : ''),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\treturn {\n\t\t\tsourceName,\n\t\t\ttargetName,\n\t\t\tcopied: true,\n\t\t\tdescription:\n\t\t\t\tdescription ||\n\t\t\t\t`Copy of \"${sourceName}\" created on ${new Date().toLocaleDateString()}`\n\t\t};\n\t} catch (error) {\n\t\tlogFn.error(`Error copying tag: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Helper function to switch the current tag in state.json\n * @param {string} projectRoot - Project root directory\n * @param {string} tagName - Name of the tag to switch to\n * @returns {Promise}\n */\nasync function switchCurrentTag(projectRoot, tagName) {\n\ttry {\n\t\tconst statePath = path.join(projectRoot, '.taskmaster', 'state.json');\n\n\t\t// Read current state or create default\n\t\tlet state = {};\n\t\tif (fs.existsSync(statePath)) {\n\t\t\tconst rawState = fs.readFileSync(statePath, 'utf8');\n\t\t\tstate = JSON.parse(rawState);\n\t\t}\n\n\t\t// Update current tag and timestamp\n\t\tstate.currentTag = tagName;\n\t\tstate.lastSwitched = new Date().toISOString();\n\n\t\t// Ensure other required state properties exist\n\t\tif (!state.branchTagMapping) {\n\t\t\tstate.branchTagMapping = {};\n\t\t}\n\t\tif (state.migrationNoticeShown === undefined) {\n\t\t\tstate.migrationNoticeShown = false;\n\t\t}\n\n\t\t// Write updated state\n\t\tfs.writeFileSync(statePath, JSON.stringify(state, null, 2), 'utf8');\n\t} catch (error) {\n\t\tlog('warn', `Could not update current tag in state.json: ${error.message}`);\n\t\t// Don't throw - this is not critical for tag operations\n\t}\n}\n\n/**\n * Update branch-tag mapping in state.json\n * @param {string} projectRoot - Project root directory\n * @param {string} branchName - Git branch name\n * @param {string} tagName - Tag name to map to\n * @returns {Promise}\n 
*/\nasync function updateBranchTagMapping(projectRoot, branchName, tagName) {\n\ttry {\n\t\tconst statePath = path.join(projectRoot, '.taskmaster', 'state.json');\n\n\t\t// Read current state or create default\n\t\tlet state = {};\n\t\tif (fs.existsSync(statePath)) {\n\t\t\tconst rawState = fs.readFileSync(statePath, 'utf8');\n\t\t\tstate = JSON.parse(rawState);\n\t\t}\n\n\t\t// Ensure branchTagMapping exists\n\t\tif (!state.branchTagMapping) {\n\t\t\tstate.branchTagMapping = {};\n\t\t}\n\n\t\t// Update the mapping\n\t\tstate.branchTagMapping[branchName] = tagName;\n\n\t\t// Write updated state\n\t\tfs.writeFileSync(statePath, JSON.stringify(state, null, 2), 'utf8');\n\t} catch (error) {\n\t\tlog('warn', `Could not update branch-tag mapping: ${error.message}`);\n\t\t// Don't throw - this is not critical for tag operations\n\t}\n}\n\n/**\n * Get tag name for a git branch from state.json mapping\n * @param {string} projectRoot - Project root directory\n * @param {string} branchName - Git branch name\n * @returns {Promise} Mapped tag name or null if not found\n */\nasync function getTagForBranch(projectRoot, branchName) {\n\ttry {\n\t\tconst statePath = path.join(projectRoot, '.taskmaster', 'state.json');\n\n\t\tif (!fs.existsSync(statePath)) {\n\t\t\treturn null;\n\t\t}\n\n\t\tconst rawState = fs.readFileSync(statePath, 'utf8');\n\t\tconst state = JSON.parse(rawState);\n\n\t\treturn state.branchTagMapping?.[branchName] || null;\n\t} catch (error) {\n\t\treturn null;\n\t}\n}\n\n/**\n * Create a tag from a git branch name\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} branchName - Git branch name to create tag from\n * @param {Object} options - Options object\n * @param {boolean} [options.copyFromCurrent] - Copy tasks from current tag\n * @param {string} [options.copyFromTag] - Copy tasks from specific tag\n * @param {string} [options.description] - Custom description for the tag\n * @param {boolean} [options.autoSwitch] - Automatically 
switch to the new tag\n * @param {Object} context - Context object containing session and projectRoot\n * @param {string} [context.projectRoot] - Project root path\n * @param {Object} [context.mcpLog] - MCP logger object (optional)\n * @param {string} outputFormat - Output format (text or json)\n * @returns {Promise} Result object with creation details\n */\nasync function createTagFromBranch(\n\ttasksPath,\n\tbranchName,\n\toptions = {},\n\tcontext = {},\n\toutputFormat = 'text'\n) {\n\tconst { mcpLog, projectRoot } = context;\n\tconst { copyFromCurrent, copyFromTag, description, autoSwitch } = options;\n\n\t// Import git utilities\n\tconst { sanitizeBranchNameForTag, isValidBranchForTag } = await import(\n\t\t'../utils/git-utils.js'\n\t);\n\n\t// Create a consistent logFn object regardless of context\n\tconst logFn = mcpLog || {\n\t\tinfo: (...args) => log('info', ...args),\n\t\twarn: (...args) => log('warn', ...args),\n\t\terror: (...args) => log('error', ...args),\n\t\tdebug: (...args) => log('debug', ...args),\n\t\tsuccess: (...args) => log('success', ...args)\n\t};\n\n\ttry {\n\t\t// Validate branch name\n\t\tif (!branchName || typeof branchName !== 'string') {\n\t\t\tthrow new Error('Branch name is required and must be a string');\n\t\t}\n\n\t\t// Check if branch name is valid for tag creation\n\t\tif (!isValidBranchForTag(branchName)) {\n\t\t\tthrow new Error(\n\t\t\t\t`Branch \"${branchName}\" cannot be converted to a valid tag name`\n\t\t\t);\n\t\t}\n\n\t\t// Sanitize branch name to create tag name\n\t\tconst tagName = sanitizeBranchNameForTag(branchName);\n\n\t\tlogFn.info(`Creating tag \"${tagName}\" from git branch \"${branchName}\"`);\n\n\t\t// Create the tag using existing createTag function\n\t\tconst createResult = await createTag(\n\t\t\ttasksPath,\n\t\t\ttagName,\n\t\t\t{\n\t\t\t\tcopyFromCurrent,\n\t\t\t\tcopyFromTag,\n\t\t\t\tdescription:\n\t\t\t\t\tdescription || `Tag created from git branch 
\"${branchName}\"`\n\t\t\t},\n\t\t\tcontext,\n\t\t\toutputFormat\n\t\t);\n\n\t\t// Update branch-tag mapping\n\t\tawait updateBranchTagMapping(projectRoot, branchName, tagName);\n\t\tlogFn.info(`Updated branch-tag mapping: ${branchName} -> ${tagName}`);\n\n\t\t// Auto-switch to the new tag if requested\n\t\tif (autoSwitch) {\n\t\t\tawait switchCurrentTag(projectRoot, tagName);\n\t\t\tlogFn.info(`Automatically switched to tag \"${tagName}\"`);\n\t\t}\n\n\t\t// For JSON output, return structured data\n\t\tif (outputFormat === 'json') {\n\t\t\treturn {\n\t\t\t\t...createResult,\n\t\t\t\tbranchName,\n\t\t\t\ttagName,\n\t\t\t\tmappingUpdated: true,\n\t\t\t\tautoSwitched: autoSwitch || false\n\t\t\t};\n\t\t}\n\n\t\t// For text output, the createTag function already handles display\n\t\treturn {\n\t\t\tbranchName,\n\t\t\ttagName,\n\t\t\tcreated: true,\n\t\t\tmappingUpdated: true,\n\t\t\tautoSwitched: autoSwitch || false\n\t\t};\n\t} catch (error) {\n\t\tlogFn.error(`Error creating tag from branch: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Automatically switch tag based on current git branch\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {Object} options - Options object\n * @param {boolean} [options.createIfMissing] - Create tag if it doesn't exist\n * @param {boolean} [options.copyFromCurrent] - Copy tasks when creating new tag\n * @param {Object} context - Context object containing session and projectRoot\n * @param {string} [context.projectRoot] - Project root path\n * @param {Object} [context.mcpLog] - MCP logger object (optional)\n * @param {string} outputFormat - Output format (text or json)\n * @returns {Promise} Result object with switch details\n */\nasync function autoSwitchTagForBranch(\n\ttasksPath,\n\toptions = {},\n\tcontext = {},\n\toutputFormat = 'text'\n) {\n\tconst { mcpLog, projectRoot } = context;\n\tconst { createIfMissing, copyFromCurrent } = options;\n\n\t// Import git utilities\n\tconst 
{\n\t\tgetCurrentBranch,\n\t\tisGitRepository,\n\t\tsanitizeBranchNameForTag,\n\t\tisValidBranchForTag\n\t} = await import('../utils/git-utils.js');\n\n\t// Create a consistent logFn object regardless of context\n\tconst logFn = mcpLog || {\n\t\tinfo: (...args) => log('info', ...args),\n\t\twarn: (...args) => log('warn', ...args),\n\t\terror: (...args) => log('error', ...args),\n\t\tdebug: (...args) => log('debug', ...args),\n\t\tsuccess: (...args) => log('success', ...args)\n\t};\n\n\ttry {\n\t\t// Check if we're in a git repository\n\t\tif (!(await isGitRepository(projectRoot))) {\n\t\t\tlogFn.warn('Not in a git repository, cannot auto-switch tags');\n\t\t\treturn { switched: false, reason: 'not_git_repo' };\n\t\t}\n\n\t\t// Get current git branch\n\t\tconst currentBranch = await getCurrentBranch(projectRoot);\n\t\tif (!currentBranch) {\n\t\t\tlogFn.warn('Could not determine current git branch');\n\t\t\treturn { switched: false, reason: 'no_current_branch' };\n\t\t}\n\n\t\tlogFn.info(`Current git branch: ${currentBranch}`);\n\n\t\t// Check if branch is valid for tag creation\n\t\tif (!isValidBranchForTag(currentBranch)) {\n\t\t\tlogFn.info(`Branch \"${currentBranch}\" is not suitable for tag creation`);\n\t\t\treturn {\n\t\t\t\tswitched: false,\n\t\t\t\treason: 'invalid_branch_for_tag',\n\t\t\t\tbranchName: currentBranch\n\t\t\t};\n\t\t}\n\n\t\t// Check if there's already a mapping for this branch\n\t\tlet tagName = await getTagForBranch(projectRoot, currentBranch);\n\n\t\tif (!tagName) {\n\t\t\t// No mapping exists, create tag name from branch\n\t\t\ttagName = sanitizeBranchNameForTag(currentBranch);\n\t\t}\n\n\t\t// Check if tag exists\n\t\tconst data = readJSON(tasksPath, projectRoot);\n\t\tconst rawData = data._rawTaggedData || data;\n\t\tconst tagExists = rawData[tagName];\n\n\t\tif (!tagExists && createIfMissing) {\n\t\t\t// Create the tag from branch\n\t\t\tlogFn.info(`Creating new tag \"${tagName}\" for branch \"${currentBranch}\"`);\n\n\t\t\tconst 
createResult = await createTagFromBranch(\n\t\t\t\ttasksPath,\n\t\t\t\tcurrentBranch,\n\t\t\t\t{\n\t\t\t\t\tcopyFromCurrent,\n\t\t\t\t\tautoSwitch: true\n\t\t\t\t},\n\t\t\t\tcontext,\n\t\t\t\toutputFormat\n\t\t\t);\n\n\t\t\treturn {\n\t\t\t\tswitched: true,\n\t\t\t\tcreated: true,\n\t\t\t\tbranchName: currentBranch,\n\t\t\t\ttagName,\n\t\t\t\t...createResult\n\t\t\t};\n\t\t} else if (tagExists) {\n\t\t\t// Tag exists, switch to it\n\t\t\tlogFn.info(\n\t\t\t\t`Switching to existing tag \"${tagName}\" for branch \"${currentBranch}\"`\n\t\t\t);\n\n\t\t\tconst switchResult = await useTag(\n\t\t\t\ttasksPath,\n\t\t\t\ttagName,\n\t\t\t\t{},\n\t\t\t\tcontext,\n\t\t\t\toutputFormat\n\t\t\t);\n\n\t\t\t// Update mapping if it didn't exist\n\t\t\tif (!(await getTagForBranch(projectRoot, currentBranch))) {\n\t\t\t\tawait updateBranchTagMapping(projectRoot, currentBranch, tagName);\n\t\t\t}\n\n\t\t\treturn {\n\t\t\t\tswitched: true,\n\t\t\t\tcreated: false,\n\t\t\t\tbranchName: currentBranch,\n\t\t\t\ttagName,\n\t\t\t\t...switchResult\n\t\t\t};\n\t\t} else {\n\t\t\t// Tag doesn't exist and createIfMissing is false\n\t\t\tlogFn.warn(\n\t\t\t\t`Tag \"${tagName}\" for branch \"${currentBranch}\" does not exist`\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tswitched: false,\n\t\t\t\treason: 'tag_not_found',\n\t\t\t\tbranchName: currentBranch,\n\t\t\t\ttagName\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\tlogFn.error(`Error in auto-switch tag for branch: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Check git workflow configuration and perform auto-switch if enabled\n * @param {string} projectRoot - Project root directory\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {Object} context - Context object\n * @returns {Promise} Switch result or null if not enabled\n */\nasync function checkAndAutoSwitchTag(projectRoot, tasksPath, context = {}) {\n\ttry {\n\t\t// Read configuration\n\t\tconst configPath = path.join(projectRoot, '.taskmaster', 'config.json');\n\t\tif 
(!fs.existsSync(configPath)) {\n\t\t\treturn null;\n\t\t}\n\n\t\tconst rawConfig = fs.readFileSync(configPath, 'utf8');\n\t\tconst config = JSON.parse(rawConfig);\n\n\t\t// Git workflow has been removed - return null to disable auto-switching\n\t\treturn null;\n\n\t\t// Perform auto-switch\n\t\treturn await autoSwitchTagForBranch(\n\t\t\ttasksPath,\n\t\t\t{ createIfMissing: true, copyFromCurrent: false },\n\t\t\tcontext,\n\t\t\t'json'\n\t\t);\n\t} catch (error) {\n\t\t// Silently fail - this is not critical\n\t\treturn null;\n\t}\n}\n\n// Export all tag management functions\nexport {\n\tcreateTag,\n\tdeleteTag,\n\ttags,\n\tuseTag,\n\trenameTag,\n\tcopyTag,\n\tswitchCurrentTag,\n\tupdateBranchTagMapping,\n\tgetTagForBranch,\n\tcreateTagFromBranch,\n\tautoSwitchTagForBranch,\n\tcheckAndAutoSwitchTag\n};\n"], ["/claude-task-master/scripts/modules/task-manager/update-subtask-by-id.js", "import fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport Table from 'cli-table3';\n\nimport {\n\tgetStatusWithColor,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayAiUsageSummary\n} from '../ui.js';\nimport {\n\tlog as consoleLog,\n\treadJSON,\n\twriteJSON,\n\ttruncate,\n\tisSilentMode,\n\tfindProjectRoot,\n\tflattenTasksWithSubtasks\n} from '../utils.js';\nimport { generateTextService } from '../ai-services-unified.js';\nimport { getDebugFlag } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport generateTaskFiles from './generate-task-files.js';\nimport { ContextGatherer } from '../utils/contextGatherer.js';\nimport { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';\n\n/**\n * Update a subtask by appending additional timestamped information using the unified AI service.\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} subtaskId - ID of the subtask to update in format \"parentId.subtaskId\"\n * @param {string} prompt - Prompt for generating 
additional information\n * @param {boolean} [useResearch=false] - Whether to use the research AI role.\n * @param {Object} context - Context object containing session and mcpLog.\n * @param {Object} [context.session] - Session object from MCP server.\n * @param {Object} [context.mcpLog] - MCP logger object.\n * @param {string} [context.projectRoot] - Project root path (needed for AI service key resolution).\n * @param {string} [context.tag] - Tag for the task\n * @param {string} [outputFormat='text'] - Output format ('text' or 'json'). Automatically 'json' if mcpLog is present.\n * @returns {Promise} - The updated subtask or null if update failed.\n */\nasync function updateSubtaskById(\n\ttasksPath,\n\tsubtaskId,\n\tprompt,\n\tuseResearch = false,\n\tcontext = {},\n\toutputFormat = context.mcpLog ? 'json' : 'text'\n) {\n\tconst { session, mcpLog, projectRoot: providedProjectRoot, tag } = context;\n\tconst logFn = mcpLog || consoleLog;\n\tconst isMCP = !!mcpLog;\n\n\t// Report helper\n\tconst report = (level, ...args) => {\n\t\tif (isMCP) {\n\t\t\tif (typeof logFn[level] === 'function') logFn[level](...args);\n\t\t\telse logFn.info(...args);\n\t\t} else if (!isSilentMode()) {\n\t\t\tlogFn(level, ...args);\n\t\t}\n\t};\n\n\tlet loadingIndicator = null;\n\n\ttry {\n\t\treport('info', `Updating subtask ${subtaskId} with prompt: \"${prompt}\"`);\n\n\t\tif (\n\t\t\t!subtaskId ||\n\t\t\ttypeof subtaskId !== 'string' ||\n\t\t\t!subtaskId.includes('.')\n\t\t) {\n\t\t\tthrow new Error(\n\t\t\t\t`Invalid subtask ID format: ${subtaskId}. Subtask ID must be in format \"parentId.subtaskId\"`\n\t\t\t);\n\t\t}\n\n\t\tif (!prompt || typeof prompt !== 'string' || prompt.trim() === '') {\n\t\t\tthrow new Error(\n\t\t\t\t'Prompt cannot be empty. 
Please provide context for the subtask update.'\n\t\t\t);\n\t\t}\n\n\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\tthrow new Error(`Tasks file not found at path: ${tasksPath}`);\n\t\t}\n\n\t\tconst projectRoot = providedProjectRoot || findProjectRoot();\n\t\tif (!projectRoot) {\n\t\t\tthrow new Error('Could not determine project root directory');\n\t\t}\n\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!data || !data.tasks) {\n\t\t\tthrow new Error(\n\t\t\t\t`No valid tasks found in ${tasksPath}. The file may be corrupted or have an invalid format.`\n\t\t\t);\n\t\t}\n\n\t\tconst [parentIdStr, subtaskIdStr] = subtaskId.split('.');\n\t\tconst parentId = parseInt(parentIdStr, 10);\n\t\tconst subtaskIdNum = parseInt(subtaskIdStr, 10);\n\n\t\tif (\n\t\t\tNumber.isNaN(parentId) ||\n\t\t\tparentId <= 0 ||\n\t\t\tNumber.isNaN(subtaskIdNum) ||\n\t\t\tsubtaskIdNum <= 0\n\t\t) {\n\t\t\tthrow new Error(\n\t\t\t\t`Invalid subtask ID format: ${subtaskId}. Both parent ID and subtask ID must be positive integers.`\n\t\t\t);\n\t\t}\n\n\t\tconst parentTask = data.tasks.find((task) => task.id === parentId);\n\t\tif (!parentTask) {\n\t\t\tthrow new Error(\n\t\t\t\t`Parent task with ID ${parentId} not found. Please verify the task ID and try again.`\n\t\t\t);\n\t\t}\n\n\t\tif (!parentTask.subtasks || !Array.isArray(parentTask.subtasks)) {\n\t\t\tthrow new Error(`Parent task ${parentId} has no subtasks.`);\n\t\t}\n\n\t\tconst subtaskIndex = parentTask.subtasks.findIndex(\n\t\t\t(st) => st.id === subtaskIdNum\n\t\t);\n\t\tif (subtaskIndex === -1) {\n\t\t\tthrow new Error(\n\t\t\t\t`Subtask with ID ${subtaskId} not found. 
Please verify the subtask ID and try again.`\n\t\t\t);\n\t\t}\n\n\t\tconst subtask = parentTask.subtasks[subtaskIndex];\n\n\t\t// --- Context Gathering ---\n\t\tlet gatheredContext = '';\n\t\ttry {\n\t\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\t\t\tconst allTasksFlat = flattenTasksWithSubtasks(data.tasks);\n\t\t\tconst fuzzySearch = new FuzzyTaskSearch(allTasksFlat, 'update-subtask');\n\t\t\tconst searchQuery = `${parentTask.title} ${subtask.title} ${prompt}`;\n\t\t\tconst searchResults = fuzzySearch.findRelevantTasks(searchQuery, {\n\t\t\t\tmaxResults: 5,\n\t\t\t\tincludeSelf: true\n\t\t\t});\n\t\t\tconst relevantTaskIds = fuzzySearch.getTaskIds(searchResults);\n\n\t\t\tconst finalTaskIds = [\n\t\t\t\t...new Set([subtaskId.toString(), ...relevantTaskIds])\n\t\t\t];\n\n\t\t\tif (finalTaskIds.length > 0) {\n\t\t\t\tconst contextResult = await contextGatherer.gather({\n\t\t\t\t\ttasks: finalTaskIds,\n\t\t\t\t\tformat: 'research'\n\t\t\t\t});\n\t\t\t\tgatheredContext = contextResult.context || '';\n\t\t\t}\n\t\t} catch (contextError) {\n\t\t\treport('warn', `Could not gather context: ${contextError.message}`);\n\t\t}\n\t\t// --- End Context Gathering ---\n\n\t\tif (outputFormat === 'text') {\n\t\t\tconst table = new Table({\n\t\t\t\thead: [\n\t\t\t\t\tchalk.cyan.bold('ID'),\n\t\t\t\t\tchalk.cyan.bold('Title'),\n\t\t\t\t\tchalk.cyan.bold('Status')\n\t\t\t\t],\n\t\t\t\tcolWidths: [10, 55, 10]\n\t\t\t});\n\t\t\ttable.push([\n\t\t\t\tsubtaskId,\n\t\t\t\ttruncate(subtask.title, 52),\n\t\t\t\tgetStatusWithColor(subtask.status)\n\t\t\t]);\n\t\t\tconsole.log(\n\t\t\t\tboxen(chalk.white.bold(`Updating Subtask #${subtaskId}`), {\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t})\n\t\t\t);\n\t\t\tconsole.log(table.toString());\n\t\t\tloadingIndicator = startLoadingIndicator(\n\t\t\t\tuseResearch\n\t\t\t\t\t? 
'Updating subtask with research...'\n\t\t\t\t\t: 'Updating subtask...'\n\t\t\t);\n\t\t}\n\n\t\tlet generatedContentString = '';\n\t\tlet newlyAddedSnippet = '';\n\t\tlet aiServiceResponse = null;\n\n\t\ttry {\n\t\t\tconst parentContext = {\n\t\t\t\tid: parentTask.id,\n\t\t\t\ttitle: parentTask.title\n\t\t\t};\n\t\t\tconst prevSubtask =\n\t\t\t\tsubtaskIndex > 0\n\t\t\t\t\t? {\n\t\t\t\t\t\t\tid: `${parentTask.id}.${parentTask.subtasks[subtaskIndex - 1].id}`,\n\t\t\t\t\t\t\ttitle: parentTask.subtasks[subtaskIndex - 1].title,\n\t\t\t\t\t\t\tstatus: parentTask.subtasks[subtaskIndex - 1].status\n\t\t\t\t\t\t}\n\t\t\t\t\t: undefined;\n\t\t\tconst nextSubtask =\n\t\t\t\tsubtaskIndex < parentTask.subtasks.length - 1\n\t\t\t\t\t? {\n\t\t\t\t\t\t\tid: `${parentTask.id}.${parentTask.subtasks[subtaskIndex + 1].id}`,\n\t\t\t\t\t\t\ttitle: parentTask.subtasks[subtaskIndex + 1].title,\n\t\t\t\t\t\t\tstatus: parentTask.subtasks[subtaskIndex + 1].status\n\t\t\t\t\t\t}\n\t\t\t\t\t: undefined;\n\n\t\t\t// Build prompts using PromptManager\n\t\t\tconst promptManager = getPromptManager();\n\n\t\t\tconst promptParams = {\n\t\t\t\tparentTask: parentContext,\n\t\t\t\tprevSubtask: prevSubtask,\n\t\t\t\tnextSubtask: nextSubtask,\n\t\t\t\tcurrentDetails: subtask.details || '(No existing details)',\n\t\t\t\tupdatePrompt: prompt,\n\t\t\t\tuseResearch: useResearch,\n\t\t\t\tgatheredContext: gatheredContext || ''\n\t\t\t};\n\n\t\t\tconst variantKey = useResearch ? 'research' : 'default';\n\t\t\tconst { systemPrompt, userPrompt } = await promptManager.loadPrompt(\n\t\t\t\t'update-subtask',\n\t\t\t\tpromptParams,\n\t\t\t\tvariantKey\n\t\t\t);\n\n\t\t\tconst role = useResearch ? 
'research' : 'main';\n\t\t\treport('info', `Using AI text service with role: ${role}`);\n\n\t\t\taiServiceResponse = await generateTextService({\n\t\t\t\tprompt: userPrompt,\n\t\t\t\tsystemPrompt: systemPrompt,\n\t\t\t\trole,\n\t\t\t\tsession,\n\t\t\t\tprojectRoot,\n\t\t\t\tmaxRetries: 2,\n\t\t\t\tcommandName: 'update-subtask',\n\t\t\t\toutputType: isMCP ? 'mcp' : 'cli'\n\t\t\t});\n\n\t\t\tif (\n\t\t\t\taiServiceResponse &&\n\t\t\t\taiServiceResponse.mainResult &&\n\t\t\t\ttypeof aiServiceResponse.mainResult === 'string'\n\t\t\t) {\n\t\t\t\tgeneratedContentString = aiServiceResponse.mainResult;\n\t\t\t} else {\n\t\t\t\tgeneratedContentString = '';\n\t\t\t\treport(\n\t\t\t\t\t'warn',\n\t\t\t\t\t'AI service response did not contain expected text string.'\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tif (outputFormat === 'text' && loadingIndicator) {\n\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t\tloadingIndicator = null;\n\t\t\t}\n\t\t} catch (aiError) {\n\t\t\treport('error', `AI service call failed: ${aiError.message}`);\n\t\t\tif (outputFormat === 'text' && loadingIndicator) {\n\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t\tloadingIndicator = null;\n\t\t\t}\n\t\t\tthrow aiError;\n\t\t}\n\n\t\tif (generatedContentString && generatedContentString.trim()) {\n\t\t\t// Check if the string is not empty\n\t\t\tconst timestamp = new Date().toISOString();\n\t\t\tconst formattedBlock = `\\n${generatedContentString.trim()}\\n`;\n\t\t\tnewlyAddedSnippet = formattedBlock; // <--- ADD THIS LINE: Store for display\n\n\t\t\tsubtask.details =\n\t\t\t\t(subtask.details ? subtask.details + '\\n' : '') + formattedBlock;\n\t\t} else {\n\t\t\treport(\n\t\t\t\t'warn',\n\t\t\t\t'AI response was empty or whitespace after trimming. 
Original details remain unchanged.'\n\t\t\t);\n\t\t\tnewlyAddedSnippet = 'No new details were added by the AI.';\n\t\t}\n\n\t\tconst updatedSubtask = parentTask.subtasks[subtaskIndex];\n\n\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\tconsole.log(\n\t\t\t\t'>>> DEBUG: Subtask details AFTER AI update:',\n\t\t\t\tupdatedSubtask.details\n\t\t\t);\n\t\t}\n\n\t\tif (updatedSubtask.description) {\n\t\t\tif (prompt.length < 100) {\n\t\t\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t'>>> DEBUG: Subtask description BEFORE append:',\n\t\t\t\t\t\tupdatedSubtask.description\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tupdatedSubtask.description += ` [Updated: ${new Date().toLocaleDateString()}]`;\n\t\t\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t'>>> DEBUG: Subtask description AFTER append:',\n\t\t\t\t\t\tupdatedSubtask.description\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\tconsole.log('>>> DEBUG: About to call writeJSON with updated data...');\n\t\t}\n\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\tconsole.log('>>> DEBUG: writeJSON call completed.');\n\t\t}\n\n\t\treport('success', `Successfully updated subtask ${subtaskId}`);\n\t\t// Updated function call to make sure if uncommented it will generate the task files for the updated subtask based on the tag\n\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath), {\n\t\t// \ttag: tag,\n\t\t// \tprojectRoot: projectRoot\n\t\t// });\n\n\t\tif (outputFormat === 'text') {\n\t\t\tif (loadingIndicator) {\n\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t\tloadingIndicator = null;\n\t\t\t}\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.green(`Successfully updated subtask #${subtaskId}`) +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white.bold('Title:') +\n\t\t\t\t\t\t' 
' +\n\t\t\t\t\t\tupdatedSubtask.title +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white.bold('Newly Added Snippet:') +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.white(newlyAddedSnippet),\n\t\t\t\t\t{ padding: 1, borderColor: 'green', borderStyle: 'round' }\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\tif (outputFormat === 'text' && aiServiceResponse.telemetryData) {\n\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t}\n\n\t\treturn {\n\t\t\tupdatedSubtask: updatedSubtask,\n\t\t\ttelemetryData: aiServiceResponse.telemetryData,\n\t\t\ttagInfo: aiServiceResponse.tagInfo\n\t\t};\n\t} catch (error) {\n\t\tif (outputFormat === 'text' && loadingIndicator) {\n\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\tloadingIndicator = null;\n\t\t}\n\t\treport('error', `Error updating subtask: ${error.message}`);\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\tif (error.message?.includes('ANTHROPIC_API_KEY')) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow('\\nTo fix this issue, set your Anthropic API key:')\n\t\t\t\t);\n\t\t\t\tconsole.log(' export ANTHROPIC_API_KEY=your_api_key_here');\n\t\t\t} else if (error.message?.includes('PERPLEXITY_API_KEY')) {\n\t\t\t\tconsole.log(chalk.yellow('\\nTo fix this issue:'));\n\t\t\t\tconsole.log(\n\t\t\t\t\t' 1. Set your Perplexity API key: export PERPLEXITY_API_KEY=your_api_key_here'\n\t\t\t\t);\n\t\t\t\tconsole.log(\n\t\t\t\t\t' 2. Or run without the research flag: task-master update-subtask --id= --prompt=\"...\"'\n\t\t\t\t);\n\t\t\t} else if (error.message?.includes('overloaded')) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t'\\nAI model overloaded, and fallback failed or was unavailable:'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconsole.log(' 1. Try again in a few minutes.');\n\t\t\t\tconsole.log(' 2. 
Ensure PERPLEXITY_API_KEY is set for fallback.');\n\t\t\t} else if (error.message?.includes('not found')) {\n\t\t\t\tconsole.log(chalk.yellow('\\nTo fix this issue:'));\n\t\t\t\tconsole.log(\n\t\t\t\t\t' 1. Run task-master list --with-subtasks to see all available subtask IDs'\n\t\t\t\t);\n\t\t\t\tconsole.log(\n\t\t\t\t\t' 2. Use a valid subtask ID with the --id parameter in format \"parentId.subtaskId\"'\n\t\t\t\t);\n\t\t\t} else if (\n\t\t\t\terror.message?.includes('empty stream response') ||\n\t\t\t\terror.message?.includes('AI did not return a valid text string')\n\t\t\t) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t'\\nThe AI model returned an empty or invalid response. This might be due to the prompt or API issues. Try rephrasing or trying again later.'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t\tif (getDebugFlag(session)) {\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\t\t} else {\n\t\t\tthrow error;\n\t\t}\n\t\treturn null;\n\t}\n}\n\nexport default updateSubtaskById;\n"], ["/claude-task-master/scripts/modules/task-manager/update-task-by-id.js", "import fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport Table from 'cli-table3';\nimport { z } from 'zod'; // Keep Zod for post-parse validation\n\nimport {\n\tlog as consoleLog,\n\treadJSON,\n\twriteJSON,\n\ttruncate,\n\tisSilentMode,\n\tflattenTasksWithSubtasks,\n\tfindProjectRoot\n} from '../utils.js';\n\nimport {\n\tgetStatusWithColor,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayAiUsageSummary\n} from '../ui.js';\n\nimport { generateTextService } from '../ai-services-unified.js';\nimport { getDebugFlag, isApiKeySet } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport { ContextGatherer } from '../utils/contextGatherer.js';\nimport { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';\n\n// Zod schema for post-parsing validation of the updated task object\nconst updatedTaskSchema = 
z\n\t.object({\n\t\tid: z.number().int(),\n\t\ttitle: z.string(), // Title should be preserved, but check it exists\n\t\tdescription: z.string(),\n\t\tstatus: z.string(),\n\t\tdependencies: z.array(z.union([z.number().int(), z.string()])),\n\t\tpriority: z.string().nullable().default('medium'),\n\t\tdetails: z.string().nullable().default(''),\n\t\ttestStrategy: z.string().nullable().default(''),\n\t\tsubtasks: z\n\t\t\t.array(\n\t\t\t\tz.object({\n\t\t\t\t\tid: z\n\t\t\t\t\t\t.number()\n\t\t\t\t\t\t.int()\n\t\t\t\t\t\t.positive()\n\t\t\t\t\t\t.describe('Sequential subtask ID starting from 1'),\n\t\t\t\t\ttitle: z.string(),\n\t\t\t\t\tdescription: z.string(),\n\t\t\t\t\tstatus: z.string(),\n\t\t\t\t\tdependencies: z.array(z.number().int()).nullable().default([]),\n\t\t\t\t\tdetails: z.string().nullable().default(''),\n\t\t\t\t\ttestStrategy: z.string().nullable().default('')\n\t\t\t\t})\n\t\t\t)\n\t\t\t.nullable()\n\t\t\t.default([])\n\t})\n\t.strip(); // Allows parsing even if AI adds extra fields, but validation focuses on schema\n\n/**\n * Parses a single updated task object from AI's text response.\n * @param {string} text - Response text from AI.\n * @param {number} expectedTaskId - The ID of the task expected.\n * @param {Function | Object} logFn - Logging function or MCP logger.\n * @param {boolean} isMCP - Flag indicating MCP context.\n * @returns {Object} Parsed and validated task object.\n * @throws {Error} If parsing or validation fails.\n */\nfunction parseUpdatedTaskFromText(text, expectedTaskId, logFn, isMCP) {\n\t// Report helper consistent with the established pattern\n\tconst report = (level, ...args) => {\n\t\tif (isMCP) {\n\t\t\tif (typeof logFn[level] === 'function') logFn[level](...args);\n\t\t\telse logFn.info(...args);\n\t\t} else if (!isSilentMode()) {\n\t\t\tlogFn(level, ...args);\n\t\t}\n\t};\n\n\treport(\n\t\t'info',\n\t\t'Attempting to parse updated task object from text response...'\n\t);\n\tif (!text || text.trim() === '')\n\t\tthrow 
new Error('AI response text is empty.');\n\n\tlet cleanedResponse = text.trim();\n\tconst originalResponseForDebug = cleanedResponse;\n\tlet parseMethodUsed = 'raw'; // Keep track of which method worked\n\n\t// --- NEW Step 1: Try extracting between {} first ---\n\tconst firstBraceIndex = cleanedResponse.indexOf('{');\n\tconst lastBraceIndex = cleanedResponse.lastIndexOf('}');\n\tlet potentialJsonFromBraces = null;\n\n\tif (firstBraceIndex !== -1 && lastBraceIndex > firstBraceIndex) {\n\t\tpotentialJsonFromBraces = cleanedResponse.substring(\n\t\t\tfirstBraceIndex,\n\t\t\tlastBraceIndex + 1\n\t\t);\n\t\tif (potentialJsonFromBraces.length <= 2) {\n\t\t\tpotentialJsonFromBraces = null; // Ignore empty braces {}\n\t\t}\n\t}\n\n\t// If {} extraction yielded something, try parsing it immediately\n\tif (potentialJsonFromBraces) {\n\t\ttry {\n\t\t\tconst testParse = JSON.parse(potentialJsonFromBraces);\n\t\t\t// It worked! Use this as the primary cleaned response.\n\t\t\tcleanedResponse = potentialJsonFromBraces;\n\t\t\tparseMethodUsed = 'braces';\n\t\t} catch (e) {\n\t\t\treport(\n\t\t\t\t'info',\n\t\t\t\t'Content between {} looked promising but failed initial parse. Proceeding to other methods.'\n\t\t\t);\n\t\t\t// Reset cleanedResponse to original if brace parsing failed\n\t\t\tcleanedResponse = originalResponseForDebug;\n\t\t}\n\t}\n\n\t// --- Step 2: If brace parsing didn't work or wasn't applicable, try code block extraction ---\n\tif (parseMethodUsed === 'raw') {\n\t\tconst codeBlockMatch = cleanedResponse.match(\n\t\t\t/```(?:json|javascript)?\\s*([\\s\\S]*?)\\s*```/i\n\t\t);\n\t\tif (codeBlockMatch) {\n\t\t\tcleanedResponse = codeBlockMatch[1].trim();\n\t\t\tparseMethodUsed = 'codeblock';\n\t\t\treport('info', 'Extracted JSON content from Markdown code block.');\n\t\t} else {\n\t\t\t// --- Step 3: If code block failed, try stripping prefixes ---\n\t\t\tconst commonPrefixes = [\n\t\t\t\t'json\\n',\n\t\t\t\t'javascript\\n'\n\t\t\t\t// ... 
other prefixes ...\n\t\t\t];\n\t\t\tlet prefixFound = false;\n\t\t\tfor (const prefix of commonPrefixes) {\n\t\t\t\tif (cleanedResponse.toLowerCase().startsWith(prefix)) {\n\t\t\t\t\tcleanedResponse = cleanedResponse.substring(prefix.length).trim();\n\t\t\t\t\tparseMethodUsed = 'prefix';\n\t\t\t\t\treport('info', `Stripped prefix: \"${prefix.trim()}\"`);\n\t\t\t\t\tprefixFound = true;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (!prefixFound) {\n\t\t\t\treport(\n\t\t\t\t\t'warn',\n\t\t\t\t\t'Response does not appear to contain {}, code block, or known prefix. Attempting raw parse.'\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t}\n\n\t// --- Step 4: Attempt final parse ---\n\tlet parsedTask;\n\ttry {\n\t\tparsedTask = JSON.parse(cleanedResponse);\n\t} catch (parseError) {\n\t\treport('error', `Failed to parse JSON object: ${parseError.message}`);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`\n\t\t);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`\n\t\t);\n\t\tthrow new Error(\n\t\t\t`Failed to parse JSON response object: ${parseError.message}`\n\t\t);\n\t}\n\n\tif (!parsedTask || typeof parsedTask !== 'object') {\n\t\treport(\n\t\t\t'error',\n\t\t\t`Parsed content is not an object. Type: ${typeof parsedTask}`\n\t\t);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Parsed content sample: ${JSON.stringify(parsedTask).substring(0, 200)}`\n\t\t);\n\t\tthrow new Error('Parsed AI response is not a valid JSON object.');\n\t}\n\n\t// Preprocess the task to ensure subtasks have proper structure\n\tconst preprocessedTask = {\n\t\t...parsedTask,\n\t\tstatus: parsedTask.status || 'pending',\n\t\tdependencies: Array.isArray(parsedTask.dependencies)\n\t\t\t? parsedTask.dependencies\n\t\t\t: [],\n\t\tdetails:\n\t\t\ttypeof parsedTask.details === 'string'\n\t\t\t\t? 
parsedTask.details\n\t\t\t\t: String(parsedTask.details || ''),\n\t\ttestStrategy:\n\t\t\ttypeof parsedTask.testStrategy === 'string'\n\t\t\t\t? parsedTask.testStrategy\n\t\t\t\t: String(parsedTask.testStrategy || ''),\n\t\t// Ensure subtasks is an array and each subtask has required fields\n\t\tsubtasks: Array.isArray(parsedTask.subtasks)\n\t\t\t? parsedTask.subtasks.map((subtask) => ({\n\t\t\t\t\t...subtask,\n\t\t\t\t\ttitle: subtask.title || '',\n\t\t\t\t\tdescription: subtask.description || '',\n\t\t\t\t\tstatus: subtask.status || 'pending',\n\t\t\t\t\tdependencies: Array.isArray(subtask.dependencies)\n\t\t\t\t\t\t? subtask.dependencies\n\t\t\t\t\t\t: [],\n\t\t\t\t\tdetails:\n\t\t\t\t\t\ttypeof subtask.details === 'string'\n\t\t\t\t\t\t\t? subtask.details\n\t\t\t\t\t\t\t: String(subtask.details || ''),\n\t\t\t\t\ttestStrategy:\n\t\t\t\t\t\ttypeof subtask.testStrategy === 'string'\n\t\t\t\t\t\t\t? subtask.testStrategy\n\t\t\t\t\t\t\t: String(subtask.testStrategy || '')\n\t\t\t\t}))\n\t\t\t: []\n\t};\n\n\t// Validate the parsed task object using Zod\n\tconst validationResult = updatedTaskSchema.safeParse(preprocessedTask);\n\tif (!validationResult.success) {\n\t\treport('error', 'Parsed task object failed Zod validation.');\n\t\tvalidationResult.error.errors.forEach((err) => {\n\t\t\treport('error', ` - Field '${err.path.join('.')}': ${err.message}`);\n\t\t});\n\t\tthrow new Error(\n\t\t\t`AI response failed task structure validation: ${validationResult.error.message}`\n\t\t);\n\t}\n\n\t// Final check: ensure ID matches expected ID (AI might hallucinate)\n\tif (validationResult.data.id !== expectedTaskId) {\n\t\treport(\n\t\t\t'warn',\n\t\t\t`AI returned task with ID ${validationResult.data.id}, but expected ${expectedTaskId}. 
Overwriting ID.`\n\t\t);\n\t\tvalidationResult.data.id = expectedTaskId; // Enforce correct ID\n\t}\n\n\treport('info', 'Successfully validated updated task structure.');\n\treturn validationResult.data; // Return the validated task data\n}\n\n/**\n * Update a task by ID with new information using the unified AI service.\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {number} taskId - ID of the task to update\n * @param {string} prompt - Prompt for generating updated task information\n * @param {boolean} [useResearch=false] - Whether to use the research AI role.\n * @param {Object} context - Context object containing session and mcpLog.\n * @param {Object} [context.session] - Session object from MCP server.\n * @param {Object} [context.mcpLog] - MCP logger object.\n * @param {string} [context.projectRoot] - Project root path.\n * @param {string} [context.tag] - Tag for the task\n * @param {string} [outputFormat='text'] - Output format ('text' or 'json').\n * @param {boolean} [appendMode=false] - If true, append to details instead of full update.\n * @returns {Promise} - The updated task or null if update failed.\n */\nasync function updateTaskById(\n\ttasksPath,\n\ttaskId,\n\tprompt,\n\tuseResearch = false,\n\tcontext = {},\n\toutputFormat = 'text',\n\tappendMode = false\n) {\n\tconst { session, mcpLog, projectRoot: providedProjectRoot, tag } = context;\n\tconst logFn = mcpLog || consoleLog;\n\tconst isMCP = !!mcpLog;\n\n\t// Use report helper for logging\n\tconst report = (level, ...args) => {\n\t\tif (isMCP) {\n\t\t\tif (typeof logFn[level] === 'function') logFn[level](...args);\n\t\t\telse logFn.info(...args);\n\t\t} else if (!isSilentMode()) {\n\t\t\tlogFn(level, ...args);\n\t\t}\n\t};\n\n\ttry {\n\t\treport('info', `Updating single task ${taskId} with prompt: \"${prompt}\"`);\n\n\t\t// --- Input Validations (Keep existing) ---\n\t\tif (!Number.isInteger(taskId) || taskId <= 0)\n\t\t\tthrow new Error(\n\t\t\t\t`Invalid task ID: 
${taskId}. Task ID must be a positive integer.`\n\t\t\t);\n\t\tif (!prompt || typeof prompt !== 'string' || prompt.trim() === '')\n\t\t\tthrow new Error('Prompt cannot be empty.');\n\t\tif (useResearch && !isApiKeySet('perplexity', session)) {\n\t\t\treport(\n\t\t\t\t'warn',\n\t\t\t\t'Perplexity research requested but API key not set. Falling back.'\n\t\t\t);\n\t\t\tif (outputFormat === 'text')\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow('Perplexity AI not available. Falling back to main AI.')\n\t\t\t\t);\n\t\t\tuseResearch = false;\n\t\t}\n\t\tif (!fs.existsSync(tasksPath))\n\t\t\tthrow new Error(`Tasks file not found: ${tasksPath}`);\n\t\t// --- End Input Validations ---\n\n\t\t// Determine project root\n\t\tconst projectRoot = providedProjectRoot || findProjectRoot();\n\t\tif (!projectRoot) {\n\t\t\tthrow new Error('Could not determine project root directory');\n\t\t}\n\n\t\t// --- Task Loading and Status Check (Keep existing) ---\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!data || !data.tasks)\n\t\t\tthrow new Error(`No valid tasks found in ${tasksPath}.`);\n\t\tconst taskIndex = data.tasks.findIndex((task) => task.id === taskId);\n\t\tif (taskIndex === -1) throw new Error(`Task with ID ${taskId} not found.`);\n\t\tconst taskToUpdate = data.tasks[taskIndex];\n\t\tif (taskToUpdate.status === 'done' || taskToUpdate.status === 'completed') {\n\t\t\treport(\n\t\t\t\t'warn',\n\t\t\t\t`Task ${taskId} is already marked as done and cannot be updated`\n\t\t\t);\n\n\t\t\t// Only show warning box for text output (CLI)\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`Task ${taskId} is already marked as ${taskToUpdate.status} and cannot be updated.`\n\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t\t'Completed tasks are locked to maintain consistency. 
To modify a completed task, you must first:'\n\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t\t'1. Change its status to \"pending\" or \"in-progress\"'\n\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\tchalk.white('2. Then run the update-task command'),\n\t\t\t\t\t\t{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t\treturn null;\n\t\t}\n\t\t// --- End Task Loading ---\n\n\t\t// --- Context Gathering ---\n\t\tlet gatheredContext = '';\n\t\ttry {\n\t\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\t\t\tconst allTasksFlat = flattenTasksWithSubtasks(data.tasks);\n\t\t\tconst fuzzySearch = new FuzzyTaskSearch(allTasksFlat, 'update-task');\n\t\t\tconst searchQuery = `${taskToUpdate.title} ${taskToUpdate.description} ${prompt}`;\n\t\t\tconst searchResults = fuzzySearch.findRelevantTasks(searchQuery, {\n\t\t\t\tmaxResults: 5,\n\t\t\t\tincludeSelf: true\n\t\t\t});\n\t\t\tconst relevantTaskIds = fuzzySearch.getTaskIds(searchResults);\n\n\t\t\tconst finalTaskIds = [\n\t\t\t\t...new Set([taskId.toString(), ...relevantTaskIds])\n\t\t\t];\n\n\t\t\tif (finalTaskIds.length > 0) {\n\t\t\t\tconst contextResult = await contextGatherer.gather({\n\t\t\t\t\ttasks: finalTaskIds,\n\t\t\t\t\tformat: 'research'\n\t\t\t\t});\n\t\t\t\tgatheredContext = contextResult.context || '';\n\t\t\t}\n\t\t} catch (contextError) {\n\t\t\treport('warn', `Could not gather context: ${contextError.message}`);\n\t\t}\n\t\t// --- End Context Gathering ---\n\n\t\t// --- Display Task Info (CLI Only - Keep existing) ---\n\t\tif (outputFormat === 'text') {\n\t\t\t// Show the task that will be updated\n\t\t\tconst table = new Table({\n\t\t\t\thead: [\n\t\t\t\t\tchalk.cyan.bold('ID'),\n\t\t\t\t\tchalk.cyan.bold('Title'),\n\t\t\t\t\tchalk.cyan.bold('Status')\n\t\t\t\t],\n\t\t\t\tcolWidths: [5, 60, 10]\n\t\t\t});\n\n\t\t\ttable.push([\n\t\t\t\ttaskToUpdate.id,\n\t\t\t\ttruncate(taskToUpdate.title, 
57),\n\t\t\t\tgetStatusWithColor(taskToUpdate.status)\n\t\t\t]);\n\n\t\t\tconsole.log(\n\t\t\t\tboxen(chalk.white.bold(`Updating Task #${taskId}`), {\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t})\n\t\t\t);\n\n\t\t\tconsole.log(table.toString());\n\n\t\t\t// Display a message about how completed subtasks are handled\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.cyan.bold('How Completed Subtasks Are Handled:') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• Subtasks marked as \"done\" or \"completed\" will be preserved\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• New subtasks will build upon what has already been completed\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• If completed work needs revision, a new subtask will be created instead of modifying done items\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• This approach maintains a clear record of completed work and new requirements'\n\t\t\t\t\t\t),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\t// --- Build Prompts using PromptManager ---\n\t\tconst promptManager = getPromptManager();\n\n\t\tconst promptParams = {\n\t\t\ttask: taskToUpdate,\n\t\t\ttaskJson: JSON.stringify(taskToUpdate, null, 2),\n\t\t\tupdatePrompt: prompt,\n\t\t\tappendMode: appendMode,\n\t\t\tuseResearch: useResearch,\n\t\t\tcurrentDetails: taskToUpdate.details || '(No existing details)',\n\t\t\tgatheredContext: gatheredContext || ''\n\t\t};\n\n\t\tconst variantKey = appendMode\n\t\t\t? 'append'\n\t\t\t: useResearch\n\t\t\t\t? 
'research'\n\t\t\t\t: 'default';\n\n\t\treport(\n\t\t\t'info',\n\t\t\t`Loading prompt template with variant: ${variantKey}, appendMode: ${appendMode}, useResearch: ${useResearch}`\n\t\t);\n\n\t\tlet systemPrompt;\n\t\tlet userPrompt;\n\t\ttry {\n\t\t\tconst promptResult = await promptManager.loadPrompt(\n\t\t\t\t'update-task',\n\t\t\t\tpromptParams,\n\t\t\t\tvariantKey\n\t\t\t);\n\t\t\treport(\n\t\t\t\t'info',\n\t\t\t\t`Prompt result type: ${typeof promptResult}, keys: ${promptResult ? Object.keys(promptResult).join(', ') : 'null'}`\n\t\t\t);\n\n\t\t\t// Extract prompts - loadPrompt returns { systemPrompt, userPrompt, metadata }\n\t\t\tsystemPrompt = promptResult.systemPrompt;\n\t\t\tuserPrompt = promptResult.userPrompt;\n\n\t\t\treport(\n\t\t\t\t'info',\n\t\t\t\t`Loaded prompts - systemPrompt length: ${systemPrompt?.length}, userPrompt length: ${userPrompt?.length}`\n\t\t\t);\n\t\t} catch (error) {\n\t\t\treport('error', `Failed to load prompt template: ${error.message}`);\n\t\t\tthrow new Error(`Failed to load prompt template: ${error.message}`);\n\t\t}\n\n\t\t// If prompts are still not set, throw an error\n\t\tif (!systemPrompt || !userPrompt) {\n\t\t\tthrow new Error(\n\t\t\t\t`Failed to load prompts: systemPrompt=${!!systemPrompt}, userPrompt=${!!userPrompt}`\n\t\t\t);\n\t\t}\n\t\t// --- End Build Prompts ---\n\n\t\tlet loadingIndicator = null;\n\t\tlet aiServiceResponse = null;\n\n\t\tif (!isMCP && outputFormat === 'text') {\n\t\t\tloadingIndicator = startLoadingIndicator(\n\t\t\t\tuseResearch ? 'Updating task with research...\\n' : 'Updating task...\\n'\n\t\t\t);\n\t\t}\n\n\t\ttry {\n\t\t\tconst serviceRole = useResearch ? 'research' : 'main';\n\t\t\taiServiceResponse = await generateTextService({\n\t\t\t\trole: serviceRole,\n\t\t\t\tsession: session,\n\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\tsystemPrompt: systemPrompt,\n\t\t\t\tprompt: userPrompt,\n\t\t\t\tcommandName: 'update-task',\n\t\t\t\toutputType: isMCP ? 
'mcp' : 'cli'\n\t\t\t});\n\n\t\t\tif (loadingIndicator)\n\t\t\t\tstopLoadingIndicator(loadingIndicator, 'AI update complete.');\n\n\t\t\tif (appendMode) {\n\t\t\t\t// Append mode: handle as plain text\n\t\t\t\tconst generatedContentString = aiServiceResponse.mainResult;\n\t\t\t\tlet newlyAddedSnippet = '';\n\n\t\t\t\tif (generatedContentString && generatedContentString.trim()) {\n\t\t\t\t\tconst timestamp = new Date().toISOString();\n\t\t\t\t\tconst formattedBlock = `\\n${generatedContentString.trim()}\\n`;\n\t\t\t\t\tnewlyAddedSnippet = formattedBlock;\n\n\t\t\t\t\t// Append to task details\n\t\t\t\t\ttaskToUpdate.details =\n\t\t\t\t\t\t(taskToUpdate.details ? taskToUpdate.details + '\\n' : '') +\n\t\t\t\t\t\tformattedBlock;\n\t\t\t\t} else {\n\t\t\t\t\treport(\n\t\t\t\t\t\t'warn',\n\t\t\t\t\t\t'AI response was empty or whitespace after trimming. Original details remain unchanged.'\n\t\t\t\t\t);\n\t\t\t\t\tnewlyAddedSnippet = 'No new details were added by the AI.';\n\t\t\t\t}\n\n\t\t\t\t// Update description with timestamp if prompt is short\n\t\t\t\tif (prompt.length < 100) {\n\t\t\t\t\tif (taskToUpdate.description) {\n\t\t\t\t\t\ttaskToUpdate.description += ` [Updated: ${new Date().toLocaleDateString()}]`;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Write the updated task back to file\n\t\t\t\tdata.tasks[taskIndex] = taskToUpdate;\n\t\t\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\t\t\t\treport('success', `Successfully appended to task ${taskId}`);\n\n\t\t\t\t// Display success message for CLI\n\t\t\t\tif (outputFormat === 'text') {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tboxen(\n\t\t\t\t\t\t\tchalk.green(`Successfully appended to task #${taskId}`) +\n\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\tchalk.white.bold('Title:') +\n\t\t\t\t\t\t\t\t' ' +\n\t\t\t\t\t\t\t\ttaskToUpdate.title +\n\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\tchalk.white.bold('Newly Added Content:') +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\tchalk.white(newlyAddedSnippet),\n\t\t\t\t\t\t\t{ 
padding: 1, borderColor: 'green', borderStyle: 'round' }\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Display AI usage telemetry for CLI users\n\t\t\t\tif (outputFormat === 'text' && aiServiceResponse.telemetryData) {\n\t\t\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t\t\t}\n\n\t\t\t\t// Return the updated task\n\t\t\t\treturn {\n\t\t\t\t\tupdatedTask: taskToUpdate,\n\t\t\t\t\ttelemetryData: aiServiceResponse.telemetryData,\n\t\t\t\t\ttagInfo: aiServiceResponse.tagInfo\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Full update mode: Use mainResult (text) for parsing\n\t\t\tconst updatedTask = parseUpdatedTaskFromText(\n\t\t\t\taiServiceResponse.mainResult,\n\t\t\t\ttaskId,\n\t\t\t\tlogFn,\n\t\t\t\tisMCP\n\t\t\t);\n\n\t\t\t// --- Task Validation/Correction (Keep existing logic) ---\n\t\t\tif (!updatedTask || typeof updatedTask !== 'object')\n\t\t\t\tthrow new Error('Received invalid task object from AI.');\n\t\t\tif (!updatedTask.title || !updatedTask.description)\n\t\t\t\tthrow new Error('Updated task missing required fields.');\n\t\t\t// Preserve ID if AI changed it\n\t\t\tif (updatedTask.id !== taskId) {\n\t\t\t\treport('warn', `AI changed task ID. Restoring original ID ${taskId}.`);\n\t\t\t\tupdatedTask.id = taskId;\n\t\t\t}\n\t\t\t// Preserve status if AI changed it\n\t\t\tif (\n\t\t\t\tupdatedTask.status !== taskToUpdate.status &&\n\t\t\t\t!prompt.toLowerCase().includes('status')\n\t\t\t) {\n\t\t\t\treport(\n\t\t\t\t\t'warn',\n\t\t\t\t\t`AI changed task status. 
Restoring original status '${taskToUpdate.status}'.`\n\t\t\t\t);\n\t\t\t\tupdatedTask.status = taskToUpdate.status;\n\t\t\t}\n\t\t\t// Fix subtask IDs if they exist (ensure they are numeric and sequential)\n\t\t\tif (updatedTask.subtasks && Array.isArray(updatedTask.subtasks)) {\n\t\t\t\tlet currentSubtaskId = 1;\n\t\t\t\tupdatedTask.subtasks = updatedTask.subtasks.map((subtask) => {\n\t\t\t\t\t// Fix AI-generated subtask IDs that might be strings or use parent ID as prefix\n\t\t\t\t\tconst correctedSubtask = {\n\t\t\t\t\t\t...subtask,\n\t\t\t\t\t\tid: currentSubtaskId, // Override AI-generated ID with correct sequential ID\n\t\t\t\t\t\tdependencies: Array.isArray(subtask.dependencies)\n\t\t\t\t\t\t\t? subtask.dependencies\n\t\t\t\t\t\t\t\t\t.map((dep) =>\n\t\t\t\t\t\t\t\t\t\ttypeof dep === 'string' ? parseInt(dep, 10) : dep\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t.filter(\n\t\t\t\t\t\t\t\t\t\t(depId) =>\n\t\t\t\t\t\t\t\t\t\t\t!Number.isNaN(depId) &&\n\t\t\t\t\t\t\t\t\t\t\tdepId >= 1 &&\n\t\t\t\t\t\t\t\t\t\t\tdepId < currentSubtaskId\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t: [],\n\t\t\t\t\t\tstatus: subtask.status || 'pending'\n\t\t\t\t\t};\n\t\t\t\t\tcurrentSubtaskId++;\n\t\t\t\t\treturn correctedSubtask;\n\t\t\t\t});\n\t\t\t\treport(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Fixed ${updatedTask.subtasks.length} subtask IDs to be sequential numeric IDs.`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Preserve completed subtasks (Keep existing logic)\n\t\t\tif (taskToUpdate.subtasks?.length > 0) {\n\t\t\t\tif (!updatedTask.subtasks) {\n\t\t\t\t\treport(\n\t\t\t\t\t\t'warn',\n\t\t\t\t\t\t'Subtasks removed by AI. 
Restoring original subtasks.'\n\t\t\t\t\t);\n\t\t\t\t\tupdatedTask.subtasks = taskToUpdate.subtasks;\n\t\t\t\t} else {\n\t\t\t\t\tconst completedOriginal = taskToUpdate.subtasks.filter(\n\t\t\t\t\t\t(st) => st.status === 'done' || st.status === 'completed'\n\t\t\t\t\t);\n\t\t\t\t\tcompletedOriginal.forEach((compSub) => {\n\t\t\t\t\t\tconst updatedSub = updatedTask.subtasks.find(\n\t\t\t\t\t\t\t(st) => st.id === compSub.id\n\t\t\t\t\t\t);\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t!updatedSub ||\n\t\t\t\t\t\t\tJSON.stringify(updatedSub) !== JSON.stringify(compSub)\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\treport(\n\t\t\t\t\t\t\t\t'warn',\n\t\t\t\t\t\t\t\t`Completed subtask ${compSub.id} was modified or removed. Restoring.`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t// Remove potentially modified version\n\t\t\t\t\t\t\tupdatedTask.subtasks = updatedTask.subtasks.filter(\n\t\t\t\t\t\t\t\t(st) => st.id !== compSub.id\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t// Add back original\n\t\t\t\t\t\t\tupdatedTask.subtasks.push(compSub);\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t\t// Deduplicate just in case\n\t\t\t\t\tconst subtaskIds = new Set();\n\t\t\t\t\tupdatedTask.subtasks = updatedTask.subtasks.filter((st) => {\n\t\t\t\t\t\tif (!subtaskIds.has(st.id)) {\n\t\t\t\t\t\t\tsubtaskIds.add(st.id);\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t}\n\t\t\t\t\t\treport('warn', `Duplicate subtask ID ${st.id} removed.`);\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\t\t\t// --- End Task Validation/Correction ---\n\n\t\t\t// --- Update Task Data (Keep existing) ---\n\t\t\tdata.tasks[taskIndex] = updatedTask;\n\t\t\t// --- End Update Task Data ---\n\n\t\t\t// --- Write File and Generate (Unchanged) ---\n\t\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\t\t\treport('success', `Successfully updated task ${taskId}`);\n\t\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath));\n\t\t\t// --- End Write File ---\n\n\t\t\t// --- Display CLI Telemetry ---\n\t\t\tif (outputFormat === 'text' && 
aiServiceResponse.telemetryData) {\n\t\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli'); // <<< ADD display\n\t\t\t}\n\n\t\t\t// --- Return Success with Telemetry ---\n\t\t\treturn {\n\t\t\t\tupdatedTask: updatedTask, // Return the updated task object\n\t\t\t\ttelemetryData: aiServiceResponse.telemetryData, // <<< ADD telemetryData\n\t\t\t\ttagInfo: aiServiceResponse.tagInfo\n\t\t\t};\n\t\t} catch (error) {\n\t\t\t// Catch errors from generateTextService\n\t\t\tif (loadingIndicator) stopLoadingIndicator(loadingIndicator);\n\t\t\treport('error', `Error during AI service call: ${error.message}`);\n\t\t\tif (error.message.includes('API key')) {\n\t\t\t\treport('error', 'Please ensure API keys are configured correctly.');\n\t\t\t}\n\t\t\tthrow error; // Re-throw error\n\t\t}\n\t} catch (error) {\n\t\t// General error catch\n\t\t// --- General Error Handling (Keep existing) ---\n\t\treport('error', `Error updating task: ${error.message}`);\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\t// ... 
helpful hints ...\n\t\t\tif (getDebugFlag(session)) console.error(error);\n\t\t\tprocess.exit(1);\n\t\t} else {\n\t\t\tthrow error; // Re-throw for MCP\n\t\t}\n\t\treturn null; // Indicate failure in CLI case if process doesn't exit\n\t\t// --- End General Error Handling ---\n\t}\n}\n\nexport default updateTaskById;\n"], ["/claude-task-master/scripts/modules/commands.js", "/**\n * commands.js\n * Command-line interface for the Task Master CLI\n */\n\nimport { program } from 'commander';\nimport path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport fs from 'fs';\nimport https from 'https';\nimport http from 'http';\nimport inquirer from 'inquirer';\nimport search from '@inquirer/search';\nimport ora from 'ora'; // Import ora\n\nimport {\n\tlog,\n\treadJSON,\n\twriteJSON,\n\tgetCurrentTag,\n\tdetectCamelCaseFlags,\n\ttoKebabCase\n} from './utils.js';\nimport {\n\tparsePRD,\n\tupdateTasks,\n\tgenerateTaskFiles,\n\tsetTaskStatus,\n\tlistTasks,\n\texpandTask,\n\texpandAllTasks,\n\tclearSubtasks,\n\taddTask,\n\taddSubtask,\n\tremoveSubtask,\n\tanalyzeTaskComplexity,\n\tupdateTaskById,\n\tupdateSubtaskById,\n\tremoveTask,\n\tfindTaskById,\n\ttaskExists,\n\tmoveTask,\n\tmigrateProject,\n\tsetResponseLanguage\n} from './task-manager.js';\n\nimport {\n\tcreateTag,\n\tdeleteTag,\n\ttags,\n\tuseTag,\n\trenameTag,\n\tcopyTag\n} from './task-manager/tag-management.js';\n\nimport {\n\taddDependency,\n\tremoveDependency,\n\tvalidateDependenciesCommand,\n\tfixDependenciesCommand\n} from './dependency-manager.js';\n\nimport {\n\tisApiKeySet,\n\tgetDebugFlag,\n\tgetConfig,\n\twriteConfig,\n\tConfigurationError,\n\tisConfigFilePresent,\n\tgetAvailableModels,\n\tgetBaseUrlForRole,\n\tgetDefaultNumTasks,\n\tgetDefaultSubtasks\n} from './config-manager.js';\n\nimport { CUSTOM_PROVIDERS } from '../../src/constants/providers.js';\n\nimport {\n\tCOMPLEXITY_REPORT_FILE,\n\tTASKMASTER_TASKS_FILE,\n\tTASKMASTER_DOCS_DIR\n} from 
'../../src/constants/paths.js';\n\nimport { initTaskMaster } from '../../src/task-master.js';\n\nimport {\n\tdisplayBanner,\n\tdisplayHelp,\n\tdisplayNextTask,\n\tdisplayTaskById,\n\tdisplayComplexityReport,\n\tgetStatusWithColor,\n\tconfirmTaskOverwrite,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayModelConfiguration,\n\tdisplayAvailableModels,\n\tdisplayApiKeyStatus,\n\tdisplayAiUsageSummary,\n\tdisplayMultipleTasksSummary,\n\tdisplayTaggedTasksFYI,\n\tdisplayCurrentTagIndicator\n} from './ui.js';\nimport {\n\tconfirmProfilesRemove,\n\tconfirmRemoveAllRemainingProfiles\n} from '../../src/ui/confirm.js';\nimport {\n\twouldRemovalLeaveNoProfiles,\n\tgetInstalledProfiles\n} from '../../src/utils/profiles.js';\n\nimport { initializeProject } from '../init.js';\nimport {\n\tgetModelConfiguration,\n\tgetAvailableModelsList,\n\tsetModel,\n\tgetApiKeyStatusReport\n} from './task-manager/models.js';\nimport {\n\tisValidTaskStatus,\n\tTASK_STATUS_OPTIONS\n} from '../../src/constants/task-status.js';\nimport {\n\tisValidRulesAction,\n\tRULES_ACTIONS,\n\tRULES_SETUP_ACTION\n} from '../../src/constants/rules-actions.js';\nimport { getTaskMasterVersion } from '../../src/utils/getVersion.js';\nimport { syncTasksToReadme } from './sync-readme.js';\nimport { RULE_PROFILES } from '../../src/constants/profiles.js';\nimport {\n\tconvertAllRulesToProfileRules,\n\tremoveProfileRules,\n\tisValidProfile,\n\tgetRulesProfile\n} from '../../src/utils/rule-transformer.js';\nimport {\n\trunInteractiveProfilesSetup,\n\tgenerateProfileSummary,\n\tcategorizeProfileResults,\n\tgenerateProfileRemovalSummary,\n\tcategorizeRemovalResults\n} from '../../src/utils/profiles.js';\n\n/**\n * Runs the interactive setup process for model configuration.\n * @param {string|null} projectRoot - The resolved project root directory.\n */\nasync function runInteractiveSetup(projectRoot) {\n\tif (!projectRoot) {\n\t\tconsole.error(\n\t\t\tchalk.red(\n\t\t\t\t'Error: Could not determine project 
root for interactive setup.'\n\t\t\t)\n\t\t);\n\t\tprocess.exit(1);\n\t}\n\n\tconst currentConfigResult = await getModelConfiguration({ projectRoot });\n\tconst currentModels = currentConfigResult.success\n\t\t? currentConfigResult.data.activeModels\n\t\t: { main: null, research: null, fallback: null };\n\t// Handle potential config load failure gracefully for the setup flow\n\tif (\n\t\t!currentConfigResult.success &&\n\t\tcurrentConfigResult.error?.code !== 'CONFIG_MISSING'\n\t) {\n\t\tconsole.warn(\n\t\t\tchalk.yellow(\n\t\t\t\t`Warning: Could not load current model configuration: ${currentConfigResult.error?.message || 'Unknown error'}. Proceeding with defaults.`\n\t\t\t)\n\t\t);\n\t}\n\n\t// Helper function to fetch OpenRouter models (duplicated for CLI context)\n\tfunction fetchOpenRouterModelsCLI() {\n\t\treturn new Promise((resolve) => {\n\t\t\tconst options = {\n\t\t\t\thostname: 'openrouter.ai',\n\t\t\t\tpath: '/api/v1/models',\n\t\t\t\tmethod: 'GET',\n\t\t\t\theaders: {\n\t\t\t\t\tAccept: 'application/json'\n\t\t\t\t}\n\t\t\t};\n\n\t\t\tconst req = https.request(options, (res) => {\n\t\t\t\tlet data = '';\n\t\t\t\tres.on('data', (chunk) => {\n\t\t\t\t\tdata += chunk;\n\t\t\t\t});\n\t\t\t\tres.on('end', () => {\n\t\t\t\t\tif (res.statusCode === 200) {\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tconst parsedData = JSON.parse(data);\n\t\t\t\t\t\t\tresolve(parsedData.data || []); // Return the array of models\n\t\t\t\t\t\t} catch (e) {\n\t\t\t\t\t\t\tconsole.error('Error parsing OpenRouter response:', e);\n\t\t\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\t`OpenRouter API request failed with status code: ${res.statusCode}`\n\t\t\t\t\t\t);\n\t\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t});\n\n\t\t\treq.on('error', (e) => {\n\t\t\t\tconsole.error('Error fetching OpenRouter models:', e);\n\t\t\t\tresolve(null); // Indicate 
failure\n\t\t\t});\n\t\t\treq.end();\n\t\t});\n\t}\n\n\t// Helper function to fetch Ollama models (duplicated for CLI context)\n\tfunction fetchOllamaModelsCLI(baseURL = 'http://localhost:11434/api') {\n\t\treturn new Promise((resolve) => {\n\t\t\ttry {\n\t\t\t\t// Parse the base URL to extract hostname, port, and base path\n\t\t\t\tconst url = new URL(baseURL);\n\t\t\t\tconst isHttps = url.protocol === 'https:';\n\t\t\t\tconst port = url.port || (isHttps ? 443 : 80);\n\t\t\t\tconst basePath = url.pathname.endsWith('/')\n\t\t\t\t\t? url.pathname.slice(0, -1)\n\t\t\t\t\t: url.pathname;\n\n\t\t\t\tconst options = {\n\t\t\t\t\thostname: url.hostname,\n\t\t\t\t\tport: parseInt(port, 10),\n\t\t\t\t\tpath: `${basePath}/tags`,\n\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\theaders: {\n\t\t\t\t\t\tAccept: 'application/json'\n\t\t\t\t\t}\n\t\t\t\t};\n\n\t\t\t\tconst requestLib = isHttps ? https : http;\n\t\t\t\tconst req = requestLib.request(options, (res) => {\n\t\t\t\t\tlet data = '';\n\t\t\t\t\tres.on('data', (chunk) => {\n\t\t\t\t\t\tdata += chunk;\n\t\t\t\t\t});\n\t\t\t\t\tres.on('end', () => {\n\t\t\t\t\t\tif (res.statusCode === 200) {\n\t\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\t\tconst parsedData = JSON.parse(data);\n\t\t\t\t\t\t\t\tresolve(parsedData.models || []); // Return the array of models\n\t\t\t\t\t\t\t} catch (e) {\n\t\t\t\t\t\t\t\tconsole.error('Error parsing Ollama response:', e);\n\t\t\t\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\t\t`Ollama API request failed with status code: ${res.statusCode}`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t});\n\n\t\t\t\treq.on('error', (e) => {\n\t\t\t\t\tconsole.error('Error fetching Ollama models:', e);\n\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t});\n\t\t\t\treq.end();\n\t\t\t} catch (e) {\n\t\t\t\tconsole.error('Error parsing Ollama base URL:', e);\n\t\t\t\tresolve(null); // 
Indicate failure\n\t\t\t}\n\t\t});\n\t}\n\n\t// Helper to get choices and default index for a role\n\tconst getPromptData = (role, allowNone = false) => {\n\t\tconst currentModel = currentModels[role]; // Use the fetched data\n\t\tconst allModelsRaw = getAvailableModels(); // Get all available models\n\n\t\t// Manually group models by provider\n\t\tconst modelsByProvider = allModelsRaw.reduce((acc, model) => {\n\t\t\tif (!acc[model.provider]) {\n\t\t\t\tacc[model.provider] = [];\n\t\t\t}\n\t\t\tacc[model.provider].push(model);\n\t\t\treturn acc;\n\t\t}, {});\n\n\t\tconst cancelOption = { name: '⏹ Cancel Model Setup', value: '__CANCEL__' }; // Symbol updated\n\t\tconst noChangeOption = currentModel?.modelId\n\t\t\t? {\n\t\t\t\t\tname: `✔ No change to current ${role} model (${currentModel.modelId})`, // Symbol updated\n\t\t\t\t\tvalue: '__NO_CHANGE__'\n\t\t\t\t}\n\t\t\t: null;\n\n\t\t// Define custom provider options\n\t\tconst customProviderOptions = [\n\t\t\t{ name: '* Custom OpenRouter model', value: '__CUSTOM_OPENROUTER__' },\n\t\t\t{ name: '* Custom Ollama model', value: '__CUSTOM_OLLAMA__' },\n\t\t\t{ name: '* Custom Bedrock model', value: '__CUSTOM_BEDROCK__' },\n\t\t\t{ name: '* Custom Azure model', value: '__CUSTOM_AZURE__' },\n\t\t\t{ name: '* Custom Vertex model', value: '__CUSTOM_VERTEX__' }\n\t\t];\n\n\t\tlet choices = [];\n\t\tlet defaultIndex = 0; // Default to 'Cancel'\n\n\t\t// Filter and format models allowed for this role using the manually grouped data\n\t\tconst roleChoices = Object.entries(modelsByProvider)\n\t\t\t.map(([provider, models]) => {\n\t\t\t\tconst providerModels = models\n\t\t\t\t\t.filter((m) => m.allowed_roles.includes(role))\n\t\t\t\t\t.map((m) => ({\n\t\t\t\t\t\tname: `${provider} / ${m.id} ${\n\t\t\t\t\t\t\tm.cost_per_1m_tokens\n\t\t\t\t\t\t\t\t? 
chalk.gray(\n\t\t\t\t\t\t\t\t\t\t`($${m.cost_per_1m_tokens.input.toFixed(2)} input | $${m.cost_per_1m_tokens.output.toFixed(2)} output)`\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t: ''\n\t\t\t\t\t\t}`,\n\t\t\t\t\t\tvalue: { id: m.id, provider },\n\t\t\t\t\t\tshort: `${provider}/${m.id}`\n\t\t\t\t\t}));\n\t\t\t\tif (providerModels.length > 0) {\n\t\t\t\t\treturn [...providerModels];\n\t\t\t\t}\n\t\t\t\treturn null;\n\t\t\t})\n\t\t\t.filter(Boolean)\n\t\t\t.flat();\n\n\t\t// Find the index of the currently selected model for setting the default\n\t\tlet currentChoiceIndex = -1;\n\t\tif (currentModel?.modelId && currentModel?.provider) {\n\t\t\tcurrentChoiceIndex = roleChoices.findIndex(\n\t\t\t\t(choice) =>\n\t\t\t\t\ttypeof choice.value === 'object' &&\n\t\t\t\t\tchoice.value.id === currentModel.modelId &&\n\t\t\t\t\tchoice.value.provider === currentModel.provider\n\t\t\t);\n\t\t}\n\n\t\t// Construct final choices list with custom options moved to bottom\n\t\tconst systemOptions = [];\n\t\tif (noChangeOption) {\n\t\t\tsystemOptions.push(noChangeOption);\n\t\t}\n\t\tsystemOptions.push(cancelOption);\n\n\t\tconst systemLength = systemOptions.length;\n\n\t\tif (allowNone) {\n\t\t\tchoices = [\n\t\t\t\t...systemOptions,\n\t\t\t\tnew inquirer.Separator('\\n── Standard Models ──'),\n\t\t\t\t{ name: '⚪ None (disable)', value: null },\n\t\t\t\t...roleChoices,\n\t\t\t\tnew inquirer.Separator('\\n── Custom Providers ──'),\n\t\t\t\t...customProviderOptions\n\t\t\t];\n\t\t\t// Adjust default index: System + Sep1 + None (+2)\n\t\t\tconst noneOptionIndex = systemLength + 1;\n\t\t\tdefaultIndex =\n\t\t\t\tcurrentChoiceIndex !== -1\n\t\t\t\t\t? 
currentChoiceIndex + systemLength + 2 // Offset by system options and separators\n\t\t\t\t\t: noneOptionIndex; // Default to 'None' if no current model matched\n\t\t} else {\n\t\t\tchoices = [\n\t\t\t\t...systemOptions,\n\t\t\t\tnew inquirer.Separator('\\n── Standard Models ──'),\n\t\t\t\t...roleChoices,\n\t\t\t\tnew inquirer.Separator('\\n── Custom Providers ──'),\n\t\t\t\t...customProviderOptions\n\t\t\t];\n\t\t\t// Adjust default index: System + Sep (+1)\n\t\t\tdefaultIndex =\n\t\t\t\tcurrentChoiceIndex !== -1\n\t\t\t\t\t? currentChoiceIndex + systemLength + 1 // Offset by system options and separator\n\t\t\t\t\t: noChangeOption\n\t\t\t\t\t\t? 1\n\t\t\t\t\t\t: 0; // Default to 'No Change' if present, else 'Cancel'\n\t\t}\n\n\t\t// Ensure defaultIndex is valid within the final choices array length\n\t\tif (defaultIndex < 0 || defaultIndex >= choices.length) {\n\t\t\t// If default calculation failed or pointed outside bounds, reset intelligently\n\t\t\tdefaultIndex = 0; // Default to 'Cancel'\n\t\t\tconsole.warn(\n\t\t\t\t`Warning: Could not determine default model for role '${role}'. 
Defaulting to 'Cancel'.`\n\t\t\t); // Add warning\n\t\t}\n\n\t\treturn { choices, default: defaultIndex };\n\t};\n\n\t// --- Generate choices using the helper ---\n\tconst mainPromptData = getPromptData('main');\n\tconst researchPromptData = getPromptData('research');\n\tconst fallbackPromptData = getPromptData('fallback', true); // Allow 'None' for fallback\n\n\t// Display helpful intro message\n\tconsole.log(chalk.cyan('\\n🎯 Interactive Model Setup'));\n\tconsole.log(chalk.gray('━'.repeat(50)));\n\tconsole.log(chalk.yellow('💡 Navigation tips:'));\n\tconsole.log(chalk.gray(' • Type to search and filter options'));\n\tconsole.log(chalk.gray(' • Use ↑↓ arrow keys to navigate results'));\n\tconsole.log(\n\t\tchalk.gray(\n\t\t\t' • Standard models are listed first, custom providers at bottom'\n\t\t)\n\t);\n\tconsole.log(chalk.gray(' • Press Enter to select\\n'));\n\n\t// Helper function to create search source for models\n\tconst createSearchSource = (choices, defaultValue) => {\n\t\treturn (searchTerm = '') => {\n\t\t\tconst filteredChoices = choices.filter((choice) => {\n\t\t\t\tif (choice.type === 'separator') return true; // Always show separators\n\t\t\t\tconst searchText = choice.name || '';\n\t\t\t\treturn searchText.toLowerCase().includes(searchTerm.toLowerCase());\n\t\t\t});\n\t\t\treturn Promise.resolve(filteredChoices);\n\t\t};\n\t};\n\n\tconst answers = {};\n\n\t// Main model selection\n\tanswers.mainModel = await search({\n\t\tmessage: 'Select the main model for generation/updates:',\n\t\tsource: createSearchSource(mainPromptData.choices, mainPromptData.default),\n\t\tpageSize: 15\n\t});\n\n\tif (answers.mainModel !== '__CANCEL__') {\n\t\t// Research model selection\n\t\tanswers.researchModel = await search({\n\t\t\tmessage: 'Select the research model:',\n\t\t\tsource: createSearchSource(\n\t\t\t\tresearchPromptData.choices,\n\t\t\t\tresearchPromptData.default\n\t\t\t),\n\t\t\tpageSize: 15\n\t\t});\n\n\t\tif (answers.researchModel !== '__CANCEL__') 
{\n\t\t\t// Fallback model selection\n\t\t\tanswers.fallbackModel = await search({\n\t\t\t\tmessage: 'Select the fallback model (optional):',\n\t\t\t\tsource: createSearchSource(\n\t\t\t\t\tfallbackPromptData.choices,\n\t\t\t\t\tfallbackPromptData.default\n\t\t\t\t),\n\t\t\t\tpageSize: 15\n\t\t\t});\n\t\t}\n\t}\n\n\tlet setupSuccess = true;\n\tlet setupConfigModified = false;\n\tconst coreOptionsSetup = { projectRoot }; // Pass root for setup actions\n\n\t// Helper to handle setting a model (including custom)\n\tasync function handleSetModel(role, selectedValue, currentModelId) {\n\t\tif (selectedValue === '__CANCEL__') {\n\t\t\tconsole.log(\n\t\t\t\tchalk.yellow(`\\nSetup canceled during ${role} model selection.`)\n\t\t\t);\n\t\t\tsetupSuccess = false; // Also mark success as false on cancel\n\t\t\treturn false; // Indicate cancellation\n\t\t}\n\n\t\t// Handle the new 'No Change' option\n\t\tif (selectedValue === '__NO_CHANGE__') {\n\t\t\tconsole.log(chalk.gray(`No change selected for ${role} model.`));\n\t\t\treturn true; // Indicate success, continue setup\n\t\t}\n\n\t\tlet modelIdToSet = null;\n\t\tlet providerHint = null;\n\t\tlet isCustomSelection = false;\n\n\t\tif (selectedValue === '__CUSTOM_OPENROUTER__') {\n\t\t\tisCustomSelection = true;\n\t\t\tconst { customId } = await inquirer.prompt([\n\t\t\t\t{\n\t\t\t\t\ttype: 'input',\n\t\t\t\t\tname: 'customId',\n\t\t\t\t\tmessage: `Enter the custom OpenRouter Model ID for the ${role} role:`\n\t\t\t\t}\n\t\t\t]);\n\t\t\tif (!customId) {\n\t\t\t\tconsole.log(chalk.yellow('No custom ID entered. 
Skipping role.'));\n\t\t\t\treturn true; // Continue setup, but don't set this role\n\t\t\t}\n\t\t\tmodelIdToSet = customId;\n\t\t\tproviderHint = CUSTOM_PROVIDERS.OPENROUTER;\n\t\t\t// Validate against live OpenRouter list\n\t\t\tconst openRouterModels = await fetchOpenRouterModelsCLI();\n\t\t\tif (\n\t\t\t\t!openRouterModels ||\n\t\t\t\t!openRouterModels.some((m) => m.id === modelIdToSet)\n\t\t\t) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t`Error: Model ID \"${modelIdToSet}\" not found in the live OpenRouter model list. Please check the ID.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tsetupSuccess = false;\n\t\t\t\treturn true; // Continue setup, but mark as failed\n\t\t\t}\n\t\t} else if (selectedValue === '__CUSTOM_OLLAMA__') {\n\t\t\tisCustomSelection = true;\n\t\t\tconst { customId } = await inquirer.prompt([\n\t\t\t\t{\n\t\t\t\t\ttype: 'input',\n\t\t\t\t\tname: 'customId',\n\t\t\t\t\tmessage: `Enter the custom Ollama Model ID for the ${role} role:`\n\t\t\t\t}\n\t\t\t]);\n\t\t\tif (!customId) {\n\t\t\t\tconsole.log(chalk.yellow('No custom ID entered. Skipping role.'));\n\t\t\t\treturn true; // Continue setup, but don't set this role\n\t\t\t}\n\t\t\tmodelIdToSet = customId;\n\t\t\tproviderHint = CUSTOM_PROVIDERS.OLLAMA;\n\t\t\t// Get the Ollama base URL from config for this role\n\t\t\tconst ollamaBaseURL = getBaseUrlForRole(role, projectRoot);\n\t\t\t// Validate against live Ollama list\n\t\t\tconst ollamaModels = await fetchOllamaModelsCLI(ollamaBaseURL);\n\t\t\tif (ollamaModels === null) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t`Error: Unable to connect to Ollama server at ${ollamaBaseURL}. 
Please ensure Ollama is running and try again.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tsetupSuccess = false;\n\t\t\t\treturn true; // Continue setup, but mark as failed\n\t\t\t} else if (!ollamaModels.some((m) => m.model === modelIdToSet)) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t`Error: Model ID \"${modelIdToSet}\" not found in the Ollama instance. Please verify the model is pulled and available.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`You can check available models with: curl ${ollamaBaseURL}/tags`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tsetupSuccess = false;\n\t\t\t\treturn true; // Continue setup, but mark as failed\n\t\t\t}\n\t\t} else if (selectedValue === '__CUSTOM_BEDROCK__') {\n\t\t\tisCustomSelection = true;\n\t\t\tconst { customId } = await inquirer.prompt([\n\t\t\t\t{\n\t\t\t\t\ttype: 'input',\n\t\t\t\t\tname: 'customId',\n\t\t\t\t\tmessage: `Enter the custom Bedrock Model ID for the ${role} role (e.g., anthropic.claude-3-sonnet-20240229-v1:0):`\n\t\t\t\t}\n\t\t\t]);\n\t\t\tif (!customId) {\n\t\t\t\tconsole.log(chalk.yellow('No custom ID entered. Skipping role.'));\n\t\t\t\treturn true; // Continue setup, but don't set this role\n\t\t\t}\n\t\t\tmodelIdToSet = customId;\n\t\t\tproviderHint = CUSTOM_PROVIDERS.BEDROCK;\n\n\t\t\t// Check if AWS environment variables exist\n\t\t\tif (\n\t\t\t\t!process.env.AWS_ACCESS_KEY_ID ||\n\t\t\t\t!process.env.AWS_SECRET_ACCESS_KEY\n\t\t\t) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t'Warning: AWS_ACCESS_KEY_ID and/or AWS_SECRET_ACCESS_KEY environment variables are missing. Will fallback to system configuration. (ex: aws config files or ec2 instance profiles)'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tsetupSuccess = false;\n\t\t\t\treturn true; // Continue setup, but mark as failed\n\t\t\t}\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(\n\t\t\t\t\t`Custom Bedrock model \"${modelIdToSet}\" will be used. 
No validation performed.`\n\t\t\t\t)\n\t\t\t);\n\t\t} else if (selectedValue === '__CUSTOM_AZURE__') {\n\t\t\tisCustomSelection = true;\n\t\t\tconst { customId } = await inquirer.prompt([\n\t\t\t\t{\n\t\t\t\t\ttype: 'input',\n\t\t\t\t\tname: 'customId',\n\t\t\t\t\tmessage: `Enter the custom Azure OpenAI Model ID for the ${role} role (e.g., gpt-4o):`\n\t\t\t\t}\n\t\t\t]);\n\t\t\tif (!customId) {\n\t\t\t\tconsole.log(chalk.yellow('No custom ID entered. Skipping role.'));\n\t\t\t\treturn true; // Continue setup, but don't set this role\n\t\t\t}\n\t\t\tmodelIdToSet = customId;\n\t\t\tproviderHint = CUSTOM_PROVIDERS.AZURE;\n\n\t\t\t// Check if Azure environment variables exist\n\t\t\tif (\n\t\t\t\t!process.env.AZURE_OPENAI_API_KEY ||\n\t\t\t\t!process.env.AZURE_OPENAI_ENDPOINT\n\t\t\t) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: AZURE_OPENAI_API_KEY and/or AZURE_OPENAI_ENDPOINT environment variables are missing. Please set them before using custom Azure models.'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tsetupSuccess = false;\n\t\t\t\treturn true; // Continue setup, but mark as failed\n\t\t\t}\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(\n\t\t\t\t\t`Custom Azure OpenAI model \"${modelIdToSet}\" will be used. No validation performed.`\n\t\t\t\t)\n\t\t\t);\n\t\t} else if (selectedValue === '__CUSTOM_VERTEX__') {\n\t\t\tisCustomSelection = true;\n\t\t\tconst { customId } = await inquirer.prompt([\n\t\t\t\t{\n\t\t\t\t\ttype: 'input',\n\t\t\t\t\tname: 'customId',\n\t\t\t\t\tmessage: `Enter the custom Vertex AI Model ID for the ${role} role (e.g., gemini-1.5-pro-002):`\n\t\t\t\t}\n\t\t\t]);\n\t\t\tif (!customId) {\n\t\t\t\tconsole.log(chalk.yellow('No custom ID entered. 
Skipping role.'));\n\t\t\t\treturn true; // Continue setup, but don't set this role\n\t\t\t}\n\t\t\tmodelIdToSet = customId;\n\t\t\tproviderHint = CUSTOM_PROVIDERS.VERTEX;\n\n\t\t\t// Check if Google/Vertex environment variables exist\n\t\t\tif (\n\t\t\t\t!process.env.GOOGLE_API_KEY &&\n\t\t\t\t!process.env.GOOGLE_APPLICATION_CREDENTIALS\n\t\t\t) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: Either GOOGLE_API_KEY or GOOGLE_APPLICATION_CREDENTIALS environment variable is required. Please set one before using custom Vertex models.'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tsetupSuccess = false;\n\t\t\t\treturn true; // Continue setup, but mark as failed\n\t\t\t}\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(\n\t\t\t\t\t`Custom Vertex AI model \"${modelIdToSet}\" will be used. No validation performed.`\n\t\t\t\t)\n\t\t\t);\n\t\t} else if (\n\t\t\tselectedValue &&\n\t\t\ttypeof selectedValue === 'object' &&\n\t\t\tselectedValue.id\n\t\t) {\n\t\t\t// Standard model selected from list\n\t\t\tmodelIdToSet = selectedValue.id;\n\t\t\tproviderHint = selectedValue.provider; // Provider is known\n\t\t} else if (selectedValue === null && role === 'fallback') {\n\t\t\t// Handle disabling fallback\n\t\t\tmodelIdToSet = null;\n\t\t\tproviderHint = null;\n\t\t} else if (selectedValue) {\n\t\t\tconsole.error(\n\t\t\t\tchalk.red(\n\t\t\t\t\t`Internal Error: Unexpected selection value for ${role}: ${JSON.stringify(selectedValue)}`\n\t\t\t\t)\n\t\t\t);\n\t\t\tsetupSuccess = false;\n\t\t\treturn true;\n\t\t}\n\n\t\t// Only proceed if there's a change to be made\n\t\tif (modelIdToSet !== currentModelId) {\n\t\t\tif (modelIdToSet) {\n\t\t\t\t// Set a specific model (standard or custom)\n\t\t\t\tconst result = await setModel(role, modelIdToSet, {\n\t\t\t\t\t...coreOptionsSetup,\n\t\t\t\t\tproviderHint // Pass the hint\n\t\t\t\t});\n\t\t\t\tif (result.success) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t`Set ${role} model: ${result.data.provider} / 
${result.data.modelId}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tif (result.data.warning) {\n\t\t\t\t\t\t// Display warning if returned by setModel\n\t\t\t\t\t\tconsole.log(chalk.yellow(result.data.warning));\n\t\t\t\t\t}\n\t\t\t\t\tsetupConfigModified = true;\n\t\t\t\t} else {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t`Error setting ${role} model: ${result.error?.message || 'Unknown'}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tsetupSuccess = false;\n\t\t\t\t}\n\t\t\t} else if (role === 'fallback') {\n\t\t\t\t// Disable fallback model\n\t\t\t\tconst currentCfg = getConfig(projectRoot);\n\t\t\t\tif (currentCfg?.models?.fallback?.modelId) {\n\t\t\t\t\t// Check if it was actually set before clearing\n\t\t\t\t\tcurrentCfg.models.fallback = {\n\t\t\t\t\t\t...currentCfg.models.fallback,\n\t\t\t\t\t\tprovider: undefined,\n\t\t\t\t\t\tmodelId: undefined\n\t\t\t\t\t};\n\t\t\t\t\tif (writeConfig(currentCfg, projectRoot)) {\n\t\t\t\t\t\tconsole.log(chalk.blue('Fallback model disabled.'));\n\t\t\t\t\t\tsetupConfigModified = true;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red('Failed to disable fallback model in config file.')\n\t\t\t\t\t\t);\n\t\t\t\t\t\tsetupSuccess = false;\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tconsole.log(chalk.blue('Fallback model was already disabled.'));\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true; // Indicate setup should continue\n\t}\n\n\t// Process answers using the handler\n\tif (\n\t\t!(await handleSetModel(\n\t\t\t'main',\n\t\t\tanswers.mainModel,\n\t\t\tcurrentModels.main?.modelId // <--- Now 'currentModels' is defined\n\t\t))\n\t) {\n\t\treturn false; // Explicitly return false if cancelled\n\t}\n\tif (\n\t\t!(await handleSetModel(\n\t\t\t'research',\n\t\t\tanswers.researchModel,\n\t\t\tcurrentModels.research?.modelId // <--- Now 'currentModels' is defined\n\t\t))\n\t) {\n\t\treturn false; // Explicitly return false if cancelled\n\t}\n\tif (\n\t\t!(await 
handleSetModel(\n\t\t\t'fallback',\n\t\t\tanswers.fallbackModel,\n\t\t\tcurrentModels.fallback?.modelId // <--- Now 'currentModels' is defined\n\t\t))\n\t) {\n\t\treturn false; // Explicitly return false if cancelled\n\t}\n\n\tif (setupSuccess && setupConfigModified) {\n\t\tconsole.log(chalk.green.bold('\\nModel setup complete!'));\n\t} else if (setupSuccess && !setupConfigModified) {\n\t\tconsole.log(chalk.yellow('\\nNo changes made to model configuration.'));\n\t} else if (!setupSuccess) {\n\t\tconsole.error(\n\t\t\tchalk.red(\n\t\t\t\t'\\nErrors occurred during model selection. Please review and try again.'\n\t\t\t)\n\t\t);\n\t}\n\treturn true; // Indicate setup flow completed (not cancelled)\n\t// Let the main command flow continue to display results\n}\n\n/**\n * Configure and register CLI commands\n * @param {Object} program - Commander program instance\n */\nfunction registerCommands(programInstance) {\n\t// Add global error handler for unknown options\n\tprogramInstance.on('option:unknown', function (unknownOption) {\n\t\tconst commandName = this._name || 'unknown';\n\t\tconsole.error(chalk.red(`Error: Unknown option '${unknownOption}'`));\n\t\tconsole.error(\n\t\t\tchalk.yellow(\n\t\t\t\t`Run 'task-master ${commandName} --help' to see available options`\n\t\t\t)\n\t\t);\n\t\tprocess.exit(1);\n\t});\n\n\t// parse-prd command\n\tprogramInstance\n\t\t.command('parse-prd')\n\t\t.description('Parse a PRD file and generate tasks')\n\t\t.argument('[file]', 'Path to the PRD file')\n\t\t.option(\n\t\t\t'-i, --input ',\n\t\t\t'Path to the PRD file (alternative to positional argument)'\n\t\t)\n\t\t.option('-o, --output ', 'Output file path')\n\t\t.option(\n\t\t\t'-n, --num-tasks ',\n\t\t\t'Number of tasks to generate',\n\t\t\tgetDefaultNumTasks()\n\t\t)\n\t\t.option('-f, --force', 'Skip confirmation when overwriting existing tasks')\n\t\t.option(\n\t\t\t'--append',\n\t\t\t'Append new tasks to existing tasks.json instead of 
overwriting'\n\t\t)\n\t\t.option(\n\t\t\t'-r, --research',\n\t\t\t'Use Perplexity AI for research-backed task generation, providing more comprehensive and accurate task breakdown'\n\t\t)\n\t\t.option('--tag ', 'Specify tag context for task operations')\n\t\t.action(async (file, options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tlet taskMaster;\n\t\t\ttry {\n\t\t\t\tconst initOptions = {\n\t\t\t\t\tprdPath: file || options.input || true,\n\t\t\t\t\ttag: options.tag\n\t\t\t\t};\n\t\t\t\t// Only include tasksPath if output is explicitly specified\n\t\t\t\tif (options.output) {\n\t\t\t\t\tinitOptions.tasksPath = options.output;\n\t\t\t\t}\n\t\t\t\ttaskMaster = initTaskMaster(initOptions);\n\t\t\t} catch (error) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\t`${chalk.white.bold('Parse PRD Help')}\\n\\n${chalk.cyan('Usage:')}\\n task-master parse-prd [options]\\n\\n${chalk.cyan('Options:')}\\n -i, --input Path to the PRD file (alternative to positional argument)\\n -o, --output Output file path (default: .taskmaster/tasks/tasks.json)\\n -n, --num-tasks Number of tasks to generate (default: 10)\\n -f, --force Skip confirmation when overwriting existing tasks\\n --append Append new tasks to existing tasks.json instead of overwriting\\n -r, --research Use Perplexity AI for research-backed task generation\\n\\n${chalk.cyan('Example:')}\\n task-master parse-prd requirements.txt --num-tasks 15\\n task-master parse-prd --input=requirements.txt\\n task-master parse-prd --force\\n task-master parse-prd requirements_v2.txt --append\\n task-master parse-prd requirements.txt --research\\n\\n${chalk.yellow('Note: This command will:')}\\n 1. Look for a PRD file at ${TASKMASTER_DOCS_DIR}/PRD.md by default\\n 2. Use the file specified by --input or positional argument if provided\\n 3. 
Generate tasks from the PRD and either:\\n - Overwrite any existing tasks.json file (default)\\n - Append to existing tasks.json if --append is used`,\n\t\t\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconsole.error(chalk.red(`\\nError: ${error.message}`));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tconst numTasks = parseInt(options.numTasks, 10);\n\t\t\tconst force = options.force || false;\n\t\t\tconst append = options.append || false;\n\t\t\tconst research = options.research || false;\n\t\t\tlet useForce = force;\n\t\t\tconst useAppend = append;\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\t// Helper function to check if there are existing tasks in the target tag and confirm overwrite\n\t\t\tasync function confirmOverwriteIfNeeded() {\n\t\t\t\t// Check if there are existing tasks in the target tag\n\t\t\t\tlet hasExistingTasksInTag = false;\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\t\t\t\tif (fs.existsSync(tasksPath)) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\t// Read the entire file to check if the tag exists\n\t\t\t\t\t\tconst existingFileContent = fs.readFileSync(tasksPath, 'utf8');\n\t\t\t\t\t\tconst allData = JSON.parse(existingFileContent);\n\n\t\t\t\t\t\t// Check if the target tag exists and has tasks\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\tallData[tag] &&\n\t\t\t\t\t\t\tArray.isArray(allData[tag].tasks) &&\n\t\t\t\t\t\t\tallData[tag].tasks.length > 0\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\thasExistingTasksInTag = true;\n\t\t\t\t\t\t}\n\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\t// If we can't read the file or parse it, assume no existing tasks in this tag\n\t\t\t\t\t\thasExistingTasksInTag = false;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Only show confirmation if there are existing tasks in the target tag\n\t\t\t\tif (hasExistingTasksInTag && !useForce && !useAppend) {\n\t\t\t\t\tconst 
overwrite = await confirmTaskOverwrite(tasksPath);\n\t\t\t\t\tif (!overwrite) {\n\t\t\t\t\t\tlog('info', 'Operation cancelled.');\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t\t// If user confirms 'y', we should set useForce = true for the parsePRD call\n\t\t\t\t\t// Only overwrite if not appending\n\t\t\t\t\tuseForce = true;\n\t\t\t\t}\n\t\t\t\treturn true;\n\t\t\t}\n\n\t\t\tlet spinner;\n\n\t\t\ttry {\n\t\t\t\tif (!(await confirmOverwriteIfNeeded())) return;\n\n\t\t\t\tconsole.log(chalk.blue(`Parsing PRD file: ${taskMaster.getPrdPath()}`));\n\t\t\t\tconsole.log(chalk.blue(`Generating ${numTasks} tasks...`));\n\t\t\t\tif (append) {\n\t\t\t\t\tconsole.log(chalk.blue('Appending to existing tasks...'));\n\t\t\t\t}\n\t\t\t\tif (research) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t'Using Perplexity AI for research-backed task generation'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tspinner = ora('Parsing PRD and generating tasks...\\n').start();\n\t\t\t\t// Handle case where getTasksPath() returns null\n\t\t\t\tconst outputPath =\n\t\t\t\t\ttaskMaster.getTasksPath() ||\n\t\t\t\t\tpath.join(taskMaster.getProjectRoot(), TASKMASTER_TASKS_FILE);\n\t\t\t\tawait parsePRD(taskMaster.getPrdPath(), outputPath, numTasks, {\n\t\t\t\t\tappend: useAppend,\n\t\t\t\t\tforce: useForce,\n\t\t\t\t\tresearch: research,\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\ttag: tag\n\t\t\t\t});\n\t\t\t\tspinner.succeed('Tasks generated successfully!');\n\t\t\t} catch (error) {\n\t\t\t\tif (spinner) {\n\t\t\t\t\tspinner.fail(`Error parsing PRD: ${error.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tconsole.error(chalk.red(`Error parsing PRD: ${error.message}`));\n\t\t\t\t}\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// update command\n\tprogramInstance\n\t\t.command('update')\n\t\t.description(\n\t\t\t'Update multiple tasks with ID >= \"from\" based on new information or implementation changes'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file 
',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'--from ',\n\t\t\t'Task ID to start updating from (tasks with ID >= this value will be updated)',\n\t\t\t'1'\n\t\t)\n\t\t.option(\n\t\t\t'-p, --prompt ',\n\t\t\t'Prompt explaining the changes or new context (required)'\n\t\t)\n\t\t.option(\n\t\t\t'-r, --research',\n\t\t\t'Use Perplexity AI for research-backed task updates'\n\t\t)\n\t\t.option('--tag ', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst fromId = parseInt(options.from, 10); // Validation happens here\n\t\t\tconst prompt = options.prompt;\n\t\t\tconst useResearch = options.research || false;\n\n\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\t// Check if there's an 'id' option which is a common mistake (instead of 'from')\n\t\t\tif (\n\t\t\t\tprocess.argv.includes('--id') ||\n\t\t\t\tprocess.argv.some((arg) => arg.startsWith('--id='))\n\t\t\t) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red('Error: The update command uses --from=, not --id=')\n\t\t\t\t);\n\t\t\t\tconsole.log(chalk.yellow('\\nTo update multiple tasks:'));\n\t\t\t\tconsole.log(\n\t\t\t\t\t` task-master update --from=${fromId} --prompt=\"Your prompt here\"`\n\t\t\t\t);\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t'\\nTo update a single specific task, use the update-task command instead:'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconsole.log(\n\t\t\t\t\t` task-master update-task --id= --prompt=\"Your prompt here\"`\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tif (!prompt) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: --prompt parameter is 
required. Please provide information about the changes.'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(\n\t\t\t\t\t`Updating tasks from ID >= ${fromId} with prompt: \"${prompt}\"`\n\t\t\t\t)\n\t\t\t);\n\t\t\tconsole.log(chalk.blue(`Tasks file: ${tasksPath}`));\n\n\t\t\tif (useResearch) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue('Using Perplexity AI for research-backed task updates')\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Call core updateTasks, passing context for CLI\n\t\t\tawait updateTasks(\n\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\tfromId,\n\t\t\t\tprompt,\n\t\t\t\tuseResearch,\n\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag } // Pass context with projectRoot and tag\n\t\t\t);\n\t\t});\n\n\t// update-task command\n\tprogramInstance\n\t\t.command('update-task')\n\t\t.description(\n\t\t\t'Update a single specific task by ID with new information (use --id parameter)'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file ',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('-i, --id ', 'Task ID to update (required)')\n\t\t.option(\n\t\t\t'-p, --prompt ',\n\t\t\t'Prompt explaining the changes or new context (required)'\n\t\t)\n\t\t.option(\n\t\t\t'-r, --research',\n\t\t\t'Use Perplexity AI for research-backed task updates'\n\t\t)\n\t\t.option(\n\t\t\t'--append',\n\t\t\t'Append timestamped information to task details instead of full update'\n\t\t)\n\t\t.option('--tag ', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true,\n\t\t\t\t\ttag: options.tag\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Resolve tag using standard pattern\n\t\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t\t// Show current tag context\n\t\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\t\t// Validate required 
parameters\n\t\t\t\tif (!options.id) {\n\t\t\t\t\tconsole.error(chalk.red('Error: --id parameter is required'));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Usage example: task-master update-task --id=23 --prompt=\"Update with new information\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\t// Parse the task ID and validate it's a number\n\t\t\t\tconst taskId = parseInt(options.id, 10);\n\t\t\t\tif (Number.isNaN(taskId) || taskId <= 0) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t`Error: Invalid task ID: ${options.id}. Task ID must be a positive integer.`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Usage example: task-master update-task --id=23 --prompt=\"Update with new information\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tif (!options.prompt) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t'Error: --prompt parameter is required. 
Please provide information about the changes.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Usage example: task-master update-task --id=23 --prompt=\"Update with new information\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst prompt = options.prompt;\n\t\t\t\tconst useResearch = options.research || false;\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tif (tasksPath === TASKMASTER_TASKS_FILE) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t'Hint: Run task-master init or task-master parse-prd to create tasks.json first'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t`Hint: Check if the file path is correct: ${tasksPath}`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Updating task ${taskId} with prompt: \"${prompt}\"`)\n\t\t\t\t);\n\t\t\t\tconsole.log(chalk.blue(`Tasks file: ${tasksPath}`));\n\n\t\t\t\tif (useResearch) {\n\t\t\t\t\t// Verify Perplexity API key exists if using research\n\t\t\t\t\tif (!isApiKeySet('perplexity')) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t'Warning: PERPLEXITY_API_KEY environment variable is missing. 
Research-backed updates will not be available.'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow('Falling back to Claude AI for task update.')\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.blue('Using Perplexity AI for research-backed task update')\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tconst result = await updateTaskById(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\ttaskId,\n\t\t\t\t\tprompt,\n\t\t\t\t\tuseResearch,\n\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag },\n\t\t\t\t\t'text',\n\t\t\t\t\toptions.append || false\n\t\t\t\t);\n\n\t\t\t\t// If the task wasn't updated (e.g., if it was already marked as done)\n\t\t\t\tif (!result) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'\\nTask update was not completed. Review the messages above for details.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\n\t\t\t\t// Provide more helpful error messages for common issues\n\t\t\t\tif (\n\t\t\t\t\terror.message.includes('task') &&\n\t\t\t\t\terror.message.includes('not found')\n\t\t\t\t) {\n\t\t\t\t\tconsole.log(chalk.yellow('\\nTo fix this issue:'));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t' 1. Run task-master list to see all available task IDs'\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(' 2. Use a valid task ID with the --id parameter');\n\t\t\t\t} else if (error.message.includes('API key')) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'\\nThis error is related to API keys. 
Check your environment variables.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Use getDebugFlag getter instead of CONFIG.debug\n\t\t\t\tif (getDebugFlag()) {\n\t\t\t\t\tconsole.error(error);\n\t\t\t\t}\n\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// update-subtask command\n\tprogramInstance\n\t\t.command('update-subtask')\n\t\t.description(\n\t\t\t'Update a subtask by appending additional timestamped information'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file ',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-i, --id ',\n\t\t\t'Subtask ID to update in format \"parentId.subtaskId\" (required)'\n\t\t)\n\t\t.option(\n\t\t\t'-p, --prompt ',\n\t\t\t'Prompt explaining what information to add (required)'\n\t\t)\n\t\t.option('-r, --research', 'Use Perplexity AI for research-backed updates')\n\t\t.option('--tag ', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true,\n\t\t\t\t\ttag: options.tag\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Resolve tag using standard pattern\n\t\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t\t// Show current tag context\n\t\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\t\t// Validate required parameters\n\t\t\t\tif (!options.id) {\n\t\t\t\t\tconsole.error(chalk.red('Error: --id parameter is required'));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Usage example: task-master update-subtask --id=5.2 --prompt=\"Add more details about the API endpoint\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\t// Validate subtask ID format (should contain a dot)\n\t\t\t\tconst subtaskId = options.id;\n\t\t\t\tif (!subtaskId.includes('.')) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t`Error: Invalid subtask ID format: ${subtaskId}. 
Subtask ID must be in format \"parentId.subtaskId\"`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Usage example: task-master update-subtask --id=5.2 --prompt=\"Add more details about the API endpoint\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tif (!options.prompt) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t'Error: --prompt parameter is required. Please provide information to add to the subtask.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Usage example: task-master update-subtask --id=5.2 --prompt=\"Add more details about the API endpoint\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst prompt = options.prompt;\n\t\t\t\tconst useResearch = options.research || false;\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tif (tasksPath === TASKMASTER_TASKS_FILE) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t'Hint: Run task-master init or task-master parse-prd to create tasks.json first'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t`Hint: Check if the file path is correct: ${tasksPath}`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Updating subtask ${subtaskId} with prompt: \"${prompt}\"`)\n\t\t\t\t);\n\t\t\t\tconsole.log(chalk.blue(`Tasks file: ${tasksPath}`));\n\n\t\t\t\tif (useResearch) {\n\t\t\t\t\t// Verify Perplexity API key exists if using research\n\t\t\t\t\tif (!isApiKeySet('perplexity')) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t'Warning: PERPLEXITY_API_KEY environment variable is 
missing. Research-backed updates will not be available.'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow('Falling back to Claude AI for subtask update.')\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t\t'Using Perplexity AI for research-backed subtask update'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tconst result = await updateSubtaskById(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\tsubtaskId,\n\t\t\t\t\tprompt,\n\t\t\t\t\tuseResearch,\n\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t);\n\n\t\t\t\tif (!result) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'\\nSubtask update was not completed. Review the messages above for details.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\n\t\t\t\t// Provide more helpful error messages for common issues\n\t\t\t\tif (\n\t\t\t\t\terror.message.includes('subtask') &&\n\t\t\t\t\terror.message.includes('not found')\n\t\t\t\t) {\n\t\t\t\t\tconsole.log(chalk.yellow('\\nTo fix this issue:'));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t' 1. Run task-master list --with-subtasks to see all available subtask IDs'\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t' 2. Use a valid subtask ID with the --id parameter in format \"parentId.subtaskId\"'\n\t\t\t\t\t);\n\t\t\t\t} else if (error.message.includes('API key')) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'\\nThis error is related to API keys. 
Check your environment variables.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Use getDebugFlag getter instead of CONFIG.debug\n\t\t\t\tif (getDebugFlag()) {\n\t\t\t\t\tconsole.error(error);\n\t\t\t\t}\n\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// generate command\n\tprogramInstance\n\t\t.command('generate')\n\t\t.description('Generate task files from tasks.json')\n\t\t.option(\n\t\t\t'-f, --file ',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-o, --output ',\n\t\t\t'Output directory',\n\t\t\tpath.dirname(TASKMASTER_TASKS_FILE)\n\t\t)\n\t\t.option('--tag ', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst outputDir = options.output;\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(`Generating task files from: ${taskMaster.getTasksPath()}`)\n\t\t\t);\n\t\t\tconsole.log(chalk.blue(`Output directory: ${outputDir}`));\n\n\t\t\tawait generateTaskFiles(taskMaster.getTasksPath(), outputDir, {\n\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\ttag\n\t\t\t});\n\t\t});\n\n\t// set-status command\n\tprogramInstance\n\t\t.command('set-status')\n\t\t.alias('mark')\n\t\t.alias('set')\n\t\t.description('Set the status of a task')\n\t\t.option(\n\t\t\t'-i, --id ',\n\t\t\t'Task ID (can be comma-separated for multiple tasks)'\n\t\t)\n\t\t.option(\n\t\t\t'-s, --status ',\n\t\t\t`New status (one of: ${TASK_STATUS_OPTIONS.join(', ')})`\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file ',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--tag ', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: 
options.tag
			});

			const taskId = options.id;
			const status = options.status;

			// Both flags are mandatory; bail out early with a usage error.
			if (!taskId || !status) {
				console.error(chalk.red('Error: Both --id and --status are required'));
				process.exit(1);
			}

			// Reject unknown status values before touching the tasks file.
			if (!isValidTaskStatus(status)) {
				console.error(
					chalk.red(
						`Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`
					)
				);

				process.exit(1);
			}
			const tag = taskMaster.getCurrentTag();

			displayCurrentTagIndicator(tag);

			console.log(
				chalk.blue(`Setting status of task(s) ${taskId} to: ${status}`)
			);

			// setTaskStatus accepts comma-separated IDs, so taskId may name
			// several tasks at once (see the -i option help above).
			await setTaskStatus(taskMaster.getTasksPath(), taskId, status, {
				projectRoot: taskMaster.getProjectRoot(),
				tag
			});
		});

	// list command
	programInstance
		.command('list')
		.description('List all tasks')
		.option(
			'-f, --file ',
			'Path to the tasks file',
			TASKMASTER_TASKS_FILE
		)
		.option(
			'-r, --report ',
			'Path to the complexity report file',
			COMPLEXITY_REPORT_FILE
		)
		.option('-s, --status ', 'Filter by status')
		.option('--with-subtasks', 'Show subtasks for each task')
		.option('--tag ', 'Specify tag context for task operations')
		.action(async (options) => {
			// Initialize TaskMaster
			const initOptions = {
				tasksPath: options.file || true,
				tag: options.tag
			};

			// Only pass complexityReportPath if user provided a custom path
			if (options.report && options.report !== COMPLEXITY_REPORT_FILE) {
				initOptions.complexityReportPath = options.report;
			}

			const taskMaster = initTaskMaster(initOptions);

			const statusFilter = options.status;
			const withSubtasks = options.withSubtasks || false;
			const tag = taskMaster.getCurrentTag();
			// Show current tag 
context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(`Listing tasks from: ${taskMaster.getTasksPath()}`)\n\t\t\t);\n\t\t\tif (statusFilter) {\n\t\t\t\tconsole.log(chalk.blue(`Filtering by status: ${statusFilter}`));\n\t\t\t}\n\t\t\tif (withSubtasks) {\n\t\t\t\tconsole.log(chalk.blue('Including subtasks in listing'));\n\t\t\t}\n\n\t\t\tawait listTasks(\n\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\tstatusFilter,\n\t\t\t\ttaskMaster.getComplexityReportPath(),\n\t\t\t\twithSubtasks,\n\t\t\t\t'text',\n\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t);\n\t\t});\n\n\t// expand command\n\tprogramInstance\n\t\t.command('expand')\n\t\t.description('Expand a task into subtasks using AI')\n\t\t.option('-i, --id ', 'ID of the task to expand')\n\t\t.option(\n\t\t\t'-a, --all',\n\t\t\t'Expand all pending tasks based on complexity analysis'\n\t\t)\n\t\t.option(\n\t\t\t'-n, --num ',\n\t\t\t'Number of subtasks to generate (uses complexity analysis by default if available)'\n\t\t)\n\t\t.option(\n\t\t\t'-r, --research',\n\t\t\t'Enable research-backed generation (e.g., using Perplexity)',\n\t\t\tfalse\n\t\t)\n\t\t.option('-p, --prompt ', 'Additional context for subtask generation')\n\t\t.option('-f, --force', 'Force expansion even if subtasks exist', false) // Ensure force option exists\n\t\t.option(\n\t\t\t'--file ',\n\t\t\t'Path to the tasks file (relative to project root)',\n\t\t\tTASKMASTER_TASKS_FILE // Allow file override\n\t\t) // Allow file override\n\t\t.option(\n\t\t\t'-cr, --complexity-report ',\n\t\t\t'Path to the report file',\n\t\t\tCOMPLEXITY_REPORT_FILE\n\t\t)\n\t\t.option('--tag ', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\tif (options.complexityReport) {\n\t\t\t\tinitOptions.complexityReportPath = 
options.complexityReport;\n\t\t\t}\n\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (options.all) {\n\t\t\t\t// --- Handle expand --all ---\n\t\t\t\tconsole.log(chalk.blue('Expanding all pending tasks...'));\n\t\t\t\t// Updated call to the refactored expandAllTasks\n\t\t\t\ttry {\n\t\t\t\t\tconst result = await expandAllTasks(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\toptions.num, // Pass num\n\t\t\t\t\t\toptions.research, // Pass research flag\n\t\t\t\t\t\toptions.prompt, // Pass additional context\n\t\t\t\t\t\toptions.force, // Pass force flag\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\t\t\ttag,\n\t\t\t\t\t\t\tcomplexityReportPath: taskMaster.getComplexityReportPath()\n\t\t\t\t\t\t} // Pass context with projectRoot and tag\n\t\t\t\t\t\t// outputFormat defaults to 'text' in expandAllTasks for CLI\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error expanding all tasks: ${error.message}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t} else if (options.id) {\n\t\t\t\t// --- Handle expand --id (Should be correct from previous refactor) ---\n\t\t\t\tif (!options.id) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red('Error: Task ID is required unless using --all.')\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconsole.log(chalk.blue(`Expanding task ${options.id}...`));\n\t\t\t\ttry {\n\t\t\t\t\t// Call the refactored expandTask function\n\t\t\t\t\tawait expandTask(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\toptions.id,\n\t\t\t\t\t\toptions.num,\n\t\t\t\t\t\toptions.research,\n\t\t\t\t\t\toptions.prompt,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\t\t\ttag,\n\t\t\t\t\t\t\tcomplexityReportPath: taskMaster.getComplexityReportPath()\n\t\t\t\t\t\t}, // Pass 
context with projectRoot and tag\n\t\t\t\t\t\toptions.force // Pass the force flag down\n\t\t\t\t\t);\n\t\t\t\t\t// expandTask logs its own success/failure for single task\n\t\t\t\t} catch (error) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error expanding task ${options.id}: ${error.message}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red('Error: You must specify either a task ID (--id) or --all.')\n\t\t\t\t);\n\t\t\t\tprogramInstance.help(); // Show help\n\t\t\t}\n\t\t});\n\n\t// analyze-complexity command\n\tprogramInstance\n\t\t.command('analyze-complexity')\n\t\t.description(\n\t\t\t`Analyze tasks and generate expansion recommendations${chalk.reset('')}`\n\t\t)\n\t\t.option('-o, --output ', 'Output file path for the report')\n\t\t.option(\n\t\t\t'-m, --model ',\n\t\t\t'LLM model to use for analysis (defaults to configured model)'\n\t\t)\n\t\t.option(\n\t\t\t'-t, --threshold ',\n\t\t\t'Minimum complexity score to recommend expansion (1-10)',\n\t\t\t'5'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file ',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-r, --research',\n\t\t\t'Use Perplexity AI for research-backed complexity analysis'\n\t\t)\n\t\t.option(\n\t\t\t'-i, --id ',\n\t\t\t'Comma-separated list of specific task IDs to analyze (e.g., \"1,3,5\")'\n\t\t)\n\t\t.option('--from ', 'Starting task ID in a range to analyze')\n\t\t.option('--to ', 'Ending task ID in a range to analyze')\n\t\t.option('--tag ', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true, // Tasks file is required to analyze\n\t\t\t\ttag: options.tag\n\t\t\t};\n\t\t\t// Only include complexityReportPath if output is explicitly specified\n\t\t\tif (options.output) {\n\t\t\t\tinitOptions.complexityReportPath = options.output;\n\t\t\t}\n\n\t\t\tconst taskMaster 
= initTaskMaster(initOptions);\n\n\t\t\tconst modelOverride = options.model;\n\t\t\tconst thresholdScore = parseFloat(options.threshold);\n\t\t\tconst useResearch = options.research || false;\n\n\t\t\t// Use the provided tag, or the current active tag, or default to 'master'\n\t\t\tconst targetTag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(targetTag);\n\n\t\t\t// Use user's explicit output path if provided, otherwise use tag-aware default\n\t\t\tconst outputPath = taskMaster.getComplexityReportPath();\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(\n\t\t\t\t\t`Analyzing task complexity from: ${taskMaster.getTasksPath()}`\n\t\t\t\t)\n\t\t\t);\n\t\t\tconsole.log(chalk.blue(`Output report will be saved to: ${outputPath}`));\n\n\t\t\tif (options.id) {\n\t\t\t\tconsole.log(chalk.blue(`Analyzing specific task IDs: ${options.id}`));\n\t\t\t} else if (options.from || options.to) {\n\t\t\t\tconst fromStr = options.from ? options.from : 'first';\n\t\t\t\tconst toStr = options.to ? 
options.to : 'last';\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Analyzing tasks in range: ${fromStr} to ${toStr}`)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tif (useResearch) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t'Using Perplexity AI for research-backed complexity analysis'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Update options with tag-aware output path and context\n\t\t\tconst updatedOptions = {\n\t\t\t\t...options,\n\t\t\t\toutput: outputPath,\n\t\t\t\ttag: targetTag,\n\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\tfile: taskMaster.getTasksPath()\n\t\t\t};\n\n\t\t\tawait analyzeTaskComplexity(updatedOptions);\n\t\t});\n\n\t// research command\n\tprogramInstance\n\t\t.command('research')\n\t\t.description('Perform AI-powered research queries with project context')\n\t\t.argument('[prompt]', 'Research prompt to investigate')\n\t\t.option('--file ', 'Path to the tasks file')\n\t\t.option(\n\t\t\t'-i, --id ',\n\t\t\t'Comma-separated task/subtask IDs to include as context (e.g., \"15,16.2\")'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --files ',\n\t\t\t'Comma-separated file paths to include as context'\n\t\t)\n\t\t.option(\n\t\t\t'-c, --context ',\n\t\t\t'Additional custom context to include in the research prompt'\n\t\t)\n\t\t.option(\n\t\t\t'-t, --tree',\n\t\t\t'Include project file tree structure in the research context'\n\t\t)\n\t\t.option(\n\t\t\t'-s, --save ',\n\t\t\t'Save research results to the specified task/subtask(s)'\n\t\t)\n\t\t.option(\n\t\t\t'-d, --detail ',\n\t\t\t'Output detail level: low, medium, high',\n\t\t\t'medium'\n\t\t)\n\t\t.option(\n\t\t\t'--save-to ',\n\t\t\t'Automatically save research results to specified task/subtask ID (e.g., \"15\" or \"15.2\")'\n\t\t)\n\t\t.option(\n\t\t\t'--save-file',\n\t\t\t'Save research results to .taskmaster/docs/research/ directory'\n\t\t)\n\t\t.option('--tag ', 'Specify tag context for task operations')\n\t\t.action(async (prompt, options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst 
initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\t// Parameter validation\n\t\t\tif (!prompt || typeof prompt !== 'string' || prompt.trim().length === 0) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red('Error: Research prompt is required and cannot be empty')\n\t\t\t\t);\n\t\t\t\tshowResearchHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Validate detail level\n\t\t\tconst validDetailLevels = ['low', 'medium', 'high'];\n\t\t\tif (\n\t\t\t\toptions.detail &&\n\t\t\t\t!validDetailLevels.includes(options.detail.toLowerCase())\n\t\t\t) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t`Error: Detail level must be one of: ${validDetailLevels.join(', ')}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Validate and parse task IDs if provided\n\t\t\tlet taskIds = [];\n\t\t\tif (options.id) {\n\t\t\t\ttry {\n\t\t\t\t\ttaskIds = options.id.split(',').map((id) => {\n\t\t\t\t\t\tconst trimmedId = id.trim();\n\t\t\t\t\t\t// Support both task IDs (e.g., \"15\") and subtask IDs (e.g., \"15.2\")\n\t\t\t\t\t\tif (!/^\\d+(\\.\\d+)?$/.test(trimmedId)) {\n\t\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t\t`Invalid task ID format: \"${trimmedId}\". 
Expected format: \"15\" or \"15.2\"`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn trimmedId;\n\t\t\t\t\t});\n\t\t\t\t} catch (error) {\n\t\t\t\t\tconsole.error(chalk.red(`Error parsing task IDs: ${error.message}`));\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Validate and parse file paths if provided\n\t\t\tlet filePaths = [];\n\t\t\tif (options.files) {\n\t\t\t\ttry {\n\t\t\t\t\tfilePaths = options.files.split(',').map((filePath) => {\n\t\t\t\t\t\tconst trimmedPath = filePath.trim();\n\t\t\t\t\t\tif (trimmedPath.length === 0) {\n\t\t\t\t\t\t\tthrow new Error('Empty file path provided');\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn trimmedPath;\n\t\t\t\t\t});\n\t\t\t\t} catch (error) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error parsing file paths: ${error.message}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Validate save-to option if provided\n\t\t\tif (options.saveTo) {\n\t\t\t\tconst saveToId = options.saveTo.trim();\n\t\t\t\tif (saveToId.length === 0) {\n\t\t\t\t\tconsole.error(chalk.red('Error: Save-to ID cannot be empty'));\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t\t// Validate ID format: number or number.number\n\t\t\t\tif (!/^\\d+(\\.\\d+)?$/.test(saveToId)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t'Error: Save-to ID must be in format \"15\" for task or \"15.2\" for subtask'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Validate save option if provided (legacy file save)\n\t\t\tif (options.save) {\n\t\t\t\tconst saveTarget = options.save.trim();\n\t\t\t\tif (saveTarget.length === 0) {\n\t\t\t\t\tconsole.error(chalk.red('Error: Save target cannot be empty'));\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t\t// Check if it's a valid file path (basic validation)\n\t\t\t\tif (saveTarget.includes('..') || saveTarget.startsWith('/')) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t'Error: Save path must be relative 
and cannot contain \"..\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\t// Validate tasks file exists if task IDs are specified\n\t\t\tif (taskIds.length > 0) {\n\t\t\t\ttry {\n\t\t\t\t\tconst tasksData = readJSON(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\ttaskMaster.getProjectRoot(),\n\t\t\t\t\t\ttag\n\t\t\t\t\t);\n\t\t\t\t\tif (!tasksData || !tasksData.tasks) {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t\t`Error: No valid tasks found in ${taskMaster.getTasksPath()} for tag '${tag}'`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\t\t\t\t} catch (error) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error reading tasks file: ${error.message}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Validate file paths exist if specified\n\t\t\tif (filePaths.length > 0) {\n\t\t\t\tfor (const filePath of filePaths) {\n\t\t\t\t\tconst fullPath = path.isAbsolute(filePath)\n\t\t\t\t\t\t? filePath\n\t\t\t\t\t\t: path.join(taskMaster.getProjectRoot(), filePath);\n\t\t\t\t\tif (!fs.existsSync(fullPath)) {\n\t\t\t\t\t\tconsole.error(chalk.red(`Error: File not found: ${filePath}`));\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Create validated parameters object\n\t\t\tconst validatedParams = {\n\t\t\t\tprompt: prompt.trim(),\n\t\t\t\ttaskIds: taskIds,\n\t\t\t\tfilePaths: filePaths,\n\t\t\t\tcustomContext: options.context ? options.context.trim() : null,\n\t\t\t\tincludeProjectTree: !!options.tree,\n\t\t\t\tsaveTarget: options.save ? options.save.trim() : null,\n\t\t\t\tsaveToId: options.saveTo ? options.saveTo.trim() : null,\n\t\t\t\tallowFollowUp: true, // Always allow follow-up in CLI\n\t\t\t\tdetailLevel: options.detail ? 
options.detail.toLowerCase() : 'medium',\n\t\t\t\ttasksPath: taskMaster.getTasksPath(),\n\t\t\t\tprojectRoot: taskMaster.getProjectRoot()\n\t\t\t};\n\n\t\t\t// Display what we're about to do\n\t\t\tconsole.log(chalk.blue(`Researching: \"${validatedParams.prompt}\"`));\n\n\t\t\tif (validatedParams.taskIds.length > 0) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.gray(`Task context: ${validatedParams.taskIds.join(', ')}`)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tif (validatedParams.filePaths.length > 0) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.gray(`File context: ${validatedParams.filePaths.join(', ')}`)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tif (validatedParams.customContext) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.gray(\n\t\t\t\t\t\t`Custom context: ${validatedParams.customContext.substring(0, 50)}${validatedParams.customContext.length > 50 ? '...' : ''}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tif (validatedParams.includeProjectTree) {\n\t\t\t\tconsole.log(chalk.gray('Including project file tree'));\n\t\t\t}\n\n\t\t\tconsole.log(chalk.gray(`Detail level: ${validatedParams.detailLevel}`));\n\n\t\t\ttry {\n\t\t\t\t// Import the research function\n\t\t\t\tconst { performResearch } = await import('./task-manager/research.js');\n\n\t\t\t\t// Prepare research options\n\t\t\t\tconst researchOptions = {\n\t\t\t\t\ttaskIds: validatedParams.taskIds,\n\t\t\t\t\tfilePaths: validatedParams.filePaths,\n\t\t\t\t\tcustomContext: validatedParams.customContext || '',\n\t\t\t\t\tincludeProjectTree: validatedParams.includeProjectTree,\n\t\t\t\t\tdetailLevel: validatedParams.detailLevel,\n\t\t\t\t\tprojectRoot: validatedParams.projectRoot,\n\t\t\t\t\tsaveToFile: !!options.saveFile,\n\t\t\t\t\ttag: tag\n\t\t\t\t};\n\n\t\t\t\t// Execute research\n\t\t\t\tconst result = await performResearch(\n\t\t\t\t\tvalidatedParams.prompt,\n\t\t\t\t\tresearchOptions,\n\t\t\t\t\t{\n\t\t\t\t\t\tcommandName: 'research',\n\t\t\t\t\t\toutputType: 'cli',\n\t\t\t\t\t\ttag: 
tag\n\t\t\t\t\t},\n\t\t\t\t\t'text',\n\t\t\t\t\tvalidatedParams.allowFollowUp // Pass follow-up flag\n\t\t\t\t);\n\n\t\t\t\t// Auto-save to task/subtask if requested and no interactive save occurred\n\t\t\t\tif (validatedParams.saveToId && !result.interactiveSaveOccurred) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tconst isSubtask = validatedParams.saveToId.includes('.');\n\n\t\t\t\t\t\t// Format research content for saving\n\t\t\t\t\t\tconst researchContent = `## Research Query: ${validatedParams.prompt}\n\n**Detail Level:** ${result.detailLevel}\n**Context Size:** ${result.contextSize} characters\n**Timestamp:** ${new Date().toLocaleDateString()} ${new Date().toLocaleTimeString()}\n\n### Results\n\n${result.result}`;\n\n\t\t\t\t\t\tif (isSubtask) {\n\t\t\t\t\t\t\t// Save to subtask\n\t\t\t\t\t\t\tconst { updateSubtaskById } = await import(\n\t\t\t\t\t\t\t\t'./task-manager/update-subtask-by-id.js'\n\t\t\t\t\t\t\t);\n\n\t\t\t\t\t\t\tawait updateSubtaskById(\n\t\t\t\t\t\t\t\tvalidatedParams.tasksPath,\n\t\t\t\t\t\t\t\tvalidatedParams.saveToId,\n\t\t\t\t\t\t\t\tresearchContent,\n\t\t\t\t\t\t\t\tfalse, // useResearch = false for simple append\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tcommandName: 'research-save',\n\t\t\t\t\t\t\t\t\toutputType: 'cli',\n\t\t\t\t\t\t\t\t\tprojectRoot: validatedParams.projectRoot,\n\t\t\t\t\t\t\t\t\ttag: tag\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'text'\n\t\t\t\t\t\t\t);\n\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t\t\t`✅ Research saved to subtask ${validatedParams.saveToId}`\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// Save to task\n\t\t\t\t\t\t\tconst updateTaskById = (\n\t\t\t\t\t\t\t\tawait import('./task-manager/update-task-by-id.js')\n\t\t\t\t\t\t\t).default;\n\n\t\t\t\t\t\t\tconst taskIdNum = parseInt(validatedParams.saveToId, 10);\n\t\t\t\t\t\t\tawait 
updateTaskById(\n\t\t\t\t\t\t\t\tvalidatedParams.tasksPath,\n\t\t\t\t\t\t\t\ttaskIdNum,\n\t\t\t\t\t\t\t\tresearchContent,\n\t\t\t\t\t\t\t\tfalse, // useResearch = false for simple append\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tcommandName: 'research-save',\n\t\t\t\t\t\t\t\t\toutputType: 'cli',\n\t\t\t\t\t\t\t\t\tprojectRoot: validatedParams.projectRoot,\n\t\t\t\t\t\t\t\t\ttag: tag\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'text',\n\t\t\t\t\t\t\t\ttrue // appendMode = true\n\t\t\t\t\t\t\t);\n\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t\t\t`✅ Research saved to task ${validatedParams.saveToId}`\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t} catch (saveError) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.red(`❌ Error saving to task/subtask: ${saveError.message}`)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Save results to file if requested (legacy)\n\t\t\t\tif (validatedParams.saveTarget) {\n\t\t\t\t\tconst saveContent = `# Research Query: ${validatedParams.prompt}\n\n**Detail Level:** ${result.detailLevel}\n**Context Size:** ${result.contextSize} characters\n**Timestamp:** ${new Date().toISOString()}\n\n## Results\n\n${result.result}\n`;\n\n\t\t\t\t\tfs.writeFileSync(validatedParams.saveTarget, saveContent, 'utf-8');\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(`\\n💾 Results saved to: ${validatedParams.saveTarget}`)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`\\n❌ Research failed: ${error.message}`));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// clear-subtasks command\n\tprogramInstance\n\t\t.command('clear-subtasks')\n\t\t.description('Clear subtasks from specified tasks')\n\t\t.option(\n\t\t\t'-f, --file ',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-i, --id ',\n\t\t\t'Task IDs (comma-separated) to clear subtasks from'\n\t\t)\n\t\t.option('--all', 'Clear subtasks from all tasks')\n\t\t.option('--tag ', 'Specify tag 
context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst taskIds = options.id;\n\t\t\tconst all = options.all;\n\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (!taskIds && !all) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: Please specify task IDs with --id= or use --all to clear all tasks'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tif (all) {\n\t\t\t\t// If --all is specified, get all task IDs\n\t\t\t\tconst data = readJSON(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\ttaskMaster.getProjectRoot(),\n\t\t\t\t\ttag\n\t\t\t\t);\n\t\t\t\tif (!data || !data.tasks) {\n\t\t\t\t\tconsole.error(chalk.red('Error: No valid tasks found'));\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t\tconst allIds = data.tasks.map((t) => t.id).join(',');\n\t\t\t\tclearSubtasks(taskMaster.getTasksPath(), allIds, {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\ttag\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tclearSubtasks(taskMaster.getTasksPath(), taskIds, {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\ttag\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t// add-task command\n\tprogramInstance\n\t\t.command('add-task')\n\t\t.description('Add a new task using AI, optionally providing manual details')\n\t\t.option(\n\t\t\t'-f, --file ',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-p, --prompt ',\n\t\t\t'Description of the task to add (required if not using manual fields)'\n\t\t)\n\t\t.option('-t, --title ', 'Task title (for manual task creation)')\n\t\t.option(\n\t\t\t'-d, --description <description>',\n\t\t\t'Task description (for manual task creation)'\n\t\t)\n\t\t.option(\n\t\t\t'--details 
<details>',\n\t\t\t'Implementation details (for manual task creation)'\n\t\t)\n\t\t.option(\n\t\t\t'--dependencies <dependencies>',\n\t\t\t'Comma-separated list of task IDs this task depends on'\n\t\t)\n\t\t.option(\n\t\t\t'--priority <priority>',\n\t\t\t'Task priority (high, medium, low)',\n\t\t\t'medium'\n\t\t)\n\t\t.option(\n\t\t\t'-r, --research',\n\t\t\t'Whether to use research capabilities for task creation'\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst isManualCreation = options.title && options.description;\n\n\t\t\t// Validate that either prompt or title+description are provided\n\t\t\tif (!options.prompt && !isManualCreation) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: Either --prompt or both --title and --description must be provided'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tconst tasksPath = options.file || TASKMASTER_TASKS_FILE;\n\n\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\tconsole.error(\n\t\t\t\t\t`❌ No tasks.json file found. 
Please run \"task-master init\" or create a tasks.json file at ${TASKMASTER_TASKS_FILE}`\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Correctly determine projectRoot\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst projectRoot = taskMaster.getProjectRoot();\n\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tlet manualTaskData = null;\n\t\t\tif (isManualCreation) {\n\t\t\t\tmanualTaskData = {\n\t\t\t\t\ttitle: options.title,\n\t\t\t\t\tdescription: options.description,\n\t\t\t\t\tdetails: options.details || '',\n\t\t\t\t\ttestStrategy: options.testStrategy || ''\n\t\t\t\t};\n\t\t\t\t// Restore specific logging for manual creation\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Creating task manually with title: \"${options.title}\"`)\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// Restore specific logging for AI creation\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Creating task with AI using prompt: \"${options.prompt}\"`)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Log dependencies and priority if provided (restored)\n\t\t\tconst dependenciesArray = options.dependencies\n\t\t\t\t? 
options.dependencies.split(',').map((id) => id.trim())\n\t\t\t\t: [];\n\t\t\tif (dependenciesArray.length > 0) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Dependencies: [${dependenciesArray.join(', ')}]`)\n\t\t\t\t);\n\t\t\t}\n\t\t\tif (options.priority) {\n\t\t\t\tconsole.log(chalk.blue(`Priority: ${options.priority}`));\n\t\t\t}\n\n\t\t\tconst context = {\n\t\t\t\tprojectRoot,\n\t\t\t\ttag,\n\t\t\t\tcommandName: 'add-task',\n\t\t\t\toutputType: 'cli'\n\t\t\t};\n\n\t\t\ttry {\n\t\t\t\tconst { newTaskId, telemetryData } = await addTask(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\toptions.prompt,\n\t\t\t\t\tdependenciesArray,\n\t\t\t\t\toptions.priority,\n\t\t\t\t\tcontext,\n\t\t\t\t\t'text',\n\t\t\t\t\tmanualTaskData,\n\t\t\t\t\toptions.research\n\t\t\t\t);\n\n\t\t\t\t// addTask handles detailed CLI success logging AND telemetry display when outputFormat is 'text'\n\t\t\t\t// No need to call displayAiUsageSummary here anymore.\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error adding task: ${error.message}`));\n\t\t\t\tif (error.details) {\n\t\t\t\t\tconsole.error(chalk.red(error.details));\n\t\t\t\t}\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// next command\n\tprogramInstance\n\t\t.command('next')\n\t\t.description(\n\t\t\t`Show the next task to work on based on dependencies and status${chalk.reset('')}`\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-r, --report <report>',\n\t\t\t'Path to the complexity report file',\n\t\t\tCOMPLEXITY_REPORT_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\tif (options.report && options.report !== COMPLEXITY_REPORT_FILE) {\n\t\t\t\tinitOptions.complexityReportPath = options.report;\n\t\t\t}\n\n\t\t\t// Initialize 
TaskMaster\n\t\t\t// NOTE(review): the initOptions object assembled just above (with a\n\t\t\t// conditional complexityReportPath) is never passed to initTaskMaster —\n\t\t\t// a fresh inline object is used instead, so that conditional is dead.\n\t\t\t// TODO: confirm whether initOptions should be passed through here, as\n\t\t\t// the 'show' command below does.\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag,\n\t\t\t\tcomplexityReportPath: options.report || false\n\t\t\t});\n\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\tconst context = {\n\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\ttag\n\t\t\t};\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\t// Render the next task to work on (per the command description above)\n\t\t\tawait displayNextTask(\n\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\ttaskMaster.getComplexityReportPath(),\n\t\t\t\tcontext\n\t\t\t);\n\t\t});\n\n\t// show command\n\tprogramInstance\n\t\t.command('show')\n\t\t.description(\n\t\t\t`Display detailed information about one or more tasks${chalk.reset('')}`\n\t\t)\n\t\t.argument('[id]', 'Task ID(s) to show (comma-separated for multiple)')\n\t\t.option(\n\t\t\t'-i, --id <id>',\n\t\t\t'Task ID(s) to show (comma-separated for multiple)'\n\t\t)\n\t\t.option('-s, --status <status>', 'Filter subtasks by status')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-r, --report <report>',\n\t\t\t'Path to the complexity report file',\n\t\t\tCOMPLEXITY_REPORT_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (taskId, options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\t\t\t// Only pass complexityReportPath if user provided a custom path\n\t\t\tif (options.report && options.report !== COMPLEXITY_REPORT_FILE) {\n\t\t\t\tinitOptions.complexityReportPath = options.report;\n\t\t\t}\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\tconst idArg = taskId || options.id;\n\t\t\tconst statusFilter = options.status;\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (!idArg) 
{\n\t\t\t\tconsole.error(chalk.red('Error: Please provide a task ID'));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Check if multiple IDs are provided (comma-separated)\n\t\t\tconst taskIds = idArg\n\t\t\t\t.split(',')\n\t\t\t\t.map((id) => id.trim())\n\t\t\t\t.filter((id) => id.length > 0);\n\n\t\t\tif (taskIds.length > 1) {\n\t\t\t\t// Multiple tasks - use compact summary view with interactive drill-down\n\t\t\t\tawait displayMultipleTasksSummary(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\ttaskIds,\n\t\t\t\t\ttaskMaster.getComplexityReportPath(),\n\t\t\t\t\tstatusFilter,\n\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// Single task - use detailed view\n\t\t\t\tawait displayTaskById(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\ttaskIds[0],\n\t\t\t\t\ttaskMaster.getComplexityReportPath(),\n\t\t\t\t\tstatusFilter,\n\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t);\n\t\t\t}\n\t\t});\n\n\t// add-dependency command\n\tprogramInstance\n\t\t.command('add-dependency')\n\t\t.description('Add a dependency to a task')\n\t\t.option('-i, --id <id>', 'Task ID to add dependency to')\n\t\t.option('-d, --depends-on <id>', 'Task ID that will become a dependency')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\tconst taskId = options.id;\n\t\t\tconst dependencyId = options.dependsOn;\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (!taskId || !dependencyId) 
{\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red('Error: Both --id and --depends-on are required')\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Handle subtask IDs correctly by preserving the string format for IDs containing dots\n\t\t\t// Only use parseInt for simple numeric IDs\n\t\t\tconst formattedTaskId = taskId.includes('.')\n\t\t\t\t? taskId\n\t\t\t\t: parseInt(taskId, 10);\n\t\t\tconst formattedDependencyId = dependencyId.includes('.')\n\t\t\t\t? dependencyId\n\t\t\t\t: parseInt(dependencyId, 10);\n\n\t\t\tawait addDependency(\n\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\tformattedTaskId,\n\t\t\t\tformattedDependencyId,\n\t\t\t\t{\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\ttag\n\t\t\t\t}\n\t\t\t);\n\t\t});\n\n\t// remove-dependency command\n\tprogramInstance\n\t\t.command('remove-dependency')\n\t\t.description('Remove a dependency from a task')\n\t\t.option('-i, --id <id>', 'Task ID to remove dependency from')\n\t\t.option('-d, --depends-on <id>', 'Task ID to remove as a dependency')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\tconst taskId = options.id;\n\t\t\tconst dependencyId = options.dependsOn;\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (!taskId || !dependencyId) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red('Error: Both --id and --depends-on are required')\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Handle subtask IDs correctly by preserving the string format for IDs containing dots\n\t\t\t// Only 
use parseInt for simple numeric IDs\n\t\t\tconst formattedTaskId = taskId.includes('.')\n\t\t\t\t? taskId\n\t\t\t\t: parseInt(taskId, 10);\n\t\t\tconst formattedDependencyId = dependencyId.includes('.')\n\t\t\t\t? dependencyId\n\t\t\t\t: parseInt(dependencyId, 10);\n\n\t\t\tawait removeDependency(\n\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\tformattedTaskId,\n\t\t\t\tformattedDependencyId,\n\t\t\t\t{\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\ttag\n\t\t\t\t}\n\t\t\t);\n\t\t});\n\n\t// validate-dependencies command\n\tprogramInstance\n\t\t.command('validate-dependencies')\n\t\t.description(\n\t\t\t`Identify invalid dependencies without fixing them${chalk.reset('')}`\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tawait validateDependenciesCommand(taskMaster.getTasksPath(), {\n\t\t\t\tcontext: { projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t});\n\t\t});\n\n\t// fix-dependencies command\n\tprogramInstance\n\t\t.command('fix-dependencies')\n\t\t.description(`Fix invalid dependencies automatically${chalk.reset('')}`)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = 
initTaskMaster(initOptions);\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\t// Delegate the actual fixing to the dependency manager for this tag context\n\t\t\tawait fixDependenciesCommand(taskMaster.getTasksPath(), {\n\t\t\t\tcontext: { projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t});\n\t\t});\n\n\t// complexity-report command\n\tprogramInstance\n\t\t.command('complexity-report')\n\t\t.description(`Display the complexity analysis report${chalk.reset('')}`)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the report file',\n\t\t\tCOMPLEXITY_REPORT_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst initOptions = {\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\t// Only pass complexityReportPath when the user supplied a non-default path\n\t\t\tif (options.file && options.file !== COMPLEXITY_REPORT_FILE) {\n\t\t\t\tinitOptions.complexityReportPath = options.file;\n\t\t\t}\n\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(taskMaster.getCurrentTag());\n\n\t\t\t// Render the report from the path resolved by TaskMaster\n\t\t\tawait displayComplexityReport(taskMaster.getComplexityReportPath());\n\t\t});\n\n\t// add-subtask command\n\tprogramInstance\n\t\t.command('add-subtask')\n\t\t.description('Add a subtask to an existing task')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('-p, --parent <id>', 'Parent task ID (required)')\n\t\t.option('-i, --task-id <id>', 'Existing task ID to convert to subtask')\n\t\t.option(\n\t\t\t'-t, --title <title>',\n\t\t\t'Title for the new subtask (when creating a new subtask)'\n\t\t)\n\t\t.option('-d, --description <text>', 'Description for the new subtask')\n\t\t.option('--details <text>', 'Implementation details for the new subtask')\n\t\t.option(\n\t\t\t'--dependencies <ids>',\n\t\t\t'Comma-separated list of dependency IDs for the new 
subtask'\n\t\t)\n\t\t.option('-s, --status <status>', 'Status for the new subtask', 'pending')\n\t\t.option('--generate', 'Regenerate task files after adding subtask')\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst parentId = options.parent;\n\t\t\tconst existingTaskId = options.taskId;\n\t\t\tconst generateFiles = options.generate || false;\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (!parentId) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: --parent parameter is required. Please provide a parent task ID.'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tshowAddSubtaskHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Parse dependencies if provided\n\t\t\tlet dependencies = [];\n\t\t\tif (options.dependencies) {\n\t\t\t\tdependencies = options.dependencies.split(',').map((id) => {\n\t\t\t\t\t// Handle both regular IDs and dot notation\n\t\t\t\t\treturn id.includes('.') ? 
id.trim() : parseInt(id.trim(), 10);\n\t\t\t\t});\n\t\t\t}\n\n\t\t\ttry {\n\t\t\t\tif (existingTaskId) {\n\t\t\t\t\t// Convert existing task to subtask\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t`Converting task ${existingTaskId} to a subtask of ${parentId}...`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tawait addSubtask(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\tparentId,\n\t\t\t\t\t\texistingTaskId,\n\t\t\t\t\t\tnull,\n\t\t\t\t\t\tgenerateFiles,\n\t\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t`✓ Task ${existingTaskId} successfully converted to a subtask of task ${parentId}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t} else if (options.title) {\n\t\t\t\t\t// Create new subtask with provided data\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(`Creating new subtask for parent task ${parentId}...`)\n\t\t\t\t\t);\n\n\t\t\t\t\tconst newSubtaskData = {\n\t\t\t\t\t\ttitle: options.title,\n\t\t\t\t\t\tdescription: options.description || '',\n\t\t\t\t\t\tdetails: options.details || '',\n\t\t\t\t\t\tstatus: options.status || 'pending',\n\t\t\t\t\t\tdependencies: dependencies\n\t\t\t\t\t};\n\n\t\t\t\t\tconst subtask = await addSubtask(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\tparentId,\n\t\t\t\t\t\tnull,\n\t\t\t\t\t\tnewSubtaskData,\n\t\t\t\t\t\tgenerateFiles,\n\t\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t`✓ New subtask ${parentId}.${subtask.id} successfully created`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\n\t\t\t\t\t// Display success message and suggested next steps\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tboxen(\n\t\t\t\t\t\t\tchalk.white.bold(\n\t\t\t\t\t\t\t\t`Subtask ${parentId}.${subtask.id} Added Successfully`\n\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\tchalk.white(`Title: ${subtask.title}`) +\n\t\t\t\t\t\t\t\t'\\n' 
+\n\t\t\t\t\t\t\t\tchalk.white(`Status: ${getStatusWithColor(subtask.status)}`) +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\t(dependencies.length > 0\n\t\t\t\t\t\t\t\t\t? chalk.white(`Dependencies: ${dependencies.join(', ')}`) +\n\t\t\t\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t\t\t\t: '') +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\tchalk.white.bold('Next Steps:') +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t\t\t`1. Run ${chalk.yellow(`task-master show ${parentId}`)} to see the parent task with all subtasks`\n\t\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t\t\t`2. Run ${chalk.yellow(`task-master set-status --id=${parentId}.${subtask.id} --status=in-progress`)} to start working on it`\n\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red('Error: Either --task-id or --title must be provided.')\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tboxen(\n\t\t\t\t\t\t\tchalk.white.bold('Usage Examples:') +\n\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\tchalk.white('Convert existing task to subtask:') +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t\t` task-master add-subtask --parent=5 --task-id=8`\n\t\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\tchalk.white('Create new subtask:') +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t\t` task-master add-subtask --parent=5 --title=\"Implement login UI\" --description=\"Create the login form\"`\n\t\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t'\\n\\n',\n\t\t\t\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error: 
${error.message}`));\n\t\t\t\tshowAddSubtaskHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tshowAddSubtaskHelp();\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// Helper function to show add-subtask command help\n\tfunction showAddSubtaskHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\t`${chalk.white.bold('Add Subtask Command Help')}\\n\\n${chalk.cyan('Usage:')}\\n task-master add-subtask --parent=<id> [options]\\n\\n${chalk.cyan('Options:')}\\n -p, --parent <id> Parent task ID (required)\\n -i, --task-id <id> Existing task ID to convert to subtask\\n -t, --title <title> Title for the new subtask\\n -d, --description <text> Description for the new subtask\\n --details <text> Implementation details for the new subtask\\n --dependencies <ids> Comma-separated list of dependency IDs\\n -s, --status <status> Status for the new subtask (default: \"pending\")\\n -f, --file <file> Path to the tasks file (default: \"${TASKMASTER_TASKS_FILE}\")\\n --generate Regenerate task files after adding subtask\\n\\n${chalk.cyan('Examples:')}\\n task-master add-subtask --parent=5 --task-id=8\\n task-master add-subtask -p 5 -t \"Implement login UI\" -d \"Create the login form\" --generate`,\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// remove-subtask command\n\tprogramInstance\n\t\t.command('remove-subtask')\n\t\t.description('Remove a subtask from its parent task')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-i, --id <id>',\n\t\t\t'Subtask ID(s) to remove in format \"parentId.subtaskId\" (can be comma-separated for multiple subtasks)'\n\t\t)\n\t\t.option(\n\t\t\t'-c, --convert',\n\t\t\t'Convert the subtask to a standalone task instead of deleting it'\n\t\t)\n\t\t.option('--generate', 'Regenerate task files after removing subtask')\n\t\t.option('--tag 
<tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst subtaskIds = options.id;\n\t\t\tconst convertToTask = options.convert || false;\n\t\t\tconst generateFiles = options.generate || false;\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\tif (!subtaskIds) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: --id parameter is required. Please provide subtask ID(s) in format \"parentId.subtaskId\".'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tshowRemoveSubtaskHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\ttry {\n\t\t\t\t// Split by comma to support multiple subtask IDs\n\t\t\t\tconst subtaskIdArray = subtaskIds.split(',').map((id) => id.trim());\n\n\t\t\t\tfor (const subtaskId of subtaskIdArray) {\n\t\t\t\t\t// Validate subtask ID format\n\t\t\t\t\tif (!subtaskId.includes('.')) {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t\t`Error: Subtask ID \"${subtaskId}\" must be in format \"parentId.subtaskId\"`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tshowRemoveSubtaskHelp();\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\n\t\t\t\t\tconsole.log(chalk.blue(`Removing subtask ${subtaskId}...`));\n\t\t\t\t\tif (convertToTask) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.blue('The subtask will be converted to a standalone task')\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\n\t\t\t\t\tconst result = await removeSubtask(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\tsubtaskId,\n\t\t\t\t\t\tconvertToTask,\n\t\t\t\t\t\tgenerateFiles,\n\t\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t\t);\n\n\t\t\t\t\tif (convertToTask && result) {\n\t\t\t\t\t\t// Display success message and next steps for converted task\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tboxen(\n\t\t\t\t\t\t\t\tchalk.white.bold(\n\t\t\t\t\t\t\t\t\t`Subtask ${subtaskId} 
Converted to Task #${result.id}`\n\t\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\t\tchalk.white(`Title: ${result.title}`) +\n\t\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\t\tchalk.white(`Status: ${getStatusWithColor(result.status)}`) +\n\t\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t\t\t\t`Dependencies: ${result.dependencies.join(', ')}`\n\t\t\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\t\tchalk.white.bold('Next Steps:') +\n\t\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t\t\t\t`1. Run ${chalk.yellow(`task-master show ${result.id}`)} to see details of the new task`\n\t\t\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t\t\t\t`2. Run ${chalk.yellow(`task-master set-status --id=${result.id} --status=in-progress`)} to start working on it`\n\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Display success message for deleted subtask\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tboxen(\n\t\t\t\t\t\t\t\tchalk.white.bold(`Subtask ${subtaskId} Removed`) +\n\t\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\t\tchalk.white('The subtask has been successfully deleted.'),\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\t\tshowRemoveSubtaskHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tshowRemoveSubtaskHelp();\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// 
Helper function to show remove-subtask command help\n\tfunction showRemoveSubtaskHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Remove Subtask Command Help') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\tchalk.cyan('Usage:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t` task-master remove-subtask --id=<parentId.subtaskId> [options]\\n\\n` +\n\t\t\t\t\tchalk.cyan('Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -i, --id <id> Subtask ID(s) to remove in format \"parentId.subtaskId\" (can be comma-separated, required)\\n' +\n\t\t\t\t\t' -c, --convert Convert the subtask to a standalone task instead of deleting it\\n' +\n\t\t\t\t\t' -f, --file <file> Path to the tasks file (default: \"' +\n\t\t\t\t\tTASKMASTER_TASKS_FILE +\n\t\t\t\t\t'\")\\n' +\n\t\t\t\t\t' --skip-generate Skip regenerating task files\\n\\n' +\n\t\t\t\t\tchalk.cyan('Examples:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master remove-subtask --id=5.2\\n' +\n\t\t\t\t\t' task-master remove-subtask --id=5.2,6.3,7.1\\n' +\n\t\t\t\t\t' task-master remove-subtask --id=5.2 --convert',\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// Helper function to show tags command help\n\tfunction showTagsHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Tags Command Help') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\tchalk.cyan('Usage:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t` task-master tags [options]\\n\\n` +\n\t\t\t\t\tchalk.cyan('Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -f, --file <file> Path to the tasks file (default: \"' +\n\t\t\t\t\tTASKMASTER_TASKS_FILE +\n\t\t\t\t\t'\")\\n' +\n\t\t\t\t\t' --show-metadata Show detailed metadata for each tag\\n\\n' +\n\t\t\t\t\tchalk.cyan('Examples:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master tags\\n' +\n\t\t\t\t\t' task-master tags --show-metadata\\n\\n' +\n\t\t\t\t\tchalk.cyan('Related Commands:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master add-tag <name> Create a new tag\\n' +\n\t\t\t\t\t' task-master use-tag <name> Switch to 
a tag\\n' +\n\t\t\t\t\t' task-master delete-tag <name> Delete a tag',\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// Helper function to show add-tag command help\n\tfunction showAddTagHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Add Tag Command Help') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\tchalk.cyan('Usage:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t` task-master add-tag <tagName> [options]\\n\\n` +\n\t\t\t\t\tchalk.cyan('Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -f, --file <file> Path to the tasks file (default: \"' +\n\t\t\t\t\tTASKMASTER_TASKS_FILE +\n\t\t\t\t\t'\")\\n' +\n\t\t\t\t\t' --copy-from-current Copy tasks from the current tag to the new tag\\n' +\n\t\t\t\t\t' --copy-from <tag> Copy tasks from the specified tag to the new tag\\n' +\n\t\t\t\t\t' -d, --description <text> Optional description for the tag\\n\\n' +\n\t\t\t\t\tchalk.cyan('Examples:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master add-tag feature-xyz\\n' +\n\t\t\t\t\t' task-master add-tag feature-xyz --copy-from-current\\n' +\n\t\t\t\t\t' task-master add-tag feature-xyz --copy-from master\\n' +\n\t\t\t\t\t' task-master add-tag feature-xyz -d \"Feature XYZ development\"',\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// Helper function to show delete-tag command help\n\tfunction showDeleteTagHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Delete Tag Command Help') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\tchalk.cyan('Usage:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t` task-master delete-tag <tagName> [options]\\n\\n` +\n\t\t\t\t\tchalk.cyan('Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -f, --file <file> Path to the tasks file (default: \"' +\n\t\t\t\t\tTASKMASTER_TASKS_FILE +\n\t\t\t\t\t'\")\\n' +\n\t\t\t\t\t' -y, --yes Skip confirmation prompts\\n\\n' +\n\t\t\t\t\tchalk.cyan('Examples:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master delete-tag feature-xyz\\n' +\n\t\t\t\t\t' 
task-master delete-tag feature-xyz --yes\\n\\n' +\n\t\t\t\t\tchalk.yellow('Warning:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' This will permanently delete the tag and all its tasks!',\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// Helper function to show use-tag command help\n\tfunction showUseTagHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Use Tag Command Help') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\tchalk.cyan('Usage:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t` task-master use-tag <tagName> [options]\\n\\n` +\n\t\t\t\t\tchalk.cyan('Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -f, --file <file> Path to the tasks file (default: \"' +\n\t\t\t\t\tTASKMASTER_TASKS_FILE +\n\t\t\t\t\t'\")\\n\\n' +\n\t\t\t\t\tchalk.cyan('Examples:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master use-tag feature-xyz\\n' +\n\t\t\t\t\t' task-master use-tag master\\n\\n' +\n\t\t\t\t\tchalk.cyan('Related Commands:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master tags List all available tags\\n' +\n\t\t\t\t\t' task-master add-tag <name> Create a new tag',\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// Helper function to show research command help\n\tfunction showResearchHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Research Command Help') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\tchalk.cyan('Usage:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t` task-master research \"<query>\" [options]\\n\\n` +\n\t\t\t\t\tchalk.cyan('Required:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' <query> Research question or prompt (required)\\n\\n' +\n\t\t\t\t\tchalk.cyan('Context Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -i, --id <ids> Comma-separated task/subtask IDs for context (e.g., \"15,23.2\")\\n' +\n\t\t\t\t\t' -f, --files <paths> Comma-separated file paths for context\\n' +\n\t\t\t\t\t' -c, --context <text> Additional custom context text\\n' +\n\t\t\t\t\t' --tree Include project file tree structure\\n\\n' 
+\n\t\t\t\t\tchalk.cyan('Output Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -d, --detail <level> Detail level: low, medium, high (default: medium)\\n' +\n\t\t\t\t\t' --save-to <id> Auto-save results to task/subtask ID (e.g., \"15\" or \"15.2\")\\n' +\n\t\t\t\t\t' --tag <tag> Specify tag context for task operations\\n\\n' +\n\t\t\t\t\tchalk.cyan('Examples:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master research \"How should I implement user authentication?\"\\n' +\n\t\t\t\t\t' task-master research \"What\\'s the best approach?\" --id=15,23.2\\n' +\n\t\t\t\t\t' task-master research \"How does auth work?\" --files=src/auth.js --tree\\n' +\n\t\t\t\t\t' task-master research \"Implementation steps?\" --save-to=15.2 --detail=high',\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// remove-task command\n\tprogramInstance\n\t\t.command('remove-task')\n\t\t.description('Remove one or more tasks or subtasks permanently')\n\t\t.option(\n\t\t\t'-i, --id <ids>',\n\t\t\t'ID(s) of the task(s) or subtask(s) to remove (e.g., \"5\", \"5.2\", or \"5,6.1,7\")'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('-y, --yes', 'Skip confirmation prompt', false)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst taskIdsString = options.id;\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (!taskIdsString) {\n\t\t\t\tconsole.error(chalk.red('Error: Task ID(s) are required'));\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t'Usage: task-master remove-task 
--id=<taskId1,taskId2...>'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tconst taskIdsToRemove = taskIdsString\n\t\t\t\t.split(',')\n\t\t\t\t.map((id) => id.trim())\n\t\t\t\t.filter(Boolean);\n\n\t\t\tif (taskIdsToRemove.length === 0) {\n\t\t\t\tconsole.error(chalk.red('Error: No valid task IDs provided.'));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\ttry {\n\t\t\t\t// Read data once for checks and confirmation\n\t\t\t\tconst data = readJSON(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\ttaskMaster.getProjectRoot(),\n\t\t\t\t\ttag\n\t\t\t\t);\n\t\t\t\tif (!data || !data.tasks) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: No valid tasks found in ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst existingTasksToRemove = [];\n\t\t\t\tconst nonExistentIds = [];\n\t\t\t\tlet totalSubtasksToDelete = 0;\n\t\t\t\tconst dependentTaskMessages = [];\n\n\t\t\t\tfor (const taskId of taskIdsToRemove) {\n\t\t\t\t\tif (!taskExists(data.tasks, taskId)) {\n\t\t\t\t\t\tnonExistentIds.push(taskId);\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Correctly extract the task object from the result of findTaskById\n\t\t\t\t\t\tconst findResult = findTaskById(data.tasks, taskId);\n\t\t\t\t\t\tconst taskObject = findResult.task; // Get the actual task/subtask object\n\n\t\t\t\t\t\tif (taskObject) {\n\t\t\t\t\t\t\texistingTasksToRemove.push({ id: taskId, task: taskObject }); // Push the actual task object\n\n\t\t\t\t\t\t\t// If it's a main task, count its subtasks and check dependents\n\t\t\t\t\t\t\tif (!taskObject.isSubtask) {\n\t\t\t\t\t\t\t\t// Check the actual task object\n\t\t\t\t\t\t\t\tif (taskObject.subtasks && taskObject.subtasks.length > 0) {\n\t\t\t\t\t\t\t\t\ttotalSubtasksToDelete += taskObject.subtasks.length;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tconst dependentTasks = data.tasks.filter(\n\t\t\t\t\t\t\t\t\t(t) =>\n\t\t\t\t\t\t\t\t\t\tt.dependencies &&\n\t\t\t\t\t\t\t\t\t\tt.dependencies.includes(parseInt(taskId, 
10))
								);
								if (dependentTasks.length > 0) {
									dependentTaskMessages.push(
										` - Task ${taskId}: ${dependentTasks.length} dependent tasks (${dependentTasks.map((t) => t.id).join(', ')})`
									);
								}
							}
						} else {
							// Handle case where findTaskById returned null for the task property (should be rare)
							nonExistentIds.push(`${taskId} (error finding details)`);
						}
					}
				}

				// Unknown IDs are reported but do not abort the removal of the rest
				if (nonExistentIds.length > 0) {
					console.warn(
						chalk.yellow(
							`Warning: The following task IDs were not found: ${nonExistentIds.join(', ')}`
						)
					);
				}

				if (existingTasksToRemove.length === 0) {
					console.log(chalk.blue('No existing tasks found to remove.'));
					process.exit(0);
				}

				// Skip confirmation if --yes flag is provided
				if (!options.yes) {
					console.log();
					console.log(
						chalk.red.bold(
							`⚠️ WARNING: This will permanently delete the following ${existingTasksToRemove.length} item(s):`
						)
					);
					console.log();

					existingTasksToRemove.forEach(({ id, task }) => {
						if (!task) return; // Should not happen due to taskExists check, but safeguard
						if (task.isSubtask) {
							// Subtask - title is directly on the task object
							console.log(
								chalk.white(` Subtask ${id}: ${task.title || '(no title)'}`)
							);
							// Optionally show parent context if available
							if (task.parentTask) {
								console.log(
									chalk.gray(
										` (Parent: ${task.parentTask.id} - ${task.parentTask.title || '(no title)'})`
									)
								);
							}
						} else {
							// Main task - title is directly on the task object
							console.log(
								chalk.white.bold(` Task ${id}: ${task.title || '(no title)'}`)
							);
						}
					});

					if (totalSubtasksToDelete > 0) {
						console.log(
							chalk.yellow(
								`⚠️ This will also delete ${totalSubtasksToDelete} subtasks associated with the selected main tasks!`
							)
						);
					}

					if (dependentTaskMessages.length > 0) {
						console.log(
							chalk.yellow(
								'⚠️ Warning: Dependencies on the following tasks will be removed:'
							)
						);
						dependentTaskMessages.forEach((msg) =>
							console.log(chalk.yellow(msg))
						);
					}

					console.log();

					// Interactive confirmation; defaults to "no" for safety
					const { confirm } = await inquirer.prompt([
						{
							type: 'confirm',
							name: 'confirm',
							message: chalk.red.bold(
								`Are you sure you want to permanently delete these ${existingTasksToRemove.length} item(s)?`
							),
							default: false
						}
					]);

					if (!confirm) {
						console.log(chalk.blue('Task deletion cancelled.'));
						process.exit(0);
					}
				}

				const indicator = startLoadingIndicator(
					`Removing ${existingTasksToRemove.length} task(s)/subtask(s)...`
				);

				// Use the string of existing IDs for the core function
				const existingIdsString = existingTasksToRemove
					.map(({ id }) => id)
					.join(',');
				const result = await removeTask(
					taskMaster.getTasksPath(),
					existingIdsString,
					{
						projectRoot: taskMaster.getProjectRoot(),
						tag
					}
				);

				stopLoadingIndicator(indicator);

				if (result.success) {
					console.log(
						boxen(
							chalk.green(
								`Successfully removed ${result.removedTasks.length} task(s)/subtask(s).`
							) +
								(result.message ? `\n\nDetails:\n${result.message}` : '') +
								(result.error
									? `\n\nWarnings:\n${chalk.yellow(result.error)}`
									: ''),
							{ padding: 1, borderColor: 'green', borderStyle: 'round' }
						)
					);
				} else {
					console.error(
						boxen(
							chalk.red(
								`Operation completed with errors. Removed ${result.removedTasks.length} task(s)/subtask(s).`
							) +
								(result.message ? `\n\nDetails:\n${result.message}` : '') +
								(result.error ? `\n\nErrors:\n${chalk.red(result.error)}` : ''),
							{
								padding: 1,
								borderColor: 'red',
								borderStyle: 'round'
							}
						)
					);
					process.exit(1); // Exit with error code if any part failed
				}

				// Log any initially non-existent IDs again for clarity
				if (nonExistentIds.length > 0) {
					console.warn(
						chalk.yellow(
							`Note: The following IDs were not found initially and were skipped: ${nonExistentIds.join(', ')}`
						)
					);

					// Exit with error if any removals failed
					if (result.removedTasks.length === 0) {
						process.exit(1);
					}
				}
			} catch (error) {
				console.error(
					chalk.red(`Error: ${error.message || 'An unknown error occurred'}`)
				);
				process.exit(1);
			}
		});

	// init command (Directly calls the implementation from init.js)
	programInstance
		.command('init')
		.description('Initialize a new project with Task Master structure')
		.option('-y, --yes', 'Skip prompts 
and use default values')
		.option('-n, --name <name>', 'Project name')
		.option('-d, --description <description>', 'Project description')
		.option('-v, --version <version>', 'Project version', '0.1.0') // Set default here
		.option('-a, --author <author>', 'Author name')
		.option(
			'-r, --rules <rules...>',
			'List of rules to add (roo, windsurf, cursor, ...). Accepts comma or space separated values.'
		)
		.option('--skip-install', 'Skip installing dependencies')
		.option('--dry-run', 'Show what would be done without making changes')
		.option('--aliases', 'Add shell aliases (tm, taskmaster)')
		.option('--no-aliases', 'Skip shell aliases (tm, taskmaster)')
		.option('--git', 'Initialize Git repository')
		.option('--no-git', 'Skip Git repository initialization')
		.option('--git-tasks', 'Store tasks in Git')
		.option('--no-git-tasks', 'No Git storage of tasks')
		.action(async (cmdOptions) => {
			// cmdOptions contains parsed arguments
			// Parse rules: accept space or comma separated, default to all available rules
			let selectedProfiles = RULE_PROFILES;
			let rulesExplicitlyProvided = false;

			if (cmdOptions.rules && Array.isArray(cmdOptions.rules)) {
				const userSpecifiedProfiles = cmdOptions.rules
					.flatMap((r) => r.split(','))
					.map((r) => r.trim())
					.filter(Boolean);
				// Only override defaults if user specified valid rules
				if (userSpecifiedProfiles.length > 0) {
					selectedProfiles = userSpecifiedProfiles;
					rulesExplicitlyProvided = true;
				}
			}

			cmdOptions.rules = selectedProfiles;
			cmdOptions.rulesExplicitlyProvided = rulesExplicitlyProvided;

			try {
				// Directly call the initializeProject function, passing the parsed options
				await initializeProject(cmdOptions);
				// initializeProject handles its own flow, including potential process.exit()
			} catch (error) {
				console.error(
					chalk.red(`Error during initialization: ${error.message}`)
				);
				process.exit(1);
			}
		});

	// models command — view or change the main/research/fallback AI model configuration
	programInstance
		.command('models')
		.description('Manage AI model configurations')
		.option(
			'--set-main <model_id>',
			'Set the primary model for task generation/updates'
		)
		.option(
			'--set-research <model_id>',
			'Set the model for research-backed operations'
		)
		.option(
			'--set-fallback <model_id>',
			'Set the model to use if the primary fails'
		)
		.option('--setup', 'Run interactive setup to configure models')
		.option(
			'--openrouter',
			'Allow setting a custom OpenRouter model ID (use with --set-*) '
		)
		.option(
			'--ollama',
			'Allow setting a custom Ollama model ID (use with --set-*) '
		)
		.option(
			'--bedrock',
			'Allow setting a custom Bedrock model ID (use with --set-*) '
		)
		.option(
			'--claude-code',
			'Allow setting a Claude Code model ID (use with --set-*)'
		)
		.option(
			'--azure',
			'Allow setting a custom Azure OpenAI model ID (use with --set-*) '
		)
		.option(
			'--vertex',
			'Allow setting a custom Vertex AI model ID (use with --set-*) '
		)
		.option(
			'--gemini-cli',
			'Allow setting a Gemini CLI model ID (use with --set-*)'
		)
		.addHelpText(
			'after',
			`
Examples:
 $ task-master models # View current configuration
 $ task-master models --set-main gpt-4o # Set main model (provider inferred)
 $ task-master models --set-research sonar-pro # Set research model
 $ task-master models --set-fallback claude-3-5-sonnet-20241022 # Set fallback
 $ task-master models --set-main my-custom-model --ollama # Set custom Ollama model for main role
 $ task-master models --set-main anthropic.claude-3-sonnet-20240229-v1:0 --bedrock # Set custom Bedrock model for main role
 $ task-master models --set-main some/other-model --openrouter # Set custom OpenRouter model for main role
 $ task-master models --set-main sonnet --claude-code # Set Claude Code model for main role
 $ task-master models --set-main gpt-4o --azure # Set custom Azure OpenAI model for main role
 $ task-master models --set-main claude-3-5-sonnet@20241022 --vertex # Set custom Vertex AI model for main role
 $ task-master models --set-main gemini-2.5-pro --gemini-cli # Set Gemini CLI model for main role
 $ task-master models --setup # Run interactive setup`
		)
		.action(async (options) => {
			// Initialize TaskMaster
			const taskMaster = initTaskMaster({
				tasksPath: options.file || false
			});

			const projectRoot = taskMaster.getProjectRoot();

			// Validate flags: cannot use multiple provider flags simultaneously
			// NOTE(review): --azure and --vertex are declared above but are not counted
			// here (nor used in the providerHint chains below) — confirm this is intended.
			const providerFlags = [
				options.openrouter,
				options.ollama,
				options.bedrock,
				options.claudeCode,
				options.geminiCli
			].filter(Boolean).length;
			if (providerFlags > 1) {
				console.error(
					chalk.red(
						'Error: Cannot use multiple provider flags (--openrouter, --ollama, --bedrock, --claude-code, --gemini-cli) simultaneously.'
					)
				);
				process.exit(1);
			}

			// Determine the primary action based on flags
			const isSetup = options.setup;
			const isSetOperation =
				options.setMain || options.setResearch || options.setFallback;

			// --- Execute Action ---

			if (isSetup) {
				// Action 1: Run Interactive Setup
				console.log(chalk.blue('Starting interactive model setup...')); // Added feedback
				try {
					await runInteractiveSetup(taskMaster.getProjectRoot());
					// runInteractiveSetup logs its own completion/error messages
				} catch (setupError) 
{\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red('\\\\nInteractive setup failed unexpectedly:'),\n\t\t\t\t\t\tsetupError.message\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\t// --- IMPORTANT: Exit after setup ---\n\t\t\t\treturn; // Stop execution here\n\t\t\t}\n\n\t\t\tif (isSetOperation) {\n\t\t\t\t// Action 2: Perform Direct Set Operations\n\t\t\t\tlet updateOccurred = false; // Track if any update actually happened\n\n\t\t\t\tif (options.setMain) {\n\t\t\t\t\tconst result = await setModel('main', options.setMain, {\n\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\tproviderHint: options.openrouter\n\t\t\t\t\t\t\t? 'openrouter'\n\t\t\t\t\t\t\t: options.ollama\n\t\t\t\t\t\t\t\t? 'ollama'\n\t\t\t\t\t\t\t\t: options.bedrock\n\t\t\t\t\t\t\t\t\t? 'bedrock'\n\t\t\t\t\t\t\t\t\t: options.claudeCode\n\t\t\t\t\t\t\t\t\t\t? 'claude-code'\n\t\t\t\t\t\t\t\t\t\t: options.geminiCli\n\t\t\t\t\t\t\t\t\t\t\t? 'gemini-cli'\n\t\t\t\t\t\t\t\t\t\t\t: undefined\n\t\t\t\t\t});\n\t\t\t\t\tif (result.success) {\n\t\t\t\t\t\tconsole.log(chalk.green(`✅ ${result.data.message}`));\n\t\t\t\t\t\tif (result.data.warning)\n\t\t\t\t\t\t\tconsole.log(chalk.yellow(result.data.warning));\n\t\t\t\t\t\tupdateOccurred = true;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red(`❌ Error setting main model: ${result.error.message}`)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (options.setResearch) {\n\t\t\t\t\tconst result = await setModel('research', options.setResearch, {\n\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\tproviderHint: options.openrouter\n\t\t\t\t\t\t\t? 'openrouter'\n\t\t\t\t\t\t\t: options.ollama\n\t\t\t\t\t\t\t\t? 'ollama'\n\t\t\t\t\t\t\t\t: options.bedrock\n\t\t\t\t\t\t\t\t\t? 'bedrock'\n\t\t\t\t\t\t\t\t\t: options.claudeCode\n\t\t\t\t\t\t\t\t\t\t? 'claude-code'\n\t\t\t\t\t\t\t\t\t\t: options.geminiCli\n\t\t\t\t\t\t\t\t\t\t\t? 
'gemini-cli'
							: undefined
					});
					if (result.success) {
						console.log(chalk.green(`✅ ${result.data.message}`));
						if (result.data.warning)
							console.log(chalk.yellow(result.data.warning));
						updateOccurred = true;
					} else {
						console.error(
							chalk.red(
								`❌ Error setting research model: ${result.error.message}`
							)
						);
					}
				}
				if (options.setFallback) {
					const result = await setModel('fallback', options.setFallback, {
						projectRoot,
						providerHint: options.openrouter
							? 'openrouter'
							: options.ollama
								? 'ollama'
								: options.bedrock
									? 'bedrock'
									: options.claudeCode
										? 'claude-code'
										: options.geminiCli
											? 'gemini-cli'
											: undefined
					});
					if (result.success) {
						console.log(chalk.green(`✅ ${result.data.message}`));
						if (result.data.warning)
							console.log(chalk.yellow(result.data.warning));
						updateOccurred = true;
					} else {
						console.error(
							chalk.red(
								`❌ Error setting fallback model: ${result.error.message}`
							)
						);
					}
				}

				// Optional: Add a final confirmation if any update occurred
				if (updateOccurred) {
					console.log(chalk.blue('\nModel configuration updated.'));
				} else {
					console.log(
						chalk.yellow(
							'\nNo model configuration changes were made (or errors occurred).'
						)
					);
				}

				// --- IMPORTANT: Exit after set operations ---
				return; // Stop execution here
			}

			// Action 3: Display Full Status (Only runs if no setup and no set flags)
			console.log(chalk.blue('Fetching current model configuration...')); // Added feedback
			const configResult = await getModelConfiguration({ projectRoot });
			const availableResult = await getAvailableModelsList({ projectRoot });
			const apiKeyStatusResult = await getApiKeyStatusReport({ projectRoot });

			// 1. Display Active Models
			if (!configResult.success) {
				console.error(
					chalk.red(
						`❌ Error fetching configuration: ${configResult.error.message}`
					)
				);
			} else {
				displayModelConfiguration(
					configResult.data,
					availableResult.data?.models || []
				);
			}

			// 2. Display API Key Status
			if (apiKeyStatusResult.success) {
				displayApiKeyStatus(apiKeyStatusResult.data.report);
			} else {
				console.error(
					chalk.yellow(
						`⚠️ Warning: Could not display API Key status: ${apiKeyStatusResult.error.message}`
					)
				);
			}

			// 3. Display Other Available Models (Filtered)
			if (availableResult.success) {
				// Exclude models already active in any role, and placeholder IDs ("[...")
				const activeIds = configResult.success
					? [
							configResult.data.activeModels.main.modelId,
							configResult.data.activeModels.research.modelId,
							configResult.data.activeModels.fallback?.modelId
						].filter(Boolean)
					: [];
				const displayableAvailable = availableResult.data.models.filter(
					(m) => !activeIds.includes(m.modelId) && !m.modelId.startsWith('[')
				);
				displayAvailableModels(displayableAvailable);
			} else {
				console.error(
					chalk.yellow(
						`⚠️ Warning: Could not display available models: ${availableResult.error.message}`
					)
				);
			}

			// 4. 
Conditional Hint if Config File is Missing\n\t\t\tconst configExists = isConfigFilePresent(projectRoot);\n\t\t\tif (!configExists) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\"\\\\nHint: Run 'task-master models --setup' to create or update your configuration.\"\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t\t// --- IMPORTANT: Exit after displaying status ---\n\t\t\treturn; // Stop execution here\n\t\t});\n\n\t// response-language command\n\tprogramInstance\n\t\t.command('lang')\n\t\t.description('Manage response language settings')\n\t\t.option('--response <response_language>', 'Set the response language')\n\t\t.option('--setup', 'Run interactive setup to configure response language')\n\t\t.action(async (options) => {\n\t\t\tconst taskMaster = initTaskMaster({});\n\t\t\tconst projectRoot = taskMaster.getProjectRoot(); // Find project root for context\n\t\t\tconst { response, setup } = options;\n\t\t\tlet responseLanguage = response !== undefined ? response : 'English';\n\t\t\tif (setup) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue('Starting interactive response language setup...')\n\t\t\t\t);\n\t\t\t\ttry {\n\t\t\t\t\tconst userResponse = await inquirer.prompt([\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttype: 'input',\n\t\t\t\t\t\t\tname: 'responseLanguage',\n\t\t\t\t\t\t\tmessage: 'Input your preferred response language',\n\t\t\t\t\t\t\tdefault: 'English'\n\t\t\t\t\t\t}\n\t\t\t\t\t]);\n\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t'Response language set to:',\n\t\t\t\t\t\t\tuserResponse.responseLanguage\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tresponseLanguage = userResponse.responseLanguage;\n\t\t\t\t} catch (setupError) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red('\\\\nInteractive setup failed unexpectedly:'),\n\t\t\t\t\t\tsetupError.message\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconst result = setResponseLanguage(responseLanguage, {\n\t\t\t\tprojectRoot\n\t\t\t});\n\n\t\t\tif (result.success) {\n\t\t\t\tconsole.log(chalk.green(`✅ 
${result.data.message}`));
			} else {
				console.error(
					chalk.red(
						`❌ Error setting response language: ${result.error.message}`
					)
				);
				process.exit(1);
			}
		});

	// move-task command — relocate tasks/subtasks; supports batched comma-separated moves
	programInstance
		.command('move')
		.description('Move a task or subtask to a new position')
		.option(
			'-f, --file <file>',
			'Path to the tasks file',
			TASKMASTER_TASKS_FILE
		)
		.option(
			'--from <id>',
			'ID of the task/subtask to move (e.g., "5" or "5.2"). Can be comma-separated to move multiple tasks (e.g., "5,6,7")'
		)
		.option(
			'--to <id>',
			'ID of the destination (e.g., "7" or "7.3"). Must match the number of source IDs if comma-separated'
		)
		.option('--tag <tag>', 'Specify tag context for task operations')
		.action(async (options) => {
			// Initialize TaskMaster
			const taskMaster = initTaskMaster({
				tasksPath: options.file || true,
				tag: options.tag
			});

			const sourceId = options.from;
			const destinationId = options.to;
			const tag = taskMaster.getCurrentTag();

			// Both endpoints are required for a move
			if (!sourceId || !destinationId) {
				console.error(
					chalk.red('Error: Both --from and --to parameters are required')
				);
				console.log(
					chalk.yellow(
						'Usage: task-master move --from=<sourceId> --to=<destinationId>'
					)
				);
				process.exit(1);
			}

			// Check if we're moving multiple tasks (comma-separated IDs)
			const sourceIds = sourceId.split(',').map((id) => id.trim());
			const destinationIds = destinationId.split(',').map((id) => id.trim());

			// Validate that the number of source and destination IDs match
			if (sourceIds.length !== destinationIds.length) {
				console.error(
					chalk.red(
						'Error: The number of source and destination IDs must 
match'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow('Example: task-master move --from=5,6,7 --to=10,11,12')\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// If moving multiple tasks\n\t\t\tif (sourceIds.length > 1) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t`Moving multiple tasks: ${sourceIds.join(', ')} to ${destinationIds.join(', ')}...`\n\t\t\t\t\t)\n\t\t\t\t);\n\n\t\t\t\ttry {\n\t\t\t\t\t// Read tasks data once to validate destination IDs\n\t\t\t\t\tconst tasksData = readJSON(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\ttaskMaster.getProjectRoot(),\n\t\t\t\t\t\ttag\n\t\t\t\t\t);\n\t\t\t\t\tif (!tasksData || !tasksData.tasks) {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red(`Error: Invalid or missing tasks file at ${tasksPath}`)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Move tasks one by one\n\t\t\t\t\tfor (let i = 0; i < sourceIds.length; i++) {\n\t\t\t\t\t\tconst fromId = sourceIds[i];\n\t\t\t\t\t\tconst toId = destinationIds[i];\n\n\t\t\t\t\t\t// Skip if source and destination are the same\n\t\t\t\t\t\tif (fromId === toId) {\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.yellow(`Skipping ${fromId} -> ${toId} (same ID)`)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.blue(`Moving task/subtask ${fromId} to ${toId}...`)\n\t\t\t\t\t\t);\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tawait moveTask(\n\t\t\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\t\t\tfromId,\n\t\t\t\t\t\t\t\ttoId,\n\t\t\t\t\t\t\t\ti === sourceIds.length - 1,\n\t\t\t\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t\t\t`✓ Successfully moved task/subtask ${fromId} to ${toId}`\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\t\tchalk.red(`Error moving ${fromId} to ${toId}: 
${error.message}`)
							);
							// Continue with the next task rather than exiting
						}
					}
				} catch (error) {
					console.error(chalk.red(`Error: ${error.message}`));
					process.exit(1);
				}
			} else {
				// Moving a single task (existing logic)
				console.log(
					chalk.blue(`Moving task/subtask ${sourceId} to ${destinationId}...`)
				);

				try {
					const result = await moveTask(
						taskMaster.getTasksPath(),
						sourceId,
						destinationId,
						true,
						{ projectRoot: taskMaster.getProjectRoot(), tag }
					);
					console.log(
						chalk.green(
							`✓ Successfully moved task/subtask ${sourceId} to ${destinationId}`
						)
					);
				} catch (error) {
					console.error(chalk.red(`Error: ${error.message}`));
					process.exit(1);
				}
			}
		});

	// Add/remove profile rules command — manages editor/agent rule sets per profile
	programInstance
		.command('rules [action] [profiles...]')
		.description(
			`Add or remove rules for one or more profiles. Valid actions: ${Object.values(RULES_ACTIONS).join(', ')} (e.g., task-master rules ${RULES_ACTIONS.ADD} windsurf roo)`
		)
		.option(
			'-f, --force',
			'Skip confirmation prompt when removing rules (dangerous)'
		)
		.option(
			`--${RULES_SETUP_ACTION}`,
			'Run interactive setup to select rule profiles to add'
		)
		.addHelpText(
			'after',
			`
		Examples:
		$ task-master rules ${RULES_ACTIONS.ADD} windsurf roo # Add Windsurf and Roo rule sets
		$ task-master rules ${RULES_ACTIONS.REMOVE} windsurf # Remove Windsurf rule set
		$ task-master rules --${RULES_SETUP_ACTION} # Interactive setup to select rule profiles`
		)
		.action(async (action, profiles, options) => {
			const taskMaster = initTaskMaster({});
			const projectRoot = taskMaster.getProjectRoot();
			if (!projectRoot) {
				console.error(chalk.red('Error: Could not find project root.'));
				process.exit(1);
			}

			/**
			 * 'task-master rules --setup' action:
			 *
			 * Launches an interactive prompt to select which rule profiles to add to the current project.
			 * This does NOT perform project initialization or ask about shell aliases—only rules selection.
			 *
			 * Example usage:
			 * $ task-master rules --setup
			 *
			 * Useful for adding rules after project creation.
			 *
			 * The list of profiles is always up-to-date with the available profiles.
			 */
			if (options[RULES_SETUP_ACTION]) {
				// Run interactive rules setup ONLY (no project init)
				const selectedRuleProfiles = await runInteractiveProfilesSetup();

				if (!selectedRuleProfiles || selectedRuleProfiles.length === 0) {
					console.log(chalk.yellow('No profiles selected. Exiting.'));
					return;
				}

				console.log(
					chalk.blue(
						`Installing ${selectedRuleProfiles.length} selected profile(s)...`
					)
				);

				for (let i = 0; i < selectedRuleProfiles.length; i++) {
					const profile = selectedRuleProfiles[i];
					console.log(
						chalk.blue(
							`Processing profile ${i + 1}/${selectedRuleProfiles.length}: ${profile}...`
						)
					);

					// Unknown profile names are skipped rather than aborting the batch
					if (!isValidProfile(profile)) {
						console.warn(
							`Rule profile for "${profile}" not found. Valid profiles: ${RULE_PROFILES.join(', ')}. Skipping.`
						);
						continue;
					}
					const profileConfig = getRulesProfile(profile);

					const addResult = convertAllRulesToProfileRules(
						projectRoot,
						profileConfig
					);

					console.log(chalk.green(generateProfileSummary(profile, addResult)));
				}

				console.log(
					chalk.green(
						`\nCompleted installation of all ${selectedRuleProfiles.length} profile(s).`
					)
				);
				return;
			}

			// Validate action for non-setup mode
			if (!action || !isValidRulesAction(action)) {
				console.error(
					chalk.red(
						`Error: Invalid or missing action '${action || 'none'}'. Valid actions are: ${Object.values(RULES_ACTIONS).join(', ')}`
					)
				);
				console.error(
					chalk.yellow(
						`For interactive setup, use: task-master rules --${RULES_SETUP_ACTION}`
					)
				);
				process.exit(1);
			}

			if (!profiles || profiles.length === 0) {
				console.error(
					'Please specify at least one rule profile (e.g., windsurf, roo).'
				);
				process.exit(1);
			}

			// Support both space- and comma-separated profile lists
			const expandedProfiles = profiles
				.flatMap((b) => b.split(',').map((s) => s.trim()))
				.filter(Boolean);

			if (action === RULES_ACTIONS.REMOVE) {
				let confirmed = true;
				if (!options.force) {
					// Check if this removal would leave no profiles remaining
					if (wouldRemovalLeaveNoProfiles(projectRoot, expandedProfiles)) {
						const installedProfiles = getInstalledProfiles(projectRoot);
						confirmed = await confirmRemoveAllRemainingProfiles(
							expandedProfiles,
							installedProfiles
						);
					} else {
						confirmed = await confirmProfilesRemove(expandedProfiles);
					}
				}
				if (!confirmed) {
					console.log(chalk.yellow('Aborted: No rules were removed.'));
					return;
				}
			}

			const removalResults = [];
			const addResults = [];

			for (const profile of expandedProfiles) {
				if (!isValidProfile(profile)) {
					console.warn(
						`Rule profile for "${profile}" not found. Valid profiles: ${RULE_PROFILES.join(', ')}. Skipping.`
					);
					continue;
				}
				const profileConfig = getRulesProfile(profile);

				if (action === RULES_ACTIONS.ADD) {
					console.log(chalk.blue(`Adding rules for profile: ${profile}...`));
					const addResult = convertAllRulesToProfileRules(
						projectRoot,
						profileConfig
					);
					console.log(
						chalk.blue(`Completed adding rules for profile: ${profile}`)
					);

					// Store result with profile name for summary
					addResults.push({
						profileName: profile,
						success: addResult.success,
						failed: addResult.failed
					});

					console.log(chalk.green(generateProfileSummary(profile, addResult)));
				} else if (action === RULES_ACTIONS.REMOVE) {
					console.log(chalk.blue(`Removing rules for profile: ${profile}...`));
					const result = removeProfileRules(projectRoot, profileConfig);
					removalResults.push(result);
					console.log(
						chalk.green(generateProfileRemovalSummary(profile, result))
					);
				} else {
					console.error(
						`Unknown action. 
Use \"${RULES_ACTIONS.ADD}\" or \"${RULES_ACTIONS.REMOVE}\".`\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Print summary for additions\n\t\t\tif (action === RULES_ACTIONS.ADD && addResults.length > 0) {\n\t\t\t\tconst { allSuccessfulProfiles, totalSuccess, totalFailed } =\n\t\t\t\t\tcategorizeProfileResults(addResults);\n\n\t\t\t\tif (allSuccessfulProfiles.length > 0) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t`\\nSuccessfully processed profiles: ${allSuccessfulProfiles.join(', ')}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\n\t\t\t\t\t// Create a descriptive summary\n\t\t\t\t\tif (totalSuccess > 0) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t\t`Total: ${totalSuccess} files processed, ${totalFailed} failed.`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t\t`Total: ${allSuccessfulProfiles.length} profile(s) set up successfully.`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Print summary for removals\n\t\t\tif (action === RULES_ACTIONS.REMOVE && removalResults.length > 0) {\n\t\t\t\tconst {\n\t\t\t\t\tsuccessfulRemovals,\n\t\t\t\t\tskippedRemovals,\n\t\t\t\t\tfailedRemovals,\n\t\t\t\t\tremovalsWithNotices\n\t\t\t\t} = categorizeRemovalResults(removalResults);\n\n\t\t\t\tif (successfulRemovals.length > 0) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t`\\nSuccessfully removed profiles for: ${successfulRemovals.join(', ')}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tif (skippedRemovals.length > 0) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`Skipped (default or protected): ${skippedRemovals.join(', ')}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tif (failedRemovals.length > 0) {\n\t\t\t\t\tconsole.log(chalk.red('\\nErrors occurred:'));\n\t\t\t\t\tfailedRemovals.forEach((r) => {\n\t\t\t\t\t\tconsole.log(chalk.red(` ${r.profileName}: 
${r.error}`));\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\t// Display notices about preserved files/configurations\n\t\t\t\tif (removalsWithNotices.length > 0) {\n\t\t\t\t\tconsole.log(chalk.cyan('\\nNotices:'));\n\t\t\t\t\tremovalsWithNotices.forEach((r) => {\n\t\t\t\t\t\tconsole.log(chalk.cyan(` ${r.profileName}: ${r.notice}`));\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\t// Overall summary\n\t\t\t\tconst totalProcessed = removalResults.length;\n\t\t\t\tconst totalSuccessful = successfulRemovals.length;\n\t\t\t\tconst totalSkipped = skippedRemovals.length;\n\t\t\t\tconst totalFailed = failedRemovals.length;\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t`\\nTotal: ${totalProcessed} profile(s) processed - ${totalSuccessful} removed, ${totalSkipped} skipped, ${totalFailed} failed.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t});\n\n\tprogramInstance\n\t\t.command('migrate')\n\t\t.description(\n\t\t\t'Migrate existing project to use the new .taskmaster directory structure'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --force',\n\t\t\t'Force migration even if .taskmaster directory already exists'\n\t\t)\n\t\t.option(\n\t\t\t'--backup',\n\t\t\t'Create backup of old files before migration (default: false)',\n\t\t\tfalse\n\t\t)\n\t\t.option(\n\t\t\t'--cleanup',\n\t\t\t'Remove old files after successful migration (default: true)',\n\t\t\ttrue\n\t\t)\n\t\t.option('-y, --yes', 'Skip confirmation prompts')\n\t\t.option(\n\t\t\t'--dry-run',\n\t\t\t'Show what would be migrated without actually moving files'\n\t\t)\n\t\t.action(async (options) => {\n\t\t\ttry {\n\t\t\t\tawait migrateProject(options);\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red('Error during migration:'), error.message);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// sync-readme command\n\tprogramInstance\n\t\t.command('sync-readme')\n\t\t.description('Sync the current task list to README.md in the project root')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks 
file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--with-subtasks', 'Include subtasks in the README output')\n\t\t.option(\n\t\t\t'-s, --status <status>',\n\t\t\t'Show only tasks matching this status (e.g., pending, done)'\n\t\t)\n\t\t.option('-t, --tag <tag>', 'Tag to use for the task list (default: master)')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst withSubtasks = options.withSubtasks || false;\n\t\t\tconst status = options.status || null;\n\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(\n\t\t\t\t\t`📝 Syncing tasks to README.md${withSubtasks ? ' (with subtasks)' : ''}${status ? ` (status: ${status})` : ''}...`\n\t\t\t\t)\n\t\t\t);\n\n\t\t\tconst success = await syncTasksToReadme(taskMaster.getProjectRoot(), {\n\t\t\t\twithSubtasks,\n\t\t\t\tstatus,\n\t\t\t\ttasksPath: taskMaster.getTasksPath(),\n\t\t\t\ttag\n\t\t\t});\n\n\t\t\tif (!success) {\n\t\t\t\tconsole.error(chalk.red('❌ Failed to sync tasks to README.md'));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// ===== TAG MANAGEMENT COMMANDS =====\n\n\t// add-tag command\n\tprogramInstance\n\t\t.command('add-tag')\n\t\t.description('Create a new tag context for organizing tasks')\n\t\t.argument(\n\t\t\t'[tagName]',\n\t\t\t'Name of the new tag to create (optional when using --from-branch)'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'--copy-from-current',\n\t\t\t'Copy tasks from the current tag to the new tag'\n\t\t)\n\t\t.option(\n\t\t\t'--copy-from <tag>',\n\t\t\t'Copy tasks from the specified tag to the new tag'\n\t\t)\n\t\t.option(\n\t\t\t'--from-branch',\n\t\t\t'Create tag name from current git branch (ignores tagName argument)'\n\t\t)\n\t\t.option('-d, --description <text>', 'Optional description for the 
tag')\n\t\t.action(async (tagName, options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Hint: Run task-master init or task-master parse-prd to create tasks.json first'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\t// Validate that either tagName is provided or --from-branch is used\n\t\t\t\tif (!tagName && !options.fromBranch) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t'Error: Either tagName argument or --from-branch option is required.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(chalk.yellow('Usage examples:'));\n\t\t\t\t\tconsole.log(chalk.cyan(' task-master add-tag my-tag'));\n\t\t\t\t\tconsole.log(chalk.cyan(' task-master add-tag --from-branch'));\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst context = {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\tcommandName: 'add-tag',\n\t\t\t\t\toutputType: 'cli'\n\t\t\t\t};\n\n\t\t\t\t// Handle --from-branch option\n\t\t\t\tif (options.fromBranch) {\n\t\t\t\t\tconst { createTagFromBranch } = await import(\n\t\t\t\t\t\t'./task-manager/tag-management.js'\n\t\t\t\t\t);\n\t\t\t\t\tconst gitUtils = await import('./utils/git-utils.js');\n\n\t\t\t\t\t// Check if we're in a git repository\n\t\t\t\t\tif (!(await gitUtils.isGitRepository(projectRoot))) {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t\t'Error: Not in a git repository. 
Cannot use --from-branch option.'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Get current git branch\n\t\t\t\t\tconst currentBranch = await gitUtils.getCurrentBranch(projectRoot);\n\t\t\t\t\tif (!currentBranch) {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red('Error: Could not determine current git branch.')\n\t\t\t\t\t\t);\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Create tag from branch\n\t\t\t\t\tconst branchOptions = {\n\t\t\t\t\t\tcopyFromCurrent: options.copyFromCurrent || false,\n\t\t\t\t\t\tcopyFromTag: options.copyFrom,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\toptions.description ||\n\t\t\t\t\t\t\t`Tag created from git branch \"${currentBranch}\"`\n\t\t\t\t\t};\n\n\t\t\t\t\tawait createTagFromBranch(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\tcurrentBranch,\n\t\t\t\t\t\tbranchOptions,\n\t\t\t\t\t\tcontext,\n\t\t\t\t\t\t'text'\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\t// Regular tag creation\n\t\t\t\t\tconst createOptions = {\n\t\t\t\t\t\tcopyFromCurrent: options.copyFromCurrent || false,\n\t\t\t\t\t\tcopyFromTag: options.copyFrom,\n\t\t\t\t\t\tdescription: options.description\n\t\t\t\t\t};\n\n\t\t\t\t\tawait createTag(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\ttagName,\n\t\t\t\t\t\tcreateOptions,\n\t\t\t\t\t\tcontext,\n\t\t\t\t\t\t'text'\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Handle auto-switch if requested\n\t\t\t\tif (options.autoSwitch) {\n\t\t\t\t\tconst { useTag } = await import('./task-manager/tag-management.js');\n\t\t\t\t\tconst finalTagName = options.fromBranch\n\t\t\t\t\t\t? 
(await import('./utils/git-utils.js')).sanitizeBranchNameForTag(\n\t\t\t\t\t\t\t\tawait (await import('./utils/git-utils.js')).getCurrentBranch(\n\t\t\t\t\t\t\t\t\tprojectRoot\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t: tagName;\n\t\t\t\t\tawait useTag(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\tfinalTagName,\n\t\t\t\t\t\t{},\n\t\t\t\t\t\tcontext,\n\t\t\t\t\t\t'text'\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error creating tag: ${error.message}`));\n\t\t\t\tshowAddTagHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tshowAddTagHelp();\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// delete-tag command\n\tprogramInstance\n\t\t.command('delete-tag')\n\t\t.description('Delete an existing tag and all its tasks')\n\t\t.argument('<tagName>', 'Name of the tag to delete')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('-y, --yes', 'Skip confirmation prompts')\n\t\t.action(async (tagName, options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst deleteOptions = {\n\t\t\t\t\tyes: options.yes || false\n\t\t\t\t};\n\n\t\t\t\tconst context = {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\tcommandName: 'delete-tag',\n\t\t\t\t\toutputType: 'cli'\n\t\t\t\t};\n\n\t\t\t\tawait deleteTag(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\ttagName,\n\t\t\t\t\tdeleteOptions,\n\t\t\t\t\tcontext,\n\t\t\t\t\t'text'\n\t\t\t\t);\n\t\t\t} catch 
(error) {\n\t\t\t\tconsole.error(chalk.red(`Error deleting tag: ${error.message}`));\n\t\t\t\tshowDeleteTagHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tshowDeleteTagHelp();\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// tags command\n\tprogramInstance\n\t\t.command('tags')\n\t\t.description('List all available tags with metadata')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--show-metadata', 'Show detailed metadata for each tag')\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true,\n\t\t\t\t\ttag: options.tag\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst listOptions = {\n\t\t\t\t\tshowTaskCounts: true,\n\t\t\t\t\tshowMetadata: options.showMetadata || false\n\t\t\t\t};\n\n\t\t\t\tconst context = {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\tcommandName: 'tags',\n\t\t\t\t\toutputType: 'cli'\n\t\t\t\t};\n\n\t\t\t\tawait tags(taskMaster.getTasksPath(), listOptions, context, 'text');\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error listing tags: ${error.message}`));\n\t\t\t\tshowTagsHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tshowTagsHelp();\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// use-tag command\n\tprogramInstance\n\t\t.command('use-tag')\n\t\t.description('Switch to a different tag 
context')\n\t\t.argument('<tagName>', 'Name of the tag to switch to')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.action(async (tagName, options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst context = {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\tcommandName: 'use-tag',\n\t\t\t\t\toutputType: 'cli'\n\t\t\t\t};\n\n\t\t\t\tawait useTag(taskMaster.getTasksPath(), tagName, {}, context, 'text');\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error switching tag: ${error.message}`));\n\t\t\t\tshowUseTagHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tshowUseTagHelp();\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// rename-tag command\n\tprogramInstance\n\t\t.command('rename-tag')\n\t\t.description('Rename an existing tag')\n\t\t.argument('<oldName>', 'Current name of the tag')\n\t\t.argument('<newName>', 'New name for the tag')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.action(async (oldName, newName, options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: 
${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst context = {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\tcommandName: 'rename-tag',\n\t\t\t\t\toutputType: 'cli'\n\t\t\t\t};\n\n\t\t\t\tawait renameTag(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\toldName,\n\t\t\t\t\tnewName,\n\t\t\t\t\t{},\n\t\t\t\t\tcontext,\n\t\t\t\t\t'text'\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error renaming tag: ${error.message}`));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// copy-tag command\n\tprogramInstance\n\t\t.command('copy-tag')\n\t\t.description('Copy an existing tag to create a new tag with the same tasks')\n\t\t.argument('<sourceName>', 'Name of the source tag to copy from')\n\t\t.argument('<targetName>', 'Name of the new tag to create')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('-d, --description <text>', 'Optional description for the new tag')\n\t\t.action(async (sourceName, targetName, options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst copyOptions = {\n\t\t\t\t\tdescription: options.description\n\t\t\t\t};\n\n\t\t\t\tconst context = {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\tcommandName: 'copy-tag',\n\t\t\t\t\toutputType: 'cli'\n\t\t\t\t};\n\n\t\t\t\tawait 
copyTag(\n\t\t\t\t\ttasksPath,\n\t\t\t\t\tsourceName,\n\t\t\t\t\ttargetName,\n\t\t\t\t\tcopyOptions,\n\t\t\t\t\tcontext,\n\t\t\t\t\t'text'\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error copying tag: ${error.message}`));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tprocess.exit(1);\n\t\t});\n\n\treturn programInstance;\n}\n\n/**\n * Setup the CLI application\n * @returns {Object} Configured Commander program\n */\nfunction setupCLI() {\n\t// Create a new program instance\n\tconst programInstance = program\n\t\t.name('dev')\n\t\t.description('AI-driven development task management')\n\t\t.version(() => {\n\t\t\t// Read version directly from package.json ONLY\n\t\t\ttry {\n\t\t\t\tconst packageJsonPath = path.join(process.cwd(), 'package.json');\n\t\t\t\tif (fs.existsSync(packageJsonPath)) {\n\t\t\t\t\tconst packageJson = JSON.parse(\n\t\t\t\t\t\tfs.readFileSync(packageJsonPath, 'utf8')\n\t\t\t\t\t);\n\t\t\t\t\treturn packageJson.version;\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\t// Silently fall back to 'unknown'\n\t\t\t\tlog(\n\t\t\t\t\t'warn',\n\t\t\t\t\t'Could not read package.json for version info in .version()'\n\t\t\t\t);\n\t\t\t}\n\t\t\treturn 'unknown'; // Default fallback if package.json fails\n\t\t})\n\t\t.helpOption('-h, --help', 'Display help')\n\t\t.addHelpCommand(false); // Disable default help command\n\n\t// Only override help for the main program, not for individual commands\n\tconst originalHelpInformation =\n\t\tprogramInstance.helpInformation.bind(programInstance);\n\tprogramInstance.helpInformation = function () {\n\t\t// If this is being called for a subcommand, use the default Commander.js help\n\t\tif (this.parent && this.parent !== programInstance) {\n\t\t\treturn originalHelpInformation();\n\t\t}\n\t\t// If this is the main program help, use our custom display\n\t\tdisplayHelp();\n\t\treturn '';\n\t};\n\n\t// 
Register commands\n\tregisterCommands(programInstance);\n\n\treturn programInstance;\n}\n\n/**\n * Check for newer version of task-master-ai\n * @returns {Promise<{currentVersion: string, latestVersion: string, needsUpdate: boolean}>}\n */\nasync function checkForUpdate() {\n\t// Get current version from package.json ONLY\n\tconst currentVersion = getTaskMasterVersion();\n\n\treturn new Promise((resolve) => {\n\t\t// Get the latest version from npm registry\n\t\tconst options = {\n\t\t\thostname: 'registry.npmjs.org',\n\t\t\tpath: '/task-master-ai',\n\t\t\tmethod: 'GET',\n\t\t\theaders: {\n\t\t\t\tAccept: 'application/vnd.npm.install-v1+json' // Lightweight response\n\t\t\t}\n\t\t};\n\n\t\tconst req = https.request(options, (res) => {\n\t\t\tlet data = '';\n\n\t\t\tres.on('data', (chunk) => {\n\t\t\t\tdata += chunk;\n\t\t\t});\n\n\t\t\tres.on('end', () => {\n\t\t\t\ttry {\n\t\t\t\t\tconst npmData = JSON.parse(data);\n\t\t\t\t\tconst latestVersion = npmData['dist-tags']?.latest || currentVersion;\n\n\t\t\t\t\t// Compare versions\n\t\t\t\t\tconst needsUpdate =\n\t\t\t\t\t\tcompareVersions(currentVersion, latestVersion) < 0;\n\n\t\t\t\t\tresolve({\n\t\t\t\t\t\tcurrentVersion,\n\t\t\t\t\t\tlatestVersion,\n\t\t\t\t\t\tneedsUpdate\n\t\t\t\t\t});\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog('debug', `Error parsing npm response: ${error.message}`);\n\t\t\t\t\tresolve({\n\t\t\t\t\t\tcurrentVersion,\n\t\t\t\t\t\tlatestVersion: currentVersion,\n\t\t\t\t\t\tneedsUpdate: false\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t});\n\t\t});\n\n\t\treq.on('error', (error) => {\n\t\t\tlog('debug', `Error checking for updates: ${error.message}`);\n\t\t\tresolve({\n\t\t\t\tcurrentVersion,\n\t\t\t\tlatestVersion: currentVersion,\n\t\t\t\tneedsUpdate: false\n\t\t\t});\n\t\t});\n\n\t\t// Set a timeout to avoid hanging if npm is slow\n\t\treq.setTimeout(3000, () => {\n\t\t\treq.abort();\n\t\t\tlog('debug', 'Update check timed out');\n\t\t\tresolve({\n\t\t\t\tcurrentVersion,\n\t\t\t\tlatestVersion: 
/**
 * Compare semantic versions
 * @param {string} v1 - First version
 * @param {string} v2 - Second version
 * @returns {number} -1 if v1 < v2, 0 if v1 = v2, 1 if v1 > v2
 */
function compareVersions(v1, v2) {
	// Split a dotted version string into numeric segments ("1.2.3" -> [1, 2, 3]).
	const toSegments = (version) =>
		version.split('.').map((segment) => parseInt(segment, 10));

	const left = toSegments(v1);
	const right = toSegments(v2);
	const segmentCount = Math.max(left.length, right.length);

	// Walk segment by segment; a missing (or non-numeric) segment counts as 0,
	// so "1.0" and "1.0.0" compare equal.
	for (let i = 0; i < segmentCount; i++) {
		const a = left[i] || 0;
		const b = right[i] || 0;
		if (a !== b) {
			return a < b ? -1 : 1;
		}
	}

	return 0;
}
ConfigurationError might be thrown here if configuration file is missing.\n\t\tconst programInstance = setupCLI();\n\t\tawait programInstance.parseAsync(argv);\n\n\t\t// After command execution, check if an update is available\n\t\tconst updateInfo = await updateCheckPromise;\n\t\tif (updateInfo.needsUpdate) {\n\t\t\tdisplayUpgradeNotification(\n\t\t\t\tupdateInfo.currentVersion,\n\t\t\t\tupdateInfo.latestVersion\n\t\t\t);\n\t\t}\n\n\t\t// Check if migration has occurred and show FYI notice once\n\t\ttry {\n\t\t\t// Use initTaskMaster with no required fields - will only fail if no project root\n\t\t\tconst taskMaster = initTaskMaster({});\n\n\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\t\t\tconst statePath = taskMaster.getStatePath();\n\n\t\t\tif (tasksPath && fs.existsSync(tasksPath)) {\n\t\t\t\t// Read raw file to check if it has master key (bypassing tag resolution)\n\t\t\t\tconst rawData = fs.readFileSync(tasksPath, 'utf8');\n\t\t\t\tconst parsedData = JSON.parse(rawData);\n\n\t\t\t\tif (parsedData && parsedData.master) {\n\t\t\t\t\t// Migration has occurred, check if we've shown the notice\n\t\t\t\t\tlet stateData = { migrationNoticeShown: false };\n\t\t\t\t\tif (statePath && fs.existsSync(statePath)) {\n\t\t\t\t\t\t// Read state.json directly without tag resolution since it's not a tagged file\n\t\t\t\t\t\tconst rawStateData = fs.readFileSync(statePath, 'utf8');\n\t\t\t\t\t\tstateData = JSON.parse(rawStateData) || stateData;\n\t\t\t\t\t}\n\n\t\t\t\t\tif (!stateData.migrationNoticeShown) {\n\t\t\t\t\t\tdisplayTaggedTasksFYI({ _migrationHappened: true });\n\n\t\t\t\t\t\t// Mark as shown\n\t\t\t\t\t\tstateData.migrationNoticeShown = true;\n\t\t\t\t\t\t// Write state.json directly without tag resolution since it's not a tagged file\n\t\t\t\t\t\tif (statePath) {\n\t\t\t\t\t\t\tfs.writeFileSync(statePath, JSON.stringify(stateData, null, 2));\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Silently ignore errors checking for 
migration notice\n\t\t}\n\t} catch (error) {\n\t\t// ** Specific catch block for missing configuration file **\n\t\tif (error instanceof ConfigurationError) {\n\t\t\tconsole.error(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.red.bold('Configuration Update Required!') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white('Taskmaster now uses a ') +\n\t\t\t\t\t\tchalk.yellow.bold('configuration file') +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t' in your project for AI model choices and settings.\\n\\n' +\n\t\t\t\t\t\t\t\t'This file appears to be '\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.red.bold('missing') +\n\t\t\t\t\t\tchalk.white('. No worries though.\\n\\n') +\n\t\t\t\t\t\tchalk.cyan.bold('To create this file, run the interactive setup:') +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.green(' task-master models --setup') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white.bold('Key Points:') +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.white('* ') +\n\t\t\t\t\t\tchalk.yellow.bold('Configuration file') +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t': Stores your AI model settings (do not manually edit)\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white('* ') +\n\t\t\t\t\t\tchalk.yellow.bold('.env & .mcp.json') +\n\t\t\t\t\t\tchalk.white(': Still used ') +\n\t\t\t\t\t\tchalk.red.bold('only') +\n\t\t\t\t\t\tchalk.white(' for your AI provider API keys.\\n\\n') +\n\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t'`task-master models` to check your config & available models\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t'`task-master models --setup` to adjust the AI models used by Taskmaster'\n\t\t\t\t\t\t),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tmargin: { top: 1 },\n\t\t\t\t\t\tborderColor: 'red',\n\t\t\t\t\t\tborderStyle: 'round'\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t} else {\n\t\t\t// Generic error handling for other errors\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\tif (getDebugFlag()) 
/**
 * Resolve the final complexity-report path.
 * Rules:
 * 1. If caller passes --output, always respect it.
 * 2. If no explicit output AND tag === 'master' → default report file
 * 3. If no explicit output AND tag !== 'master' → append _<tag>.json
 *
 * @param {object} params
 * @param {string} params.projectRoot - absolute project root
 * @param {string} [params.tag='master'] - resolved tag (defaults to 'master')
 * @param {string|undefined} [params.output] - --output value from CLI (may be undefined)
 * @returns {string} absolute path for the report
 */
export function resolveComplexityReportPath({
	projectRoot,
	tag = 'master',
	output // may be undefined
}) {
	// 1. user knows best: an explicit --output always wins, resolved
	// against the project root when it is relative.
	if (output) {
		return path.isAbsolute(output) ? output : path.join(projectRoot, output);
	}

	// 2. default naming
	const base = path.join(projectRoot, COMPLEXITY_REPORT_FILE);
	if (tag === 'master') {
		return base;
	}

	// Insert the tag suffix right before the file extension. The previous
	// base.replace('.json', ...) substituted the FIRST '.json' occurrence
	// anywhere in the absolute path, which would corrupt the result if a
	// directory name happened to contain '.json'.
	const parsed = path.parse(base);
	parsed.base = `${parsed.name}_${tag}${parsed.ext}`;
	return path.format(parsed);
}
.env file at projectRoot (if projectRoot provided)\n * @param {string} key - The environment variable key.\n * @param {object|null} [session=null] - The MCP session object.\n * @param {string|null} [projectRoot=null] - The project root directory (for .env fallback).\n * @returns {string|undefined} The value of the environment variable or undefined if not found.\n */\nfunction resolveEnvVariable(key, session = null, projectRoot = null) {\n\t// 1. Check session.env\n\tif (session?.env?.[key]) {\n\t\treturn session.env[key];\n\t}\n\n\t// 2. Read .env file at projectRoot\n\tif (projectRoot) {\n\t\tconst envPath = path.join(projectRoot, '.env');\n\t\tif (fs.existsSync(envPath)) {\n\t\t\ttry {\n\t\t\t\tconst envFileContent = fs.readFileSync(envPath, 'utf-8');\n\t\t\t\tconst parsedEnv = dotenv.parse(envFileContent); // Use dotenv to parse\n\t\t\t\tif (parsedEnv && parsedEnv[key]) {\n\t\t\t\t\t// console.log(`DEBUG: Found key ${key} in ${envPath}`); // Optional debug log\n\t\t\t\t\treturn parsedEnv[key];\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\t// Log error but don't crash, just proceed as if key wasn't found in file\n\t\t\t\tlog('warn', `Could not read or parse ${envPath}: ${error.message}`);\n\t\t\t}\n\t\t}\n\t}\n\n\t// 3. 
Fallback: Check process.env\n\tif (process.env[key]) {\n\t\treturn process.env[key];\n\t}\n\n\t// Not found anywhere\n\treturn undefined;\n}\n\n// --- Tag-Aware Path Resolution Utility ---\n\n/**\n * Slugifies a tag name to be filesystem-safe\n * @param {string} tagName - The tag name to slugify\n * @returns {string} Slugified tag name safe for filesystem use\n */\nfunction slugifyTagForFilePath(tagName) {\n\tif (!tagName || typeof tagName !== 'string') {\n\t\treturn 'unknown-tag';\n\t}\n\n\t// Replace invalid filesystem characters with hyphens and clean up\n\treturn tagName\n\t\t.replace(/[^a-zA-Z0-9_-]/g, '-') // Replace invalid chars with hyphens\n\t\t.replace(/^-+|-+$/g, '') // Remove leading/trailing hyphens\n\t\t.replace(/-+/g, '-') // Collapse multiple hyphens\n\t\t.toLowerCase() // Convert to lowercase\n\t\t.substring(0, 50); // Limit length to prevent overly long filenames\n}\n\n/**\n * Resolves a file path to be tag-aware, following the pattern used by other commands.\n * For non-master tags, appends _slugified-tagname before the file extension.\n * @param {string} basePath - The base file path (e.g., '.taskmaster/reports/task-complexity-report.json')\n * @param {string|null} tag - The tag name (null, undefined, or 'master' uses base path)\n * @param {string} [projectRoot='.'] - The project root directory\n * @returns {string} The resolved file path\n */\nfunction getTagAwareFilePath(basePath, tag, projectRoot = '.') {\n\t// Use path.parse and format for clean tag insertion\n\tconst parsedPath = path.parse(basePath);\n\tif (!tag || tag === 'master') {\n\t\treturn path.join(projectRoot, basePath);\n\t}\n\n\t// Slugify the tag for filesystem safety\n\tconst slugifiedTag = slugifyTagForFilePath(tag);\n\n\t// Append slugified tag before file extension\n\tparsedPath.base = `${parsedPath.name}_${slugifiedTag}${parsedPath.ext}`;\n\tconst relativePath = path.format(parsedPath);\n\treturn path.join(projectRoot, relativePath);\n}\n\n// --- Project Root Finding 
Utility ---\n/**\n * Recursively searches upwards for project root starting from a given directory.\n * @param {string} [startDir=process.cwd()] - The directory to start searching from.\n * @param {string[]} [markers=['package.json', '.git', LEGACY_CONFIG_FILE]] - Marker files/dirs to look for.\n * @returns {string|null} The path to the project root, or null if not found.\n */\nfunction findProjectRoot(\n\tstartDir = process.cwd(),\n\tmarkers = ['package.json', 'pyproject.toml', '.git', LEGACY_CONFIG_FILE]\n) {\n\tlet currentPath = path.resolve(startDir);\n\tconst rootPath = path.parse(currentPath).root;\n\n\twhile (currentPath !== rootPath) {\n\t\t// Check if any marker exists in the current directory\n\t\tconst hasMarker = markers.some((marker) => {\n\t\t\tconst markerPath = path.join(currentPath, marker);\n\t\t\treturn fs.existsSync(markerPath);\n\t\t});\n\n\t\tif (hasMarker) {\n\t\t\treturn currentPath;\n\t\t}\n\n\t\t// Move up one directory\n\t\tcurrentPath = path.dirname(currentPath);\n\t}\n\n\t// Check the root directory as well\n\tconst hasMarkerInRoot = markers.some((marker) => {\n\t\tconst markerPath = path.join(rootPath, marker);\n\t\treturn fs.existsSync(markerPath);\n\t});\n\n\treturn hasMarkerInRoot ? 
rootPath : null;\n}\n\n// --- Dynamic Configuration Function --- (REMOVED)\n\n// --- Logging and Utility Functions ---\n\n// Set up logging based on log level\nconst LOG_LEVELS = {\n\tdebug: 0,\n\tinfo: 1,\n\twarn: 2,\n\terror: 3,\n\tsuccess: 1 // Treat success like info level\n};\n\n/**\n * Returns the task manager module\n * @returns {Promise<Object>} The task manager module object\n */\nasync function getTaskManager() {\n\treturn import('./task-manager.js');\n}\n\n/**\n * Enable silent logging mode\n */\nfunction enableSilentMode() {\n\tsilentMode = true;\n}\n\n/**\n * Disable silent logging mode\n */\nfunction disableSilentMode() {\n\tsilentMode = false;\n}\n\n/**\n * Check if silent mode is enabled\n * @returns {boolean} True if silent mode is enabled\n */\nfunction isSilentMode() {\n\treturn silentMode;\n}\n\n/**\n * Logs a message at the specified level\n * @param {string} level - The log level (debug, info, warn, error)\n * @param {...any} args - Arguments to log\n */\nfunction log(level, ...args) {\n\t// Immediately return if silentMode is enabled\n\tif (isSilentMode()) {\n\t\treturn;\n\t}\n\n\t// GUARD: Prevent circular dependency during config loading\n\t// Use a simple fallback log level instead of calling getLogLevel()\n\tlet configLevel = 'info'; // Default fallback\n\ttry {\n\t\t// Only try to get config level if we're not in the middle of config loading\n\t\tconfigLevel = getLogLevel() || 'info';\n\t} catch (error) {\n\t\t// If getLogLevel() fails (likely due to circular dependency),\n\t\t// use default 'info' level and continue\n\t\tconfigLevel = 'info';\n\t}\n\n\t// Use text prefixes instead of emojis\n\tconst prefixes = {\n\t\tdebug: chalk.gray('[DEBUG]'),\n\t\tinfo: chalk.blue('[INFO]'),\n\t\twarn: chalk.yellow('[WARN]'),\n\t\terror: chalk.red('[ERROR]'),\n\t\tsuccess: chalk.green('[SUCCESS]')\n\t};\n\n\t// Ensure level exists, default to info if not\n\tconst currentLevel = LOG_LEVELS.hasOwnProperty(level) ? 
level : 'info';\n\n\t// Check log level configuration\n\tif (\n\t\tLOG_LEVELS[currentLevel] >= (LOG_LEVELS[configLevel] ?? LOG_LEVELS.info)\n\t) {\n\t\tconst prefix = prefixes[currentLevel] || '';\n\t\t// Use console.log for all levels, let chalk handle coloring\n\t\t// Construct the message properly\n\t\tconst message = args\n\t\t\t.map((arg) => (typeof arg === 'object' ? JSON.stringify(arg) : arg))\n\t\t\t.join(' ');\n\t\tconsole.log(`${prefix} ${message}`);\n\t}\n}\n\n/**\n * Checks if the data object has a tagged structure (contains tag objects with tasks arrays)\n * @param {Object} data - The data object to check\n * @returns {boolean} True if the data has a tagged structure\n */\nfunction hasTaggedStructure(data) {\n\tif (!data || typeof data !== 'object') {\n\t\treturn false;\n\t}\n\n\t// Check if any top-level properties are objects with tasks arrays\n\tfor (const key in data) {\n\t\tif (\n\t\t\tdata.hasOwnProperty(key) &&\n\t\t\ttypeof data[key] === 'object' &&\n\t\t\tArray.isArray(data[key].tasks)\n\t\t) {\n\t\t\treturn true;\n\t\t}\n\t}\n\treturn false;\n}\n\n/**\n * Reads and parses a JSON file\n * @param {string} filepath - Path to the JSON file\n * @param {string} [projectRoot] - Optional project root for tag resolution (used by MCP)\n * @param {string} [tag] - Optional tag to use instead of current tag resolution\n * @returns {Object|null} The parsed JSON data or null if error\n */\nfunction readJSON(filepath, projectRoot = null, tag = null) {\n\t// GUARD: Prevent circular dependency during config loading\n\tlet isDebug = false; // Default fallback\n\ttry {\n\t\t// Only try to get debug flag if we're not in the middle of config loading\n\t\tisDebug = getDebugFlag();\n\t} catch (error) {\n\t\t// If getDebugFlag() fails (likely due to circular dependency),\n\t\t// use default false and continue\n\t}\n\n\tif (isDebug) {\n\t\tconsole.log(\n\t\t\t`readJSON called with: ${filepath}, projectRoot: ${projectRoot}, tag: ${tag}`\n\t\t);\n\t}\n\n\tif (!filepath) 
{\n\t\treturn null;\n\t}\n\n\tlet data;\n\ttry {\n\t\tdata = JSON.parse(fs.readFileSync(filepath, 'utf8'));\n\t\tif (isDebug) {\n\t\t\tconsole.log(`Successfully read JSON from ${filepath}`);\n\t\t}\n\t} catch (err) {\n\t\tif (isDebug) {\n\t\t\tconsole.log(`Failed to read JSON from ${filepath}: ${err.message}`);\n\t\t}\n\t\treturn null;\n\t}\n\n\t// If it's not a tasks.json file, return as-is\n\tif (!filepath.includes('tasks.json') || !data) {\n\t\tif (isDebug) {\n\t\t\tconsole.log(`File is not tasks.json or data is null, returning as-is`);\n\t\t}\n\t\treturn data;\n\t}\n\n\t// Check if this is legacy format that needs migration\n\t// Only migrate if we have tasks at the ROOT level AND no tag-like structure\n\tif (\n\t\tArray.isArray(data.tasks) &&\n\t\t!data._rawTaggedData &&\n\t\t!hasTaggedStructure(data)\n\t) {\n\t\tif (isDebug) {\n\t\t\tconsole.log(`File is in legacy format, performing migration...`);\n\t\t}\n\n\t\t// This is legacy format - migrate it to tagged format\n\t\tconst migratedData = {\n\t\t\tmaster: {\n\t\t\t\ttasks: data.tasks,\n\t\t\t\tmetadata: data.metadata || {\n\t\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\t\tdescription: 'Tasks for master context'\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\n\t\t// Write the migrated data back to the file\n\t\ttry {\n\t\t\twriteJSON(filepath, migratedData);\n\t\t\tif (isDebug) {\n\t\t\t\tconsole.log(`Successfully migrated legacy format to tagged format`);\n\t\t\t}\n\n\t\t\t// Perform complete migration (config.json, state.json)\n\t\t\tperformCompleteTagMigration(filepath);\n\n\t\t\t// Check and auto-switch git tags if enabled (after migration)\n\t\t\t// This needs to run synchronously BEFORE tag resolution\n\t\t\tif (projectRoot) {\n\t\t\t\ttry {\n\t\t\t\t\t// Run git integration synchronously\n\t\t\t\t\tgitUtils.checkAndAutoSwitchGitTagSync(projectRoot, filepath);\n\t\t\t\t} catch (error) {\n\t\t\t\t\t// Silent fail - don't break normal 
operations\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Mark for migration notice\n\t\t\tmarkMigrationForNotice(filepath);\n\t\t} catch (writeError) {\n\t\t\tif (isDebug) {\n\t\t\t\tconsole.log(`Error writing migrated data: ${writeError.message}`);\n\t\t\t}\n\t\t\t// If write fails, continue with the original data\n\t\t}\n\n\t\t// Continue processing with the migrated data structure\n\t\tdata = migratedData;\n\t}\n\n\t// If we have tagged data, we need to resolve which tag to use\n\tif (typeof data === 'object' && !data.tasks) {\n\t\t// This is tagged format\n\t\tif (isDebug) {\n\t\t\tconsole.log(`File is in tagged format, resolving tag...`);\n\t\t}\n\n\t\t// Ensure all tags have proper metadata before proceeding\n\t\tfor (const tagName in data) {\n\t\t\tif (\n\t\t\t\tdata.hasOwnProperty(tagName) &&\n\t\t\t\ttypeof data[tagName] === 'object' &&\n\t\t\t\tdata[tagName].tasks\n\t\t\t) {\n\t\t\t\ttry {\n\t\t\t\t\tensureTagMetadata(data[tagName], {\n\t\t\t\t\t\tdescription: `Tasks for ${tagName} context`,\n\t\t\t\t\t\tskipUpdate: true // Don't update timestamp during read operations\n\t\t\t\t\t});\n\t\t\t\t} catch (error) {\n\t\t\t\t\t// If ensureTagMetadata fails, continue without metadata\n\t\t\t\t\tif (isDebug) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t`Failed to ensure metadata for tag ${tagName}: ${error.message}`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Store reference to the raw tagged data for functions that need it\n\t\tconst originalTaggedData = JSON.parse(JSON.stringify(data));\n\n\t\t// Check and auto-switch git tags if enabled (for existing tagged format)\n\t\t// This needs to run synchronously BEFORE tag resolution\n\t\tif (projectRoot) {\n\t\t\ttry {\n\t\t\t\t// Run git integration synchronously\n\t\t\t\tgitUtils.checkAndAutoSwitchGitTagSync(projectRoot, filepath);\n\t\t\t} catch (error) {\n\t\t\t\t// Silent fail - don't break normal operations\n\t\t\t}\n\t\t}\n\n\t\ttry {\n\t\t\t// Default to master tag if anything goes wrong\n\t\t\tlet 
resolvedTag = 'master';\n\n\t\t\t// Try to resolve the correct tag, but don't fail if it doesn't work\n\t\t\ttry {\n\t\t\t\t// If tag is provided, use it directly\n\t\t\t\tif (tag) {\n\t\t\t\t\tresolvedTag = tag;\n\t\t\t\t} else if (projectRoot) {\n\t\t\t\t\t// Use provided projectRoot\n\t\t\t\t\tresolvedTag = resolveTag({ projectRoot });\n\t\t\t\t} else {\n\t\t\t\t\t// Try to derive projectRoot from filepath\n\t\t\t\t\tconst derivedProjectRoot = findProjectRoot(path.dirname(filepath));\n\t\t\t\t\tif (derivedProjectRoot) {\n\t\t\t\t\t\tresolvedTag = resolveTag({ projectRoot: derivedProjectRoot });\n\t\t\t\t\t}\n\t\t\t\t\t// If derivedProjectRoot is null, stick with 'master'\n\t\t\t\t}\n\t\t\t} catch (tagResolveError) {\n\t\t\t\tif (isDebug) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t`Tag resolution failed, using master: ${tagResolveError.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\t// resolvedTag stays as 'master'\n\t\t\t}\n\n\t\t\tif (isDebug) {\n\t\t\t\tconsole.log(`Resolved tag: ${resolvedTag}`);\n\t\t\t}\n\n\t\t\t// Get the data for the resolved tag\n\t\t\tconst tagData = data[resolvedTag];\n\t\t\tif (tagData && tagData.tasks) {\n\t\t\t\t// Add the _rawTaggedData property and the resolved tag to the returned data\n\t\t\t\tconst result = {\n\t\t\t\t\t...tagData,\n\t\t\t\t\ttag: resolvedTag,\n\t\t\t\t\t_rawTaggedData: originalTaggedData\n\t\t\t\t};\n\t\t\t\tif (isDebug) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t`Returning data for tag '${resolvedTag}' with ${tagData.tasks.length} tasks`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\treturn result;\n\t\t\t} else {\n\t\t\t\t// If the resolved tag doesn't exist, fall back to master\n\t\t\t\tconst masterData = data.master;\n\t\t\t\tif (masterData && masterData.tasks) {\n\t\t\t\t\tif (isDebug) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t`Tag '${resolvedTag}' not found, falling back to master with ${masterData.tasks.length} tasks`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t\treturn {\n\t\t\t\t\t\t...masterData,\n\t\t\t\t\t\ttag: 
'master',\n\t\t\t\t\t\t_rawTaggedData: originalTaggedData\n\t\t\t\t\t};\n\t\t\t\t} else {\n\t\t\t\t\tif (isDebug) {\n\t\t\t\t\t\tconsole.log(`No valid tag data found, returning empty structure`);\n\t\t\t\t\t}\n\t\t\t\t\t// Return empty structure if no valid data\n\t\t\t\t\treturn {\n\t\t\t\t\t\ttasks: [],\n\t\t\t\t\t\ttag: 'master',\n\t\t\t\t\t\t_rawTaggedData: originalTaggedData\n\t\t\t\t\t};\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\tif (isDebug) {\n\t\t\t\tconsole.log(`Error during tag resolution: ${error.message}`);\n\t\t\t}\n\t\t\t// If anything goes wrong, try to return master or empty\n\t\t\tconst masterData = data.master;\n\t\t\tif (masterData && masterData.tasks) {\n\t\t\t\treturn {\n\t\t\t\t\t...masterData,\n\t\t\t\t\t_rawTaggedData: originalTaggedData\n\t\t\t\t};\n\t\t\t}\n\t\t\treturn {\n\t\t\t\ttasks: [],\n\t\t\t\t_rawTaggedData: originalTaggedData\n\t\t\t};\n\t\t}\n\t}\n\n\t// If we reach here, it's some other format\n\tif (isDebug) {\n\t\tconsole.log(`File format not recognized, returning as-is`);\n\t}\n\treturn data;\n}\n\n/**\n * Performs complete tag migration including config.json and state.json updates\n * @param {string} tasksJsonPath - Path to the tasks.json file that was migrated\n */\nfunction performCompleteTagMigration(tasksJsonPath) {\n\ttry {\n\t\t// Derive project root from tasks.json path\n\t\tconst projectRoot =\n\t\t\tfindProjectRoot(path.dirname(tasksJsonPath)) ||\n\t\t\tpath.dirname(tasksJsonPath);\n\n\t\t// 1. Migrate config.json - add defaultTag and tags section\n\t\tconst configPath = path.join(projectRoot, '.taskmaster', 'config.json');\n\t\tif (fs.existsSync(configPath)) {\n\t\t\tmigrateConfigJson(configPath);\n\t\t}\n\n\t\t// 2. 
Create state.json if it doesn't exist\n\t\tconst statePath = path.join(projectRoot, '.taskmaster', 'state.json');\n\t\tif (!fs.existsSync(statePath)) {\n\t\t\tcreateStateJson(statePath);\n\t\t}\n\n\t\tif (getDebugFlag()) {\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`Complete tag migration performed for project: ${projectRoot}`\n\t\t\t);\n\t\t}\n\t} catch (error) {\n\t\tif (getDebugFlag()) {\n\t\t\tlog('warn', `Error during complete tag migration: ${error.message}`);\n\t\t}\n\t}\n}\n\n/**\n * Migrates config.json to add tagged task system configuration\n * @param {string} configPath - Path to the config.json file\n */\nfunction migrateConfigJson(configPath) {\n\ttry {\n\t\tconst rawConfig = fs.readFileSync(configPath, 'utf8');\n\t\tconst config = JSON.parse(rawConfig);\n\t\tif (!config) return;\n\n\t\tlet modified = false;\n\n\t\t// Add global.defaultTag if missing\n\t\tif (!config.global) {\n\t\t\tconfig.global = {};\n\t\t}\n\t\tif (!config.global.defaultTag) {\n\t\t\tconfig.global.defaultTag = 'master';\n\t\t\tmodified = true;\n\t\t}\n\n\t\tif (modified) {\n\t\t\tfs.writeFileSync(configPath, JSON.stringify(config, null, 2), 'utf8');\n\t\t\tif (process.env.TASKMASTER_DEBUG === 'true') {\n\t\t\t\tconsole.log(\n\t\t\t\t\t'[DEBUG] Updated config.json with tagged task system settings'\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tif (process.env.TASKMASTER_DEBUG === 'true') {\n\t\t\tconsole.warn(`[WARN] Error migrating config.json: ${error.message}`);\n\t\t}\n\t}\n}\n\n/**\n * Creates initial state.json file for tagged task system\n * @param {string} statePath - Path where state.json should be created\n */\nfunction createStateJson(statePath) {\n\ttry {\n\t\tconst initialState = {\n\t\t\tcurrentTag: 'master',\n\t\t\tlastSwitched: new Date().toISOString(),\n\t\t\tbranchTagMapping: {},\n\t\t\tmigrationNoticeShown: false\n\t\t};\n\n\t\tfs.writeFileSync(statePath, JSON.stringify(initialState, null, 2), 'utf8');\n\t\tif (process.env.TASKMASTER_DEBUG === 'true') 
{\n\t\t\tconsole.log('[DEBUG] Created initial state.json for tagged task system');\n\t\t}\n\t} catch (error) {\n\t\tif (process.env.TASKMASTER_DEBUG === 'true') {\n\t\t\tconsole.warn(`[WARN] Error creating state.json: ${error.message}`);\n\t\t}\n\t}\n}\n\n/**\n * Marks in state.json that migration occurred and notice should be shown\n * @param {string} tasksJsonPath - Path to the tasks.json file\n */\nfunction markMigrationForNotice(tasksJsonPath) {\n\ttry {\n\t\tconst projectRoot = path.dirname(path.dirname(tasksJsonPath));\n\t\tconst statePath = path.join(projectRoot, '.taskmaster', 'state.json');\n\n\t\t// Ensure state.json exists\n\t\tif (!fs.existsSync(statePath)) {\n\t\t\tcreateStateJson(statePath);\n\t\t}\n\n\t\t// Read and update state to mark migration occurred using fs directly\n\t\ttry {\n\t\t\tconst rawState = fs.readFileSync(statePath, 'utf8');\n\t\t\tconst stateData = JSON.parse(rawState) || {};\n\t\t\t// Only set to false if it's not already set (i.e., first time migration)\n\t\t\tif (stateData.migrationNoticeShown === undefined) {\n\t\t\t\tstateData.migrationNoticeShown = false;\n\t\t\t\tfs.writeFileSync(statePath, JSON.stringify(stateData, null, 2), 'utf8');\n\t\t\t}\n\t\t} catch (stateError) {\n\t\t\tif (process.env.TASKMASTER_DEBUG === 'true') {\n\t\t\t\tconsole.warn(\n\t\t\t\t\t`[WARN] Error updating state for migration notice: ${stateError.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tif (process.env.TASKMASTER_DEBUG === 'true') {\n\t\t\tconsole.warn(\n\t\t\t\t`[WARN] Error marking migration for notice: ${error.message}`\n\t\t\t);\n\t\t}\n\t}\n}\n\n/**\n * Writes and saves a JSON file. 
Handles tagged task lists properly.\n * @param {string} filepath - Path to the JSON file\n * @param {Object} data - Data to write (can be resolved tag data or raw tagged data)\n * @param {string} projectRoot - Optional project root for tag context\n * @param {string} tag - Optional tag for tag context\n */\nfunction writeJSON(filepath, data, projectRoot = null, tag = null) {\n\tconst isDebug = process.env.TASKMASTER_DEBUG === 'true';\n\n\ttry {\n\t\tlet finalData = data;\n\n\t\t// If data represents resolved tag data but lost _rawTaggedData (edge-case observed in MCP path)\n\t\tif (\n\t\t\t!data._rawTaggedData &&\n\t\t\tprojectRoot &&\n\t\t\tArray.isArray(data.tasks) &&\n\t\t\t!hasTaggedStructure(data)\n\t\t) {\n\t\t\tconst resolvedTag = tag || getCurrentTag(projectRoot);\n\n\t\t\tif (isDebug) {\n\t\t\t\tconsole.log(\n\t\t\t\t\t`writeJSON: Detected resolved tag data missing _rawTaggedData. Re-reading raw data to prevent data loss for tag '${resolvedTag}'.`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Re-read the full file to get the complete tagged structure\n\t\t\tconst rawFullData = JSON.parse(fs.readFileSync(filepath, 'utf8'));\n\n\t\t\t// Merge the updated data into the full structure\n\t\t\tfinalData = {\n\t\t\t\t...rawFullData,\n\t\t\t\t[resolvedTag]: {\n\t\t\t\t\t// Preserve existing tag metadata if it exists, otherwise use what's passed\n\t\t\t\t\t...(rawFullData[resolvedTag]?.metadata || {}),\n\t\t\t\t\t...(data.metadata ? 
{ metadata: data.metadata } : {}),\n\t\t\t\t\ttasks: data.tasks // The updated tasks array is the source of truth here\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t\t// If we have _rawTaggedData, this means we're working with resolved tag data\n\t\t// and need to merge it back into the full tagged structure\n\t\telse if (data && data._rawTaggedData && projectRoot) {\n\t\t\tconst resolvedTag = tag || getCurrentTag(projectRoot);\n\n\t\t\t// Get the original tagged data\n\t\t\tconst originalTaggedData = data._rawTaggedData;\n\n\t\t\t// Create a clean copy of the current resolved data (without internal properties)\n\t\t\tconst { _rawTaggedData, tag: _, ...cleanResolvedData } = data;\n\n\t\t\t// Update the specific tag with the resolved data\n\t\t\tfinalData = {\n\t\t\t\t...originalTaggedData,\n\t\t\t\t[resolvedTag]: cleanResolvedData\n\t\t\t};\n\n\t\t\tif (isDebug) {\n\t\t\t\tconsole.log(\n\t\t\t\t\t`writeJSON: Merging resolved data back into tag '${resolvedTag}'`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\t// Clean up any internal properties that shouldn't be persisted\n\t\tlet cleanData = finalData;\n\t\tif (cleanData && typeof cleanData === 'object') {\n\t\t\t// Remove any _rawTaggedData or tag properties from root level\n\t\t\tconst { _rawTaggedData, tag: tagProp, ...rootCleanData } = cleanData;\n\t\t\tcleanData = rootCleanData;\n\n\t\t\t// Additional cleanup for tag objects\n\t\t\tif (typeof cleanData === 'object' && !Array.isArray(cleanData)) {\n\t\t\t\tconst finalCleanData = {};\n\t\t\t\tfor (const [key, value] of Object.entries(cleanData)) {\n\t\t\t\t\tif (\n\t\t\t\t\t\tvalue &&\n\t\t\t\t\t\ttypeof value === 'object' &&\n\t\t\t\t\t\tArray.isArray(value.tasks)\n\t\t\t\t\t) {\n\t\t\t\t\t\t// This is a tag object - clean up any rogue root-level properties\n\t\t\t\t\t\tconst { created, description, ...cleanTagData } = value;\n\n\t\t\t\t\t\t// Only keep the description if there's no metadata.description\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\tdescription &&\n\t\t\t\t\t\t\t(!cleanTagData.metadata 
|| !cleanTagData.metadata.description)\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\tcleanTagData.description = description;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tfinalCleanData[key] = cleanTagData;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfinalCleanData[key] = value;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcleanData = finalCleanData;\n\t\t\t}\n\t\t}\n\n\t\tfs.writeFileSync(filepath, JSON.stringify(cleanData, null, 2), 'utf8');\n\n\t\tif (isDebug) {\n\t\t\tconsole.log(`writeJSON: Successfully wrote to ${filepath}`);\n\t\t}\n\t} catch (error) {\n\t\tlog('error', `Error writing JSON file ${filepath}:`, error.message);\n\t\tif (isDebug) {\n\t\t\tlog('error', 'Full error details:', error);\n\t\t}\n\t}\n}\n\n/**\n * Sanitizes a prompt string for use in a shell command\n * @param {string} prompt The prompt to sanitize\n * @returns {string} Sanitized prompt\n */\nfunction sanitizePrompt(prompt) {\n\t// Replace double quotes with escaped double quotes\n\treturn prompt.replace(/\"/g, '\\\\\"');\n}\n\n/**\n * Reads the complexity report from file\n * @param {string} customPath - Optional custom path to the report\n * @returns {Object|null} The parsed complexity report or null if not found\n */\nfunction readComplexityReport(customPath = null) {\n\t// GUARD: Prevent circular dependency during config loading\n\tlet isDebug = false; // Default fallback\n\ttry {\n\t\t// Only try to get debug flag if we're not in the middle of config loading\n\t\tisDebug = getDebugFlag();\n\t} catch (error) {\n\t\t// If getDebugFlag() fails (likely due to circular dependency),\n\t\t// use default false and continue\n\t\tisDebug = false;\n\t}\n\n\ttry {\n\t\tlet reportPath;\n\t\tif (customPath) {\n\t\t\treportPath = customPath;\n\t\t} else {\n\t\t\t// Try new location first, then fall back to legacy\n\t\t\tconst newPath = path.join(process.cwd(), COMPLEXITY_REPORT_FILE);\n\t\t\tconst legacyPath = path.join(\n\t\t\t\tprocess.cwd(),\n\t\t\t\tLEGACY_COMPLEXITY_REPORT_FILE\n\t\t\t);\n\n\t\t\treportPath = fs.existsSync(newPath) ? 
newPath : legacyPath;\n\t\t}\n\n\t\tif (!fs.existsSync(reportPath)) {\n\t\t\tif (isDebug) {\n\t\t\t\tlog('debug', `Complexity report not found at ${reportPath}`);\n\t\t\t}\n\t\t\treturn null;\n\t\t}\n\n\t\tconst reportData = readJSON(reportPath);\n\t\tif (isDebug) {\n\t\t\tlog('debug', `Successfully read complexity report from ${reportPath}`);\n\t\t}\n\t\treturn reportData;\n\t} catch (error) {\n\t\tif (isDebug) {\n\t\t\tlog('error', `Error reading complexity report: ${error.message}`);\n\t\t}\n\t\treturn null;\n\t}\n}\n\n/**\n * Finds a task analysis in the complexity report\n * @param {Object} report - The complexity report\n * @param {number} taskId - The task ID to find\n * @returns {Object|null} The task analysis or null if not found\n */\nfunction findTaskInComplexityReport(report, taskId) {\n\tif (\n\t\t!report ||\n\t\t!report.complexityAnalysis ||\n\t\t!Array.isArray(report.complexityAnalysis)\n\t) {\n\t\treturn null;\n\t}\n\n\treturn report.complexityAnalysis.find((task) => task.taskId === taskId);\n}\n\nfunction addComplexityToTask(task, complexityReport) {\n\tlet taskId;\n\tif (task.isSubtask) {\n\t\ttaskId = task.parentTask.id;\n\t} else if (task.parentId) {\n\t\ttaskId = task.parentId;\n\t} else {\n\t\ttaskId = task.id;\n\t}\n\n\tconst taskAnalysis = findTaskInComplexityReport(complexityReport, taskId);\n\tif (taskAnalysis) {\n\t\ttask.complexityScore = taskAnalysis.complexityScore;\n\t}\n}\n\n/**\n * Checks if a task exists in the tasks array\n * @param {Array} tasks - The tasks array\n * @param {string|number} taskId - The task ID to check\n * @returns {boolean} True if the task exists, false otherwise\n */\nfunction taskExists(tasks, taskId) {\n\tif (!taskId || !tasks || !Array.isArray(tasks)) {\n\t\treturn false;\n\t}\n\n\t// Handle both regular task IDs and subtask IDs (e.g., \"1.2\")\n\tif (typeof taskId === 'string' && taskId.includes('.')) {\n\t\tconst [parentId, subtaskId] = taskId\n\t\t\t.split('.')\n\t\t\t.map((id) => parseInt(id, 
10));\n\t\tconst parentTask = tasks.find((t) => t.id === parentId);\n\n\t\tif (!parentTask || !parentTask.subtasks) {\n\t\t\treturn false;\n\t\t}\n\n\t\treturn parentTask.subtasks.some((st) => st.id === subtaskId);\n\t}\n\n\tconst id = parseInt(taskId, 10);\n\treturn tasks.some((t) => t.id === id);\n}\n\n/**\n * Formats a task ID as a string\n * @param {string|number} id - The task ID to format\n * @returns {string} The formatted task ID\n */\nfunction formatTaskId(id) {\n\tif (typeof id === 'string' && id.includes('.')) {\n\t\treturn id; // Already formatted as a string with a dot (e.g., \"1.2\")\n\t}\n\n\tif (typeof id === 'number') {\n\t\treturn id.toString();\n\t}\n\n\treturn id;\n}\n\n/**\n * Finds a task by ID in the tasks array. Optionally filters subtasks by status.\n * @param {Array} tasks - The tasks array\n * @param {string|number} taskId - The task ID to find\n * @param {Object|null} complexityReport - Optional pre-loaded complexity report\n * @param {string} [statusFilter] - Optional status to filter subtasks by\n * @returns {{task: Object|null, originalSubtaskCount: number|null, originalSubtasks: Array|null}} The task object (potentially with filtered subtasks), the original subtask count, and original subtasks array if filtered, or nulls if not found.\n */\nfunction findTaskById(\n\ttasks,\n\ttaskId,\n\tcomplexityReport = null,\n\tstatusFilter = null\n) {\n\tif (!taskId || !tasks || !Array.isArray(tasks)) {\n\t\treturn { task: null, originalSubtaskCount: null };\n\t}\n\n\t// Check if it's a subtask ID (e.g., \"1.2\")\n\tif (typeof taskId === 'string' && taskId.includes('.')) {\n\t\t// If looking for a subtask, statusFilter doesn't apply directly here.\n\t\tconst [parentId, subtaskId] = taskId\n\t\t\t.split('.')\n\t\t\t.map((id) => parseInt(id, 10));\n\t\tconst parentTask = tasks.find((t) => t.id === parentId);\n\n\t\tif (!parentTask || !parentTask.subtasks) {\n\t\t\treturn { task: null, originalSubtaskCount: null, originalSubtasks: null 
};\n\t\t}\n\n\t\tconst subtask = parentTask.subtasks.find((st) => st.id === subtaskId);\n\t\tif (subtask) {\n\t\t\t// Add reference to parent task for context\n\t\t\tsubtask.parentTask = {\n\t\t\t\tid: parentTask.id,\n\t\t\t\ttitle: parentTask.title,\n\t\t\t\tstatus: parentTask.status\n\t\t\t};\n\t\t\tsubtask.isSubtask = true;\n\t\t}\n\n\t\t// If we found a task, check for complexity data\n\t\tif (subtask && complexityReport) {\n\t\t\taddComplexityToTask(subtask, complexityReport);\n\t\t}\n\n\t\treturn {\n\t\t\ttask: subtask || null,\n\t\t\toriginalSubtaskCount: null,\n\t\t\toriginalSubtasks: null\n\t\t};\n\t}\n\n\tlet taskResult = null;\n\tlet originalSubtaskCount = null;\n\tlet originalSubtasks = null;\n\n\t// Find the main task\n\tconst id = parseInt(taskId, 10);\n\tconst task = tasks.find((t) => t.id === id) || null;\n\n\t// If task not found, return nulls\n\tif (!task) {\n\t\treturn { task: null, originalSubtaskCount: null, originalSubtasks: null };\n\t}\n\n\ttaskResult = task;\n\n\t// If task found and statusFilter provided, filter its subtasks\n\tif (statusFilter && task.subtasks && Array.isArray(task.subtasks)) {\n\t\t// Store original subtasks and count before filtering\n\t\toriginalSubtasks = [...task.subtasks]; // Clone the original subtasks array\n\t\toriginalSubtaskCount = task.subtasks.length;\n\n\t\t// Clone the task to avoid modifying the original array\n\t\tconst filteredTask = { ...task };\n\t\tfilteredTask.subtasks = task.subtasks.filter(\n\t\t\t(subtask) =>\n\t\t\t\tsubtask.status &&\n\t\t\t\tsubtask.status.toLowerCase() === statusFilter.toLowerCase()\n\t\t);\n\n\t\ttaskResult = filteredTask;\n\t}\n\n\t// If task found and complexityReport provided, add complexity data\n\tif (taskResult && complexityReport) {\n\t\taddComplexityToTask(taskResult, complexityReport);\n\t}\n\n\t// Return the found task, original subtask count, and original subtasks\n\treturn { task: taskResult, originalSubtaskCount, originalSubtasks };\n}\n\n/**\n * Truncates text 
to a specified length\n * @param {string} text - The text to truncate\n * @param {number} maxLength - The maximum length\n * @returns {string} The truncated text\n */\nfunction truncate(text, maxLength) {\n\tif (!text || text.length <= maxLength) {\n\t\treturn text;\n\t}\n\n\treturn `${text.slice(0, maxLength - 3)}...`;\n}\n\n/**\n * Checks if array or object are empty\n * @param {*} value - The value to check\n * @returns {boolean} True if empty, false otherwise\n */\nfunction isEmpty(value) {\n\tif (Array.isArray(value)) {\n\t\treturn value.length === 0;\n\t} else if (typeof value === 'object' && value !== null) {\n\t\treturn Object.keys(value).length === 0;\n\t}\n\n\treturn false; // Not an array or object, or is null\n}\n\n/**\n * Find cycles in a dependency graph using DFS\n * @param {string} subtaskId - Current subtask ID\n * @param {Map} dependencyMap - Map of subtask IDs to their dependencies\n * @param {Set} visited - Set of visited nodes\n * @param {Set} recursionStack - Set of nodes in current recursion stack\n * @returns {Array} - List of dependency edges that need to be removed to break cycles\n */\nfunction findCycles(\n\tsubtaskId,\n\tdependencyMap,\n\tvisited = new Set(),\n\trecursionStack = new Set(),\n\tpath = []\n) {\n\t// Mark the current node as visited and part of recursion stack\n\tvisited.add(subtaskId);\n\trecursionStack.add(subtaskId);\n\tpath.push(subtaskId);\n\n\tconst cyclesToBreak = [];\n\n\t// Get all dependencies of the current subtask\n\tconst dependencies = dependencyMap.get(subtaskId) || [];\n\n\t// For each dependency\n\tfor (const depId of dependencies) {\n\t\t// If not visited, recursively check for cycles\n\t\tif (!visited.has(depId)) {\n\t\t\tconst cycles = findCycles(depId, dependencyMap, visited, recursionStack, [\n\t\t\t\t...path\n\t\t\t]);\n\t\t\tcyclesToBreak.push(...cycles);\n\t\t}\n\t\t// If the dependency is in the recursion stack, we found a cycle\n\t\telse if (recursionStack.has(depId)) {\n\t\t\t// Find the position 
of the dependency in the path\n\t\t\tconst cycleStartIndex = path.indexOf(depId);\n\t\t\t// The last edge in the cycle is what we want to remove\n\t\t\tconst cycleEdges = path.slice(cycleStartIndex);\n\t\t\t// We'll remove the last edge in the cycle (the one that points back)\n\t\t\tcyclesToBreak.push(depId);\n\t\t}\n\t}\n\n\t// Remove the node from recursion stack before returning\n\trecursionStack.delete(subtaskId);\n\n\treturn cyclesToBreak;\n}\n\n/**\n * Convert a string from camelCase to kebab-case\n * @param {string} str - The string to convert\n * @returns {string} The kebab-case version of the string\n */\nconst toKebabCase = (str) => {\n\t// Special handling for common acronyms\n\tconst withReplacedAcronyms = str\n\t\t.replace(/ID/g, 'Id')\n\t\t.replace(/API/g, 'Api')\n\t\t.replace(/UI/g, 'Ui')\n\t\t.replace(/URL/g, 'Url')\n\t\t.replace(/URI/g, 'Uri')\n\t\t.replace(/JSON/g, 'Json')\n\t\t.replace(/XML/g, 'Xml')\n\t\t.replace(/HTML/g, 'Html')\n\t\t.replace(/CSS/g, 'Css');\n\n\t// Insert hyphens before capital letters and convert to lowercase\n\treturn withReplacedAcronyms\n\t\t.replace(/([A-Z])/g, '-$1')\n\t\t.toLowerCase()\n\t\t.replace(/^-/, ''); // Remove leading hyphen if present\n};\n\n/**\n * Detect camelCase flags in command arguments\n * @param {string[]} args - Command line arguments to check\n * @returns {Array<{original: string, kebabCase: string}>} - List of flags that should be converted\n */\nfunction detectCamelCaseFlags(args) {\n\tconst camelCaseFlags = [];\n\tfor (const arg of args) {\n\t\tif (arg.startsWith('--')) {\n\t\t\tconst flagName = arg.split('=')[0].slice(2); // Remove -- and anything after =\n\n\t\t\t// Skip single-word flags - they can't be camelCase\n\t\t\tif (!flagName.includes('-') && !/[A-Z]/.test(flagName)) {\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\t// Check for camelCase pattern (lowercase followed by uppercase)\n\t\t\tif (/[a-z][A-Z]/.test(flagName)) {\n\t\t\t\tconst kebabVersion = toKebabCase(flagName);\n\t\t\t\tif 
(kebabVersion !== flagName) {\n\t\t\t\t\tcamelCaseFlags.push({\n\t\t\t\t\t\toriginal: flagName,\n\t\t\t\t\t\tkebabCase: kebabVersion\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn camelCaseFlags;\n}\n\n/**\n * Aggregates an array of telemetry objects into a single summary object.\n * @param {Array<Object>} telemetryArray - Array of telemetryData objects.\n * @param {string} overallCommandName - The name for the aggregated command.\n * @returns {Object|null} Aggregated telemetry object or null if input is empty.\n */\nfunction aggregateTelemetry(telemetryArray, overallCommandName) {\n\tif (!telemetryArray || telemetryArray.length === 0) {\n\t\treturn null;\n\t}\n\n\tconst aggregated = {\n\t\ttimestamp: new Date().toISOString(), // Use current time for aggregation time\n\t\tuserId: telemetryArray[0].userId, // Assume userId is consistent\n\t\tcommandName: overallCommandName,\n\t\tmodelUsed: 'Multiple', // Default if models vary\n\t\tproviderName: 'Multiple', // Default if providers vary\n\t\tinputTokens: 0,\n\t\toutputTokens: 0,\n\t\ttotalTokens: 0,\n\t\ttotalCost: 0,\n\t\tcurrency: telemetryArray[0].currency || 'USD' // Assume consistent currency or default\n\t};\n\n\tconst uniqueModels = new Set();\n\tconst uniqueProviders = new Set();\n\tconst uniqueCurrencies = new Set();\n\n\ttelemetryArray.forEach((item) => {\n\t\taggregated.inputTokens += item.inputTokens || 0;\n\t\taggregated.outputTokens += item.outputTokens || 0;\n\t\taggregated.totalCost += item.totalCost || 0;\n\t\tuniqueModels.add(item.modelUsed);\n\t\tuniqueProviders.add(item.providerName);\n\t\tuniqueCurrencies.add(item.currency || 'USD');\n\t});\n\n\taggregated.totalTokens = aggregated.inputTokens + aggregated.outputTokens;\n\taggregated.totalCost = parseFloat(aggregated.totalCost.toFixed(6)); // Fix precision\n\n\tif (uniqueModels.size === 1) {\n\t\taggregated.modelUsed = [...uniqueModels][0];\n\t}\n\tif (uniqueProviders.size === 1) {\n\t\taggregated.providerName = 
[...uniqueProviders][0];\n\t}\n\tif (uniqueCurrencies.size > 1) {\n\t\taggregated.currency = 'Multiple'; // Mark if currencies actually differ\n\t} else if (uniqueCurrencies.size === 1) {\n\t\taggregated.currency = [...uniqueCurrencies][0];\n\t}\n\n\treturn aggregated;\n}\n\n/**\n * @deprecated Use TaskMaster.getCurrentTag() instead\n * Gets the current tag from state.json or falls back to defaultTag from config\n * @param {string} projectRoot - The project root directory (required)\n * @returns {string} The current tag name\n */\nfunction getCurrentTag(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getCurrentTag');\n\t}\n\n\ttry {\n\t\t// Try to read current tag from state.json using fs directly\n\t\tconst statePath = path.join(projectRoot, '.taskmaster', 'state.json');\n\t\tif (fs.existsSync(statePath)) {\n\t\t\tconst rawState = fs.readFileSync(statePath, 'utf8');\n\t\t\tconst stateData = JSON.parse(rawState);\n\t\t\tif (stateData && stateData.currentTag) {\n\t\t\t\treturn stateData.currentTag;\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\t// Ignore errors, fall back to default\n\t}\n\n\t// Fall back to defaultTag from config using fs directly\n\ttry {\n\t\tconst configPath = path.join(projectRoot, '.taskmaster', 'config.json');\n\t\tif (fs.existsSync(configPath)) {\n\t\t\tconst rawConfig = fs.readFileSync(configPath, 'utf8');\n\t\t\tconst configData = JSON.parse(rawConfig);\n\t\t\tif (configData && configData.global && configData.global.defaultTag) {\n\t\t\t\treturn configData.global.defaultTag;\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\t// Ignore errors, use hardcoded default\n\t}\n\n\t// Final fallback\n\treturn 'master';\n}\n\n/**\n * Resolves the tag to use based on options\n * @param {Object} options - Options object\n * @param {string} options.projectRoot - The project root directory (required)\n * @param {string} [options.tag] - Explicit tag to use\n * @returns {string} The resolved tag name\n */\nfunction 
resolveTag(options = {}) {\n\tconst { projectRoot, tag } = options;\n\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for resolveTag');\n\t}\n\n\t// If explicit tag provided, use it\n\tif (tag) {\n\t\treturn tag;\n\t}\n\n\t// Otherwise get current tag from state/config\n\treturn getCurrentTag(projectRoot);\n}\n\n/**\n * Gets the tasks array for a specific tag from tagged tasks.json data\n * @param {Object} data - The parsed tasks.json data (after migration)\n * @param {string} tagName - The tag name to get tasks for\n * @returns {Array} The tasks array for the specified tag, or empty array if not found\n */\nfunction getTasksForTag(data, tagName) {\n\tif (!data || !tagName) {\n\t\treturn [];\n\t}\n\n\t// Handle migrated format: { \"master\": { \"tasks\": [...] }, \"otherTag\": { \"tasks\": [...] } }\n\tif (\n\t\tdata[tagName] &&\n\t\tdata[tagName].tasks &&\n\t\tArray.isArray(data[tagName].tasks)\n\t) {\n\t\treturn data[tagName].tasks;\n\t}\n\n\treturn [];\n}\n\n/**\n * Sets the tasks array for a specific tag in the data structure\n * @param {Object} data - The tasks.json data object\n * @param {string} tagName - The tag name to set tasks for\n * @param {Array} tasks - The tasks array to set\n * @returns {Object} The updated data object\n */\nfunction setTasksForTag(data, tagName, tasks) {\n\tif (!data) {\n\t\tdata = {};\n\t}\n\n\tif (!data[tagName]) {\n\t\tdata[tagName] = {};\n\t}\n\n\tdata[tagName].tasks = tasks || [];\n\treturn data;\n}\n\n/**\n * Flatten tasks array to include subtasks as individual searchable items\n * @param {Array} tasks - Array of task objects\n * @returns {Array} Flattened array including both tasks and subtasks\n */\nfunction flattenTasksWithSubtasks(tasks) {\n\tconst flattened = [];\n\n\tfor (const task of tasks) {\n\t\t// Add the main task\n\t\tflattened.push({\n\t\t\t...task,\n\t\t\tsearchableId: task.id.toString(), // For consistent ID handling\n\t\t\tisSubtask: false\n\t\t});\n\n\t\t// Add subtasks if they 
exist\n\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\tfor (const subtask of task.subtasks) {\n\t\t\t\tflattened.push({\n\t\t\t\t\t...subtask,\n\t\t\t\t\tsearchableId: `${task.id}.${subtask.id}`, // Format: \"15.2\"\n\t\t\t\t\tisSubtask: true,\n\t\t\t\t\tparentId: task.id,\n\t\t\t\t\tparentTitle: task.title,\n\t\t\t\t\t// Enhance subtask context with parent information\n\t\t\t\t\ttitle: `${subtask.title} (subtask of: ${task.title})`,\n\t\t\t\t\tdescription: `${subtask.description} [Parent: ${task.description}]`\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\treturn flattened;\n}\n\n/**\n * Ensures the tag object has a metadata object with created/updated timestamps.\n * @param {Object} tagObj - The tag object (e.g., data['master'])\n * @param {Object} [opts] - Optional fields (e.g., description, skipUpdate)\n * @param {string} [opts.description] - Description for the tag\n * @param {boolean} [opts.skipUpdate] - If true, don't update the 'updated' timestamp\n * @returns {Object} The updated tag object (for chaining)\n */\nfunction ensureTagMetadata(tagObj, opts = {}) {\n\tif (!tagObj || typeof tagObj !== 'object') {\n\t\tthrow new Error('tagObj must be a valid object');\n\t}\n\n\tconst now = new Date().toISOString();\n\n\tif (!tagObj.metadata) {\n\t\t// Create new metadata object\n\t\ttagObj.metadata = {\n\t\t\tcreated: now,\n\t\t\tupdated: now,\n\t\t\t...(opts.description ? 
{ description: opts.description } : {})\n\t\t};\n\t} else {\n\t\t// Ensure existing metadata has required fields\n\t\tif (!tagObj.metadata.created) {\n\t\t\ttagObj.metadata.created = now;\n\t\t}\n\n\t\t// Update timestamp unless explicitly skipped\n\t\tif (!opts.skipUpdate) {\n\t\t\ttagObj.metadata.updated = now;\n\t\t}\n\n\t\t// Add description if provided and not already present\n\t\tif (opts.description && !tagObj.metadata.description) {\n\t\t\ttagObj.metadata.description = opts.description;\n\t\t}\n\t}\n\n\treturn tagObj;\n}\n\n// Export all utility functions and configuration\nexport {\n\tLOG_LEVELS,\n\tlog,\n\treadJSON,\n\twriteJSON,\n\tsanitizePrompt,\n\treadComplexityReport,\n\tfindTaskInComplexityReport,\n\ttaskExists,\n\tformatTaskId,\n\tfindTaskById,\n\ttruncate,\n\tisEmpty,\n\tfindCycles,\n\ttoKebabCase,\n\tdetectCamelCaseFlags,\n\tdisableSilentMode,\n\tenableSilentMode,\n\tgetTaskManager,\n\tisSilentMode,\n\taddComplexityToTask,\n\tresolveEnvVariable,\n\tfindProjectRoot,\n\tgetTagAwareFilePath,\n\tslugifyTagForFilePath,\n\taggregateTelemetry,\n\tgetCurrentTag,\n\tresolveTag,\n\tgetTasksForTag,\n\tsetTasksForTag,\n\tperformCompleteTagMigration,\n\tmigrateConfigJson,\n\tcreateStateJson,\n\tmarkMigrationForNotice,\n\tflattenTasksWithSubtasks,\n\tensureTagMetadata\n};\n"], ["/claude-task-master/scripts/modules/task-manager/update-tasks.js", "import path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport Table from 'cli-table3';\nimport { z } from 'zod'; // Keep Zod for post-parsing validation\n\nimport {\n\tlog as consoleLog,\n\treadJSON,\n\twriteJSON,\n\ttruncate,\n\tisSilentMode\n} from '../utils.js';\n\nimport {\n\tgetStatusWithColor,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayAiUsageSummary\n} from '../ui.js';\n\nimport { getDebugFlag } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport generateTaskFiles from './generate-task-files.js';\nimport { generateTextService 
} from '../ai-services-unified.js';\nimport { getModelConfiguration } from './models.js';\nimport { ContextGatherer } from '../utils/contextGatherer.js';\nimport { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';\nimport { flattenTasksWithSubtasks, findProjectRoot } from '../utils.js';\n\n// Zod schema for validating the structure of tasks AFTER parsing\nconst updatedTaskSchema = z\n\t.object({\n\t\tid: z.number().int(),\n\t\ttitle: z.string(),\n\t\tdescription: z.string(),\n\t\tstatus: z.string(),\n\t\tdependencies: z.array(z.union([z.number().int(), z.string()])),\n\t\tpriority: z.string().nullable(),\n\t\tdetails: z.string().nullable(),\n\t\ttestStrategy: z.string().nullable(),\n\t\tsubtasks: z.array(z.any()).nullable() // Keep subtasks flexible for now\n\t})\n\t.strip(); // Allow potential extra fields during parsing if needed, then validate structure\n\n// Preprocessing schema that adds defaults before validation\nconst preprocessTaskSchema = z.preprocess((task) => {\n\t// Ensure task is an object\n\tif (typeof task !== 'object' || task === null) {\n\t\treturn {};\n\t}\n\n\t// Return task with defaults for missing fields\n\treturn {\n\t\t...task,\n\t\t// Add defaults for required fields if missing\n\t\tid: task.id ?? 0,\n\t\ttitle: task.title ?? 'Untitled Task',\n\t\tdescription: task.description ?? '',\n\t\tstatus: task.status ?? 'pending',\n\t\tdependencies: Array.isArray(task.dependencies) ? task.dependencies : [],\n\t\t// Optional fields - preserve undefined/null distinction\n\t\tpriority: task.hasOwnProperty('priority') ? task.priority : null,\n\t\tdetails: task.hasOwnProperty('details') ? task.details : null,\n\t\ttestStrategy: task.hasOwnProperty('testStrategy')\n\t\t\t? task.testStrategy\n\t\t\t: null,\n\t\tsubtasks: Array.isArray(task.subtasks)\n\t\t\t? task.subtasks\n\t\t\t: task.subtasks === null\n\t\t\t\t? 
null\n\t\t\t\t: []\n\t};\n}, updatedTaskSchema);\n\nconst updatedTaskArraySchema = z.array(updatedTaskSchema);\nconst preprocessedTaskArraySchema = z.array(preprocessTaskSchema);\n\n/**\n * Parses an array of task objects from AI's text response.\n * @param {string} text - Response text from AI.\n * @param {number} expectedCount - Expected number of tasks.\n * @param {Function | Object} logFn - The logging function or MCP log object.\n * @param {boolean} isMCP - Flag indicating if logFn is MCP logger.\n * @returns {Array} Parsed and validated tasks array.\n * @throws {Error} If parsing or validation fails.\n */\nfunction parseUpdatedTasksFromText(text, expectedCount, logFn, isMCP) {\n\tconst report = (level, ...args) => {\n\t\tif (isMCP) {\n\t\t\tif (typeof logFn[level] === 'function') logFn[level](...args);\n\t\t\telse logFn.info(...args);\n\t\t} else if (!isSilentMode()) {\n\t\t\t// Check silent mode for consoleLog\n\t\t\tconsoleLog(level, ...args);\n\t\t}\n\t};\n\n\treport(\n\t\t'info',\n\t\t'Attempting to parse updated tasks array from text response...'\n\t);\n\tif (!text || text.trim() === '')\n\t\tthrow new Error('AI response text is empty.');\n\n\tlet cleanedResponse = text.trim();\n\tconst originalResponseForDebug = cleanedResponse;\n\tlet parseMethodUsed = 'raw'; // Track which method worked\n\n\t// --- NEW Step 1: Try extracting between [] first ---\n\tconst firstBracketIndex = cleanedResponse.indexOf('[');\n\tconst lastBracketIndex = cleanedResponse.lastIndexOf(']');\n\tlet potentialJsonFromArray = null;\n\n\tif (firstBracketIndex !== -1 && lastBracketIndex > firstBracketIndex) {\n\t\tpotentialJsonFromArray = cleanedResponse.substring(\n\t\t\tfirstBracketIndex,\n\t\t\tlastBracketIndex + 1\n\t\t);\n\t\t// Basic check to ensure it's not just \"[]\" or malformed\n\t\tif (potentialJsonFromArray.length <= 2) {\n\t\t\tpotentialJsonFromArray = null; // Ignore empty array\n\t\t}\n\t}\n\n\t// If [] extraction yielded something, try parsing it immediately\n\tif 
(potentialJsonFromArray) {\n\t\ttry {\n\t\t\tconst testParse = JSON.parse(potentialJsonFromArray);\n\t\t\t// It worked! Use this as the primary cleaned response.\n\t\t\tcleanedResponse = potentialJsonFromArray;\n\t\t\tparseMethodUsed = 'brackets';\n\t\t} catch (e) {\n\t\t\treport(\n\t\t\t\t'info',\n\t\t\t\t'Content between [] looked promising but failed initial parse. Proceeding to other methods.'\n\t\t\t);\n\t\t\t// Reset cleanedResponse to original if bracket parsing failed\n\t\t\tcleanedResponse = originalResponseForDebug;\n\t\t}\n\t}\n\n\t// --- Step 2: If bracket parsing didn't work or wasn't applicable, try code block extraction ---\n\tif (parseMethodUsed === 'raw') {\n\t\t// Only look for ```json blocks now\n\t\tconst codeBlockMatch = cleanedResponse.match(\n\t\t\t/```json\\s*([\\s\\S]*?)\\s*```/i // Only match ```json\n\t\t);\n\t\tif (codeBlockMatch) {\n\t\t\tcleanedResponse = codeBlockMatch[1].trim();\n\t\t\tparseMethodUsed = 'codeblock';\n\t\t\treport('info', 'Extracted JSON content from JSON Markdown code block.');\n\t\t} else {\n\t\t\treport('info', 'No JSON code block found.');\n\t\t\t// --- Step 3: If code block failed, try stripping prefixes ---\n\t\t\tconst commonPrefixes = [\n\t\t\t\t'json\\n',\n\t\t\t\t'javascript\\n', // Keep checking common prefixes just in case\n\t\t\t\t'python\\n',\n\t\t\t\t'here are the updated tasks:',\n\t\t\t\t'here is the updated json:',\n\t\t\t\t'updated tasks:',\n\t\t\t\t'updated json:',\n\t\t\t\t'response:',\n\t\t\t\t'output:'\n\t\t\t];\n\t\t\tlet prefixFound = false;\n\t\t\tfor (const prefix of commonPrefixes) {\n\t\t\t\tif (cleanedResponse.toLowerCase().startsWith(prefix)) {\n\t\t\t\t\tcleanedResponse = cleanedResponse.substring(prefix.length).trim();\n\t\t\t\t\tparseMethodUsed = 'prefix';\n\t\t\t\t\treport('info', `Stripped prefix: \"${prefix.trim()}\"`);\n\t\t\t\t\tprefixFound = true;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (!prefixFound) {\n\t\t\t\treport(\n\t\t\t\t\t'warn',\n\t\t\t\t\t'Response does not 
appear to contain [], JSON code block, or known prefix. Attempting raw parse.'\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t}\n\n\t// --- Step 4: Attempt final parse ---\n\tlet parsedTasks;\n\ttry {\n\t\tparsedTasks = JSON.parse(cleanedResponse);\n\t} catch (parseError) {\n\t\treport('error', `Failed to parse JSON array: ${parseError.message}`);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Extraction method used: ${parseMethodUsed}` // Log which method failed\n\t\t);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`\n\t\t);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`\n\t\t);\n\t\tthrow new Error(\n\t\t\t`Failed to parse JSON response array: ${parseError.message}`\n\t\t);\n\t}\n\n\t// --- Step 5 & 6: Validate Array structure and Zod schema ---\n\tif (!Array.isArray(parsedTasks)) {\n\t\treport(\n\t\t\t'error',\n\t\t\t`Parsed content is not an array. Type: ${typeof parsedTasks}`\n\t\t);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Parsed content sample: ${JSON.stringify(parsedTasks).substring(0, 200)}`\n\t\t);\n\t\tthrow new Error('Parsed AI response is not a valid JSON array.');\n\t}\n\n\treport('info', `Successfully parsed ${parsedTasks.length} potential tasks.`);\n\tif (expectedCount && parsedTasks.length !== expectedCount) {\n\t\treport(\n\t\t\t'warn',\n\t\t\t`Expected ${expectedCount} tasks, but parsed ${parsedTasks.length}.`\n\t\t);\n\t}\n\n\t// Log missing fields for debugging before preprocessing\n\tlet hasWarnings = false;\n\tparsedTasks.forEach((task, index) => {\n\t\tconst missingFields = [];\n\t\tif (!task.hasOwnProperty('id')) missingFields.push('id');\n\t\tif (!task.hasOwnProperty('status')) missingFields.push('status');\n\t\tif (!task.hasOwnProperty('dependencies'))\n\t\t\tmissingFields.push('dependencies');\n\n\t\tif (missingFields.length > 0) {\n\t\t\thasWarnings = true;\n\t\t\treport(\n\t\t\t\t'warn',\n\t\t\t\t`Task ${index} is 
missing fields: ${missingFields.join(', ')} - will use defaults`\n\t\t\t);\n\t\t}\n\t});\n\n\tif (hasWarnings) {\n\t\treport(\n\t\t\t'warn',\n\t\t\t'Some tasks were missing required fields. Applying defaults...'\n\t\t);\n\t}\n\n\t// Use the preprocessing schema to add defaults and validate\n\tconst preprocessResult = preprocessedTaskArraySchema.safeParse(parsedTasks);\n\n\tif (!preprocessResult.success) {\n\t\t// This should rarely happen now since preprocessing adds defaults\n\t\treport('error', 'Failed to validate task array even after preprocessing.');\n\t\tpreprocessResult.error.errors.forEach((err) => {\n\t\t\treport('error', ` - Path '${err.path.join('.')}': ${err.message}`);\n\t\t});\n\n\t\tthrow new Error(\n\t\t\t`AI response failed validation: ${preprocessResult.error.message}`\n\t\t);\n\t}\n\n\treport('info', 'Successfully validated and transformed task structure.');\n\treturn preprocessResult.data.slice(\n\t\t0,\n\t\texpectedCount || preprocessResult.data.length\n\t);\n}\n\n/**\n * Update tasks based on new context using the unified AI service.\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {number} fromId - Task ID to start updating from\n * @param {string} prompt - Prompt with new context\n * @param {boolean} [useResearch=false] - Whether to use the research AI role.\n * @param {Object} context - Context object containing session and mcpLog.\n * @param {Object} [context.session] - Session object from MCP server.\n * @param {Object} [context.mcpLog] - MCP logger object.\n * @param {string} [context.tag] - Tag for the task\n * @param {string} [outputFormat='text'] - Output format ('text' or 'json').\n */\nasync function updateTasks(\n\ttasksPath,\n\tfromId,\n\tprompt,\n\tuseResearch = false,\n\tcontext = {},\n\toutputFormat = 'text' // Default to text for CLI\n) {\n\tconst { session, mcpLog, projectRoot: providedProjectRoot, tag } = context;\n\t// Use mcpLog if available, otherwise use the imported consoleLog function\n\tconst 
logFn = mcpLog || consoleLog;\n\t// Flag to easily check which logger type we have\n\tconst isMCP = !!mcpLog;\n\n\tif (isMCP)\n\t\tlogFn.info(`updateTasks called with context: session=${!!session}`);\n\telse logFn('info', `updateTasks called`); // CLI log\n\n\ttry {\n\t\tif (isMCP) logFn.info(`Updating tasks from ID ${fromId}`);\n\t\telse\n\t\t\tlogFn(\n\t\t\t\t'info',\n\t\t\t\t`Updating tasks from ID ${fromId} with prompt: \"${prompt}\"`\n\t\t\t);\n\n\t\t// Determine project root\n\t\tconst projectRoot = providedProjectRoot || findProjectRoot();\n\t\tif (!projectRoot) {\n\t\t\tthrow new Error('Could not determine project root directory');\n\t\t}\n\n\t\t// --- Task Loading/Filtering (Updated to pass projectRoot and tag) ---\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!data || !data.tasks)\n\t\t\tthrow new Error(`No valid tasks found in ${tasksPath}`);\n\t\tconst tasksToUpdate = data.tasks.filter(\n\t\t\t(task) => task.id >= fromId && task.status !== 'done'\n\t\t);\n\t\tif (tasksToUpdate.length === 0) {\n\t\t\tif (isMCP)\n\t\t\t\tlogFn.info(`No tasks to update (ID >= ${fromId} and not 'done').`);\n\t\t\telse\n\t\t\t\tlogFn('info', `No tasks to update (ID >= ${fromId} and not 'done').`);\n\t\t\tif (outputFormat === 'text') console.log(/* yellow message */);\n\t\t\treturn; // Nothing to do\n\t\t}\n\t\t// --- End Task Loading/Filtering ---\n\n\t\t// --- Context Gathering ---\n\t\tlet gatheredContext = '';\n\t\ttry {\n\t\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\t\t\tconst allTasksFlat = flattenTasksWithSubtasks(data.tasks);\n\t\t\tconst fuzzySearch = new FuzzyTaskSearch(allTasksFlat, 'update');\n\t\t\tconst searchResults = fuzzySearch.findRelevantTasks(prompt, {\n\t\t\t\tmaxResults: 5,\n\t\t\t\tincludeSelf: true\n\t\t\t});\n\t\t\tconst relevantTaskIds = fuzzySearch.getTaskIds(searchResults);\n\n\t\t\tconst tasksToUpdateIds = tasksToUpdate.map((t) => t.id.toString());\n\t\t\tconst finalTaskIds = [\n\t\t\t\t...new 
Set([...tasksToUpdateIds, ...relevantTaskIds])\n\t\t\t];\n\n\t\t\tif (finalTaskIds.length > 0) {\n\t\t\t\tconst contextResult = await contextGatherer.gather({\n\t\t\t\t\ttasks: finalTaskIds,\n\t\t\t\t\tformat: 'research'\n\t\t\t\t});\n\t\t\t\tgatheredContext = contextResult.context || '';\n\t\t\t}\n\t\t} catch (contextError) {\n\t\t\tlogFn(\n\t\t\t\t'warn',\n\t\t\t\t`Could not gather additional context: ${contextError.message}`\n\t\t\t);\n\t\t}\n\t\t// --- End Context Gathering ---\n\n\t\t// --- Display Tasks to Update (CLI Only - Unchanged) ---\n\t\tif (outputFormat === 'text') {\n\t\t\t// Show the tasks that will be updated\n\t\t\tconst table = new Table({\n\t\t\t\thead: [\n\t\t\t\t\tchalk.cyan.bold('ID'),\n\t\t\t\t\tchalk.cyan.bold('Title'),\n\t\t\t\t\tchalk.cyan.bold('Status')\n\t\t\t\t],\n\t\t\t\tcolWidths: [5, 70, 20]\n\t\t\t});\n\n\t\t\ttasksToUpdate.forEach((task) => {\n\t\t\t\ttable.push([\n\t\t\t\t\ttask.id,\n\t\t\t\t\ttruncate(task.title, 57),\n\t\t\t\t\tgetStatusWithColor(task.status)\n\t\t\t\t]);\n\t\t\t});\n\n\t\t\tconsole.log(\n\t\t\t\tboxen(chalk.white.bold(`Updating ${tasksToUpdate.length} tasks`), {\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t})\n\t\t\t);\n\n\t\t\tconsole.log(table.toString());\n\n\t\t\t// Display a message about how completed subtasks are handled\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.cyan.bold('How Completed Subtasks Are Handled:') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• Subtasks marked as \"done\" or \"completed\" will be preserved\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• New subtasks will build upon what has already been completed\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• If completed work needs revision, a new subtask will be created instead of modifying done items\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• This approach 
maintains a clear record of completed work and new requirements'\n\t\t\t\t\t\t),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\t\t// --- End Display Tasks ---\n\n\t\t// --- Build Prompts (Using PromptManager) ---\n\t\t// Load prompts using PromptManager\n\t\tconst promptManager = getPromptManager();\n\t\tconst { systemPrompt, userPrompt } = await promptManager.loadPrompt(\n\t\t\t'update-tasks',\n\t\t\t{\n\t\t\t\ttasks: tasksToUpdate,\n\t\t\t\tupdatePrompt: prompt,\n\t\t\t\tuseResearch,\n\t\t\t\tprojectContext: gatheredContext\n\t\t\t}\n\t\t);\n\t\t// --- End Build Prompts ---\n\n\t\t// --- AI Call ---\n\t\tlet loadingIndicator = null;\n\t\tlet aiServiceResponse = null;\n\n\t\tif (!isMCP && outputFormat === 'text') {\n\t\t\tloadingIndicator = startLoadingIndicator('Updating tasks with AI...\\n');\n\t\t}\n\n\t\ttry {\n\t\t\t// Determine role based on research flag\n\t\t\tconst serviceRole = useResearch ? 'research' : 'main';\n\n\t\t\t// Call the unified AI service\n\t\t\taiServiceResponse = await generateTextService({\n\t\t\t\trole: serviceRole,\n\t\t\t\tsession: session,\n\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\tsystemPrompt: systemPrompt,\n\t\t\t\tprompt: userPrompt,\n\t\t\t\tcommandName: 'update-tasks',\n\t\t\t\toutputType: isMCP ? 
'mcp' : 'cli'\n\t\t\t});\n\n\t\t\tif (loadingIndicator)\n\t\t\t\tstopLoadingIndicator(loadingIndicator, 'AI update complete.');\n\n\t\t\t// Use the mainResult (text) for parsing\n\t\t\tconst parsedUpdatedTasks = parseUpdatedTasksFromText(\n\t\t\t\taiServiceResponse.mainResult,\n\t\t\t\ttasksToUpdate.length,\n\t\t\t\tlogFn,\n\t\t\t\tisMCP\n\t\t\t);\n\n\t\t\t// --- Update Tasks Data (Updated writeJSON call) ---\n\t\t\tif (!Array.isArray(parsedUpdatedTasks)) {\n\t\t\t\t// Should be caught by parser, but extra check\n\t\t\t\tthrow new Error(\n\t\t\t\t\t'Parsed AI response for updated tasks was not an array.'\n\t\t\t\t);\n\t\t\t}\n\t\t\tif (isMCP)\n\t\t\t\tlogFn.info(\n\t\t\t\t\t`Received ${parsedUpdatedTasks.length} updated tasks from AI.`\n\t\t\t\t);\n\t\t\telse\n\t\t\t\tlogFn(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Received ${parsedUpdatedTasks.length} updated tasks from AI.`\n\t\t\t\t);\n\t\t\t// Create a map for efficient lookup\n\t\t\tconst updatedTasksMap = new Map(\n\t\t\t\tparsedUpdatedTasks.map((task) => [task.id, task])\n\t\t\t);\n\n\t\t\tlet actualUpdateCount = 0;\n\t\t\tdata.tasks.forEach((task, index) => {\n\t\t\t\tif (updatedTasksMap.has(task.id)) {\n\t\t\t\t\t// Only update if the task was part of the set sent to AI\n\t\t\t\t\tconst updatedTask = updatedTasksMap.get(task.id);\n\t\t\t\t\t// Merge the updated task with the existing one to preserve fields like subtasks\n\t\t\t\t\tdata.tasks[index] = {\n\t\t\t\t\t\t...task, // Keep all existing fields\n\t\t\t\t\t\t...updatedTask, // Override with updated fields\n\t\t\t\t\t\t// Ensure subtasks field is preserved if not provided by AI\n\t\t\t\t\t\tsubtasks:\n\t\t\t\t\t\t\tupdatedTask.subtasks !== undefined\n\t\t\t\t\t\t\t\t? 
updatedTask.subtasks\n\t\t\t\t\t\t\t\t: task.subtasks\n\t\t\t\t\t};\n\t\t\t\t\tactualUpdateCount++;\n\t\t\t\t}\n\t\t\t});\n\t\t\tif (isMCP)\n\t\t\t\tlogFn.info(\n\t\t\t\t\t`Applied updates to ${actualUpdateCount} tasks in the dataset.`\n\t\t\t\t);\n\t\t\telse\n\t\t\t\tlogFn(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Applied updates to ${actualUpdateCount} tasks in the dataset.`\n\t\t\t\t);\n\n\t\t\t// Fix: Pass projectRoot and currentTag to writeJSON\n\t\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\t\t\tif (isMCP)\n\t\t\t\tlogFn.info(\n\t\t\t\t\t`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`\n\t\t\t\t);\n\t\t\telse\n\t\t\t\tlogFn(\n\t\t\t\t\t'success',\n\t\t\t\t\t`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`\n\t\t\t\t);\n\t\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath));\n\n\t\t\tif (outputFormat === 'text' && aiServiceResponse.telemetryData) {\n\t\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t\t}\n\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tupdatedTasks: parsedUpdatedTasks,\n\t\t\t\ttelemetryData: aiServiceResponse.telemetryData,\n\t\t\t\ttagInfo: aiServiceResponse.tagInfo\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tif (loadingIndicator) stopLoadingIndicator(loadingIndicator);\n\t\t\tif (isMCP) logFn.error(`Error during AI service call: ${error.message}`);\n\t\t\telse logFn('error', `Error during AI service call: ${error.message}`);\n\t\t\tif (error.message.includes('API key')) {\n\t\t\t\tif (isMCP)\n\t\t\t\t\tlogFn.error(\n\t\t\t\t\t\t'Please ensure API keys are configured correctly in .env or mcp.json.'\n\t\t\t\t\t);\n\t\t\t\telse\n\t\t\t\t\tlogFn(\n\t\t\t\t\t\t'error',\n\t\t\t\t\t\t'Please ensure API keys are configured correctly in .env or mcp.json.'\n\t\t\t\t\t);\n\t\t\t}\n\t\t\tthrow error;\n\t\t} finally {\n\t\t\tif (loadingIndicator) stopLoadingIndicator(loadingIndicator);\n\t\t}\n\t} catch (error) {\n\t\t// --- General Error Handling (Unchanged) ---\n\t\tif (isMCP) 
logFn.error(`Error updating tasks: ${error.message}`);\n\t\telse logFn('error', `Error updating tasks: ${error.message}`);\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\tif (getDebugFlag(session)) {\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\t\t\tprocess.exit(1);\n\t\t} else {\n\t\t\tthrow error; // Re-throw for MCP/programmatic callers\n\t\t}\n\t\t// --- End General Error Handling ---\n\t}\n}\n\nexport default updateTasks;\n"], ["/claude-task-master/scripts/modules/task-manager/parse-prd.js", "import fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport { z } from 'zod';\n\nimport {\n\tlog,\n\twriteJSON,\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode,\n\treadJSON,\n\tfindTaskById,\n\tensureTagMetadata,\n\tgetCurrentTag\n} from '../utils.js';\n\nimport { generateObjectService } from '../ai-services-unified.js';\nimport { getDebugFlag } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport { displayAiUsageSummary } from '../ui.js';\n\n// Define the Zod schema for a SINGLE task object\nconst prdSingleTaskSchema = z.object({\n\tid: z.number().int().positive(),\n\ttitle: z.string().min(1),\n\tdescription: z.string().min(1),\n\tdetails: z.string().nullable(),\n\ttestStrategy: z.string().nullable(),\n\tpriority: z.enum(['high', 'medium', 'low']).nullable(),\n\tdependencies: z.array(z.number().int().positive()).nullable(),\n\tstatus: z.string().nullable()\n});\n\n// Define the Zod schema for the ENTIRE expected AI response object\nconst prdResponseSchema = z.object({\n\ttasks: z.array(prdSingleTaskSchema),\n\tmetadata: z.object({\n\t\tprojectName: z.string(),\n\t\ttotalTasks: z.number(),\n\t\tsourceFile: z.string(),\n\t\tgeneratedAt: z.string()\n\t})\n});\n\n/**\n * Parse a PRD file and generate tasks\n * @param {string} prdPath - Path to the PRD file\n * @param {string} tasksPath - Path to the tasks.json file\n * 
@param {number} numTasks - Number of tasks to generate\n * @param {Object} options - Additional options\n * @param {boolean} [options.force=false] - Whether to overwrite existing tasks.json.\n * @param {boolean} [options.append=false] - Append to existing tasks file.\n * @param {boolean} [options.research=false] - Use research model for enhanced PRD analysis.\n * @param {Object} [options.reportProgress] - Function to report progress (optional, likely unused).\n * @param {Object} [options.mcpLog] - MCP logger object (optional).\n * @param {Object} [options.session] - Session object from MCP server (optional).\n * @param {string} [options.projectRoot] - Project root path (for MCP/env fallback).\n * @param {string} [options.tag] - Target tag for task generation.\n * @param {string} [outputFormat='text'] - Output format ('text' or 'json').\n */\nasync function parsePRD(prdPath, tasksPath, numTasks, options = {}) {\n\tconst {\n\t\treportProgress,\n\t\tmcpLog,\n\t\tsession,\n\t\tprojectRoot,\n\t\tforce = false,\n\t\tappend = false,\n\t\tresearch = false,\n\t\ttag\n\t} = options;\n\tconst isMCP = !!mcpLog;\n\tconst outputFormat = isMCP ? 'json' : 'text';\n\n\t// Use the provided tag, or the current active tag, or default to 'master'\n\tconst targetTag = tag;\n\n\tconst logFn = mcpLog\n\t\t? 
mcpLog\n\t\t: {\n\t\t\t\t// Wrapper for CLI\n\t\t\t\tinfo: (...args) => log('info', ...args),\n\t\t\t\twarn: (...args) => log('warn', ...args),\n\t\t\t\terror: (...args) => log('error', ...args),\n\t\t\t\tdebug: (...args) => log('debug', ...args),\n\t\t\t\tsuccess: (...args) => log('success', ...args)\n\t\t\t};\n\n\t// Create custom reporter using logFn\n\tconst report = (message, level = 'info') => {\n\t\t// Check logFn directly\n\t\tif (logFn && typeof logFn[level] === 'function') {\n\t\t\tlogFn[level](message);\n\t\t} else if (!isSilentMode() && outputFormat === 'text') {\n\t\t\t// Fallback to original log only if necessary and in CLI text mode\n\t\t\tlog(level, message);\n\t\t}\n\t};\n\n\treport(\n\t\t`Parsing PRD file: ${prdPath}, Force: ${force}, Append: ${append}, Research: ${research}`\n\t);\n\n\tlet existingTasks = [];\n\tlet nextId = 1;\n\tlet aiServiceResponse = null;\n\n\ttry {\n\t\t// Check if there are existing tasks in the target tag\n\t\tlet hasExistingTasksInTag = false;\n\t\tif (fs.existsSync(tasksPath)) {\n\t\t\ttry {\n\t\t\t\t// Read the entire file to check if the tag exists\n\t\t\t\tconst existingFileContent = fs.readFileSync(tasksPath, 'utf8');\n\t\t\t\tconst allData = JSON.parse(existingFileContent);\n\n\t\t\t\t// Check if the target tag exists and has tasks\n\t\t\t\tif (\n\t\t\t\t\tallData[targetTag] &&\n\t\t\t\t\tArray.isArray(allData[targetTag].tasks) &&\n\t\t\t\t\tallData[targetTag].tasks.length > 0\n\t\t\t\t) {\n\t\t\t\t\thasExistingTasksInTag = true;\n\t\t\t\t\texistingTasks = allData[targetTag].tasks;\n\t\t\t\t\tnextId = Math.max(...existingTasks.map((t) => t.id || 0)) + 1;\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\t// If we can't read the file or parse it, assume no existing tasks in this tag\n\t\t\t\thasExistingTasksInTag = false;\n\t\t\t}\n\t\t}\n\n\t\t// Handle file existence and overwrite/append logic based on target tag\n\t\tif (hasExistingTasksInTag) {\n\t\t\tif (append) {\n\t\t\t\treport(\n\t\t\t\t\t`Append mode enabled. 
Found ${existingTasks.length} existing tasks in tag '${targetTag}'. Next ID will be ${nextId}.`,\n\t\t\t\t\t'info'\n\t\t\t\t);\n\t\t\t} else if (!force) {\n\t\t\t\t// Not appending and not forcing overwrite, and there are existing tasks in the target tag\n\t\t\t\tconst overwriteError = new Error(\n\t\t\t\t\t`Tag '${targetTag}' already contains ${existingTasks.length} tasks. Use --force to overwrite or --append to add to existing tasks.`\n\t\t\t\t);\n\t\t\t\treport(overwriteError.message, 'error');\n\t\t\t\tif (outputFormat === 'text') {\n\t\t\t\t\tconsole.error(chalk.red(overwriteError.message));\n\t\t\t\t}\n\t\t\t\tthrow overwriteError;\n\t\t\t} else {\n\t\t\t\t// Force overwrite is true\n\t\t\t\treport(\n\t\t\t\t\t`Force flag enabled. Overwriting existing tasks in tag '${targetTag}'.`,\n\t\t\t\t\t'info'\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\t// No existing tasks in target tag, proceed without confirmation\n\t\t\treport(\n\t\t\t\t`Tag '${targetTag}' is empty or doesn't exist. Creating/updating tag with new tasks.`,\n\t\t\t\t'info'\n\t\t\t);\n\t\t}\n\n\t\treport(`Reading PRD content from ${prdPath}`, 'info');\n\t\tconst prdContent = fs.readFileSync(prdPath, 'utf8');\n\t\tif (!prdContent) {\n\t\t\tthrow new Error(`Input file ${prdPath} is empty or could not be read.`);\n\t\t}\n\n\t\t// Load prompts using PromptManager\n\t\tconst promptManager = getPromptManager();\n\n\t\t// Get defaultTaskPriority from config\n\t\tconst { getDefaultPriority } = await import('../config-manager.js');\n\t\tconst defaultTaskPriority = getDefaultPriority(projectRoot) || 'medium';\n\n\t\tconst { systemPrompt, userPrompt } = await promptManager.loadPrompt(\n\t\t\t'parse-prd',\n\t\t\t{\n\t\t\t\tresearch,\n\t\t\t\tnumTasks,\n\t\t\t\tnextId,\n\t\t\t\tprdContent,\n\t\t\t\tprdPath,\n\t\t\t\tdefaultTaskPriority\n\t\t\t}\n\t\t);\n\n\t\t// Call the unified AI service\n\t\treport(\n\t\t\t`Calling AI service to generate tasks from PRD${research ? 
' with research-backed analysis' : ''}...`,\n\t\t\t'info'\n\t\t);\n\n\t\t// Call generateObjectService with the CORRECT schema and additional telemetry params\n\t\taiServiceResponse = await generateObjectService({\n\t\t\trole: research ? 'research' : 'main', // Use research role if flag is set\n\t\t\tsession: session,\n\t\t\tprojectRoot: projectRoot,\n\t\t\tschema: prdResponseSchema,\n\t\t\tobjectName: 'tasks_data',\n\t\t\tsystemPrompt: systemPrompt,\n\t\t\tprompt: userPrompt,\n\t\t\tcommandName: 'parse-prd',\n\t\t\toutputType: isMCP ? 'mcp' : 'cli'\n\t\t});\n\n\t\t// Create the directory if it doesn't exist\n\t\tconst tasksDir = path.dirname(tasksPath);\n\t\tif (!fs.existsSync(tasksDir)) {\n\t\t\tfs.mkdirSync(tasksDir, { recursive: true });\n\t\t}\n\t\tlogFn.success(\n\t\t\t`Successfully parsed PRD via AI service${research ? ' with research-backed analysis' : ''}.`\n\t\t);\n\n\t\t// Validate and Process Tasks\n\t\t// const generatedData = aiServiceResponse?.mainResult?.object;\n\n\t\t// Robustly get the actual AI-generated object\n\t\tlet generatedData = null;\n\t\tif (aiServiceResponse?.mainResult) {\n\t\t\tif (\n\t\t\t\ttypeof aiServiceResponse.mainResult === 'object' &&\n\t\t\t\taiServiceResponse.mainResult !== null &&\n\t\t\t\t'tasks' in aiServiceResponse.mainResult\n\t\t\t) {\n\t\t\t\t// If mainResult itself is the object with a 'tasks' property\n\t\t\t\tgeneratedData = aiServiceResponse.mainResult;\n\t\t\t} else if (\n\t\t\t\ttypeof aiServiceResponse.mainResult.object === 'object' &&\n\t\t\t\taiServiceResponse.mainResult.object !== null &&\n\t\t\t\t'tasks' in aiServiceResponse.mainResult.object\n\t\t\t) {\n\t\t\t\t// If mainResult.object is the object with a 'tasks' property\n\t\t\t\tgeneratedData = aiServiceResponse.mainResult.object;\n\t\t\t}\n\t\t}\n\n\t\tif (!generatedData || !Array.isArray(generatedData.tasks)) {\n\t\t\tlogFn.error(\n\t\t\t\t`Internal Error: generateObjectService returned unexpected data structure: 
${JSON.stringify(generatedData)}`\n\t\t\t);\n\t\t\tthrow new Error(\n\t\t\t\t'AI service returned unexpected data structure after validation.'\n\t\t\t);\n\t\t}\n\n\t\tlet currentId = nextId;\n\t\tconst taskMap = new Map();\n\t\tconst processedNewTasks = generatedData.tasks.map((task) => {\n\t\t\tconst newId = currentId++;\n\t\t\ttaskMap.set(task.id, newId);\n\t\t\treturn {\n\t\t\t\t...task,\n\t\t\t\tid: newId,\n\t\t\t\tstatus: 'pending',\n\t\t\t\tpriority: task.priority || 'medium',\n\t\t\t\tdependencies: Array.isArray(task.dependencies) ? task.dependencies : [],\n\t\t\t\tsubtasks: []\n\t\t\t};\n\t\t});\n\n\t\t// Remap dependencies for the NEWLY processed tasks\n\t\tprocessedNewTasks.forEach((task) => {\n\t\t\ttask.dependencies = task.dependencies\n\t\t\t\t.map((depId) => taskMap.get(depId)) // Map old AI ID to new sequential ID\n\t\t\t\t.filter(\n\t\t\t\t\t(newDepId) =>\n\t\t\t\t\t\tnewDepId != null && // Must exist\n\t\t\t\t\t\tnewDepId < task.id && // Must be a lower ID (could be existing or newly generated)\n\t\t\t\t\t\t(findTaskById(existingTasks, newDepId) || // Check if it exists in old tasks OR\n\t\t\t\t\t\t\tprocessedNewTasks.some((t) => t.id === newDepId)) // check if it exists in new tasks\n\t\t\t\t);\n\t\t});\n\n\t\tconst finalTasks = append\n\t\t\t? 
[...existingTasks, ...processedNewTasks]\n\t\t\t: processedNewTasks;\n\n\t\t// Read the existing file to preserve other tags\n\t\tlet outputData = {};\n\t\tif (fs.existsSync(tasksPath)) {\n\t\t\ttry {\n\t\t\t\tconst existingFileContent = fs.readFileSync(tasksPath, 'utf8');\n\t\t\t\toutputData = JSON.parse(existingFileContent);\n\t\t\t} catch (error) {\n\t\t\t\t// If we can't read the existing file, start with empty object\n\t\t\t\toutputData = {};\n\t\t\t}\n\t\t}\n\n\t\t// Update only the target tag, preserving other tags\n\t\toutputData[targetTag] = {\n\t\t\ttasks: finalTasks,\n\t\t\tmetadata: {\n\t\t\t\tcreated:\n\t\t\t\t\toutputData[targetTag]?.metadata?.created || new Date().toISOString(),\n\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\tdescription: `Tasks for ${targetTag} context`\n\t\t\t}\n\t\t};\n\n\t\t// Ensure the target tag has proper metadata\n\t\tensureTagMetadata(outputData[targetTag], {\n\t\t\tdescription: `Tasks for ${targetTag} context`\n\t\t});\n\n\t\t// Write the complete data structure back to the file\n\t\tfs.writeFileSync(tasksPath, JSON.stringify(outputData, null, 2));\n\t\treport(\n\t\t\t`Successfully ${append ? 'appended' : 'generated'} ${processedNewTasks.length} tasks in ${tasksPath}${research ? ' with research-backed analysis' : ''}`,\n\t\t\t'success'\n\t\t);\n\n\t\t// Generate markdown task files after writing tasks.json\n\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath), { mcpLog });\n\n\t\t// Handle CLI output (e.g., success message)\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t`Successfully generated ${processedNewTasks.length} new tasks${research ? ' with research-backed analysis' : ''}. 
Total tasks in ${tasksPath}: ${finalTasks.length}`\n\t\t\t\t\t),\n\t\t\t\t\t{ padding: 1, borderColor: 'green', borderStyle: 'round' }\n\t\t\t\t)\n\t\t\t);\n\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.white.bold('Next Steps:') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t`${chalk.cyan('1.')} Run ${chalk.yellow('task-master list')} to view all tasks\\n` +\n\t\t\t\t\t\t`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks`,\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\n\t\t\tif (aiServiceResponse && aiServiceResponse.telemetryData) {\n\t\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t\t}\n\t\t}\n\n\t\t// Return telemetry data\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\ttasksPath,\n\t\t\ttelemetryData: aiServiceResponse?.telemetryData,\n\t\t\ttagInfo: aiServiceResponse?.tagInfo\n\t\t};\n\t} catch (error) {\n\t\treport(`Error parsing PRD: ${error.message}`, 'error');\n\n\t\t// Only show error UI for text output (CLI)\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\n\t\t\tif (getDebugFlag(projectRoot)) {\n\t\t\t\t// Use projectRoot for debug flag check\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\t\t}\n\n\t\tthrow error; // Always re-throw for proper error handling\n\t}\n}\n\nexport default parsePRD;\n"], ["/claude-task-master/scripts/modules/ui.js", "/**\n * ui.js\n * User interface functions for the Task Master CLI\n */\n\nimport chalk from 'chalk';\nimport figlet from 'figlet';\nimport boxen from 'boxen';\nimport ora from 'ora';\nimport Table from 'cli-table3';\nimport gradient from 'gradient-string';\nimport readline from 'readline';\nimport {\n\tlog,\n\tfindTaskById,\n\treadJSON,\n\ttruncate,\n\tisSilentMode\n} from './utils.js';\nimport fs from 'fs';\nimport 
{\n\tfindNextTask,\n\tanalyzeTaskComplexity,\n\treadComplexityReport\n} from './task-manager.js';\nimport { getProjectName, getDefaultSubtasks } from './config-manager.js';\nimport { TASK_STATUS_OPTIONS } from '../../src/constants/task-status.js';\nimport {\n\tTASKMASTER_CONFIG_FILE,\n\tTASKMASTER_TASKS_FILE\n} from '../../src/constants/paths.js';\nimport { getTaskMasterVersion } from '../../src/utils/getVersion.js';\n\n// Create a color gradient for the banner\nconst coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);\nconst warmGradient = gradient(['#fb8b24', '#e36414', '#9a031e']);\n\n/**\n * Display FYI notice about tagged task lists (only if migration occurred)\n * @param {Object} data - Data object that may contain _migrationHappened flag\n */\nfunction displayTaggedTasksFYI(data) {\n\tif (isSilentMode() || !data || !data._migrationHappened) return;\n\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white.bold('FYI: ') +\n\t\t\t\tchalk.gray('Taskmaster now supports separate task lists per tag. 
') +\n\t\t\t\tchalk.cyan(\n\t\t\t\t\t'Use the --tag flag to create/read/update/filter tasks by tag.'\n\t\t\t\t),\n\t\t\t{\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 2, right: 2 },\n\t\t\t\tborderColor: 'cyan',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t}\n\t\t)\n\t);\n}\n\n/**\n * Display a small, non-intrusive indicator showing the current tag context\n * @param {string} tagName - The tag name to display\n * @param {Object} options - Display options\n * @param {boolean} [options.skipIfMaster=false] - Don't show indicator if tag is 'master'\n * @param {boolean} [options.dim=false] - Use dimmed styling\n */\nfunction displayCurrentTagIndicator(tag, options = {}) {\n\tif (isSilentMode()) return;\n\n\tconst { skipIfMaster = false, dim = false } = options;\n\n\t// Skip display for master tag only if explicitly requested\n\tif (skipIfMaster && tag === 'master') return;\n\n\t// Create a small, tasteful tag indicator\n\tconst tagIcon = '🏷️';\n\tconst tagText = dim\n\t\t? 
chalk.gray(`${tagIcon} tag: ${tag}`)\n\t\t: chalk.dim(`${tagIcon} tag: `) + chalk.cyan(tag);\n\n\tconsole.log(tagText);\n}\n\n/**\n * Display a fancy banner for the CLI\n */\nfunction displayBanner() {\n\tif (isSilentMode()) return;\n\n\t// console.clear(); // Removing this to avoid clearing the terminal per command\n\tconst bannerText = figlet.textSync('Task Master', {\n\t\tfont: 'Standard',\n\t\thorizontalLayout: 'default',\n\t\tverticalLayout: 'default'\n\t});\n\n\tconsole.log(coolGradient(bannerText));\n\n\t// Add creator credit line below the banner\n\tconsole.log(\n\t\tchalk.dim('by ') + chalk.cyan.underline('https://x.com/eyaltoledano')\n\t);\n\n\t// Read version directly from package.json\n\tconst version = getTaskMasterVersion();\n\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white(\n\t\t\t\t`${chalk.bold('Version:')} ${version} ${chalk.bold('Project:')} ${getProjectName(null)}`\n\t\t\t),\n\t\t\t{\n\t\t\t\tpadding: 1,\n\t\t\t\tmargin: { top: 0, bottom: 1 },\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tborderColor: 'cyan'\n\t\t\t}\n\t\t)\n\t);\n}\n\n/**\n * Start a loading indicator with an animated spinner\n * @param {string} message - Message to display next to the spinner\n * @returns {Object} Spinner object\n */\nfunction startLoadingIndicator(message) {\n\tif (isSilentMode()) return null;\n\n\tconst spinner = ora({\n\t\ttext: message,\n\t\tcolor: 'cyan'\n\t}).start();\n\n\treturn spinner;\n}\n\n/**\n * Stop a loading indicator (basic stop, no success/fail indicator)\n * @param {Object} spinner - Spinner object to stop\n */\nfunction stopLoadingIndicator(spinner) {\n\tif (spinner && typeof spinner.stop === 'function') {\n\t\tspinner.stop();\n\t}\n}\n\n/**\n * Complete a loading indicator with success (shows checkmark)\n * @param {Object} spinner - Spinner object to complete\n * @param {string} message - Optional success message (defaults to current text)\n */\nfunction succeedLoadingIndicator(spinner, message = null) {\n\tif (spinner && typeof spinner.succeed 
=== 'function') {\n\t\tif (message) {\n\t\t\tspinner.succeed(message);\n\t\t} else {\n\t\t\tspinner.succeed();\n\t\t}\n\t}\n}\n\n/**\n * Complete a loading indicator with failure (shows X)\n * @param {Object} spinner - Spinner object to fail\n * @param {string} message - Optional failure message (defaults to current text)\n */\nfunction failLoadingIndicator(spinner, message = null) {\n\tif (spinner && typeof spinner.fail === 'function') {\n\t\tif (message) {\n\t\t\tspinner.fail(message);\n\t\t} else {\n\t\t\tspinner.fail();\n\t\t}\n\t}\n}\n\n/**\n * Complete a loading indicator with warning (shows warning symbol)\n * @param {Object} spinner - Spinner object to warn\n * @param {string} message - Optional warning message (defaults to current text)\n */\nfunction warnLoadingIndicator(spinner, message = null) {\n\tif (spinner && typeof spinner.warn === 'function') {\n\t\tif (message) {\n\t\t\tspinner.warn(message);\n\t\t} else {\n\t\t\tspinner.warn();\n\t\t}\n\t}\n}\n\n/**\n * Complete a loading indicator with info (shows info symbol)\n * @param {Object} spinner - Spinner object to complete with info\n * @param {string} message - Optional info message (defaults to current text)\n */\nfunction infoLoadingIndicator(spinner, message = null) {\n\tif (spinner && typeof spinner.info === 'function') {\n\t\tif (message) {\n\t\t\tspinner.info(message);\n\t\t} else {\n\t\t\tspinner.info();\n\t\t}\n\t}\n}\n\n/**\n * Create a colored progress bar\n * @param {number} percent - The completion percentage\n * @param {number} length - The total length of the progress bar in characters\n * @param {Object} statusBreakdown - Optional breakdown of non-complete statuses (e.g., {pending: 20, 'in-progress': 10})\n * @returns {string} The formatted progress bar\n */\nfunction createProgressBar(percent, length = 30, statusBreakdown = null) {\n\t// Adjust the percent to treat deferred and cancelled as complete\n\tconst effectivePercent = statusBreakdown\n\t\t? 
Math.min(\n\t\t\t\t100,\n\t\t\t\tpercent +\n\t\t\t\t\t(statusBreakdown.deferred || 0) +\n\t\t\t\t\t(statusBreakdown.cancelled || 0)\n\t\t\t)\n\t\t: percent;\n\n\t// Calculate how many characters to fill for \"true completion\"\n\tconst trueCompletedFilled = Math.round((percent * length) / 100);\n\n\t// Calculate how many characters to fill for \"effective completion\" (including deferred/cancelled)\n\tconst effectiveCompletedFilled = Math.round(\n\t\t(effectivePercent * length) / 100\n\t);\n\n\t// The \"deferred/cancelled\" section (difference between true and effective)\n\tconst deferredCancelledFilled =\n\t\teffectiveCompletedFilled - trueCompletedFilled;\n\n\t// Set the empty section (remaining after effective completion)\n\tconst empty = length - effectiveCompletedFilled;\n\n\t// Determine color based on percentage for the completed section\n\tlet completedColor;\n\tif (percent < 25) {\n\t\tcompletedColor = chalk.red;\n\t} else if (percent < 50) {\n\t\tcompletedColor = chalk.hex('#FFA500'); // Orange\n\t} else if (percent < 75) {\n\t\tcompletedColor = chalk.yellow;\n\t} else if (percent < 100) {\n\t\tcompletedColor = chalk.green;\n\t} else {\n\t\tcompletedColor = chalk.hex('#006400'); // Dark green\n\t}\n\n\t// Create colored sections\n\tconst completedSection = completedColor('█'.repeat(trueCompletedFilled));\n\n\t// Gray section for deferred/cancelled items\n\tconst deferredCancelledSection = chalk.gray(\n\t\t'█'.repeat(deferredCancelledFilled)\n\t);\n\n\t// If we have a status breakdown, create a multi-colored remaining section\n\tlet remainingSection = '';\n\n\tif (statusBreakdown && empty > 0) {\n\t\t// Status colors (matching the statusConfig colors in getStatusWithColor)\n\t\tconst statusColors = {\n\t\t\tpending: chalk.yellow,\n\t\t\t'in-progress': chalk.hex('#FFA500'), // Orange\n\t\t\tblocked: chalk.red,\n\t\t\treview: chalk.magenta\n\t\t\t// Deferred and cancelled are treated as part of the completed section\n\t\t};\n\n\t\t// Calculate proportions 
for each status\n\t\tconst totalRemaining = Object.entries(statusBreakdown)\n\t\t\t.filter(\n\t\t\t\t([status]) =>\n\t\t\t\t\t!['deferred', 'cancelled', 'done', 'completed'].includes(status)\n\t\t\t)\n\t\t\t.reduce((sum, [_, val]) => sum + val, 0);\n\n\t\t// If no remaining tasks with tracked statuses, just use gray\n\t\tif (totalRemaining <= 0) {\n\t\t\tremainingSection = chalk.gray('░'.repeat(empty));\n\t\t} else {\n\t\t\t// Track how many characters we've added\n\t\t\tlet addedChars = 0;\n\n\t\t\t// Add each status section proportionally\n\t\t\tfor (const [status, percentage] of Object.entries(statusBreakdown)) {\n\t\t\t\t// Skip statuses that are considered complete\n\t\t\t\tif (['deferred', 'cancelled', 'done', 'completed'].includes(status))\n\t\t\t\t\tcontinue;\n\n\t\t\t\t// Calculate how many characters this status should fill\n\t\t\t\tconst statusChars = Math.round((percentage / totalRemaining) * empty);\n\n\t\t\t\t// Make sure we don't exceed the total length due to rounding\n\t\t\t\tconst actualChars = Math.min(statusChars, empty - addedChars);\n\n\t\t\t\t// Add colored section for this status\n\t\t\t\tconst colorFn = statusColors[status] || chalk.gray;\n\t\t\t\tremainingSection += colorFn('░'.repeat(actualChars));\n\n\t\t\t\taddedChars += actualChars;\n\t\t\t}\n\n\t\t\t// If we have any remaining space due to rounding, fill with gray\n\t\t\tif (addedChars < empty) {\n\t\t\t\tremainingSection += chalk.gray('░'.repeat(empty - addedChars));\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// Default to gray for the empty section if no breakdown provided\n\t\tremainingSection = chalk.gray('░'.repeat(empty));\n\t}\n\n\t// Effective percentage text color should reflect the highest category\n\tconst percentTextColor =\n\t\tpercent === 100\n\t\t\t? chalk.hex('#006400') // Dark green for 100%\n\t\t\t: effectivePercent === 100\n\t\t\t\t? 
chalk.gray // Gray for 100% with deferred/cancelled\n\t\t\t\t: completedColor; // Otherwise match the completed color\n\n\t// Build the complete progress bar\n\treturn `${completedSection}${deferredCancelledSection}${remainingSection} ${percentTextColor(`${effectivePercent.toFixed(0)}%`)}`;\n}\n\n/**\n * Get a colored status string based on the status value\n * @param {string} status - Task status (e.g., \"done\", \"pending\", \"in-progress\")\n * @param {boolean} forTable - Whether the status is being displayed in a table\n * @returns {string} Colored status string\n */\nfunction getStatusWithColor(status, forTable = false) {\n\tif (!status) {\n\t\treturn chalk.gray('❓ unknown');\n\t}\n\n\tconst statusConfig = {\n\t\tdone: { color: chalk.green, icon: '✓', tableIcon: '✓' },\n\t\tcompleted: { color: chalk.green, icon: '✓', tableIcon: '✓' },\n\t\tpending: { color: chalk.yellow, icon: '○', tableIcon: '⏱' },\n\t\t'in-progress': { color: chalk.hex('#FFA500'), icon: '🔄', tableIcon: '►' },\n\t\tdeferred: { color: chalk.gray, icon: 'x', tableIcon: '⏱' },\n\t\tblocked: { color: chalk.red, icon: '!', tableIcon: '✗' },\n\t\treview: { color: chalk.magenta, icon: '?', tableIcon: '?' },\n\t\tcancelled: { color: chalk.gray, icon: '❌', tableIcon: 'x' }\n\t};\n\n\tconst config = statusConfig[status.toLowerCase()] || {\n\t\tcolor: chalk.red,\n\t\ticon: '❌',\n\t\ttableIcon: '✗'\n\t};\n\n\t// Use simpler icons for table display to prevent border issues\n\tif (forTable) {\n\t\t// Use ASCII characters instead of Unicode for completely stable display\n\t\tconst simpleIcons = {\n\t\t\tdone: '✓',\n\t\t\tcompleted: '✓',\n\t\t\tpending: '○',\n\t\t\t'in-progress': '►',\n\t\t\tdeferred: 'x',\n\t\t\tblocked: '!', // Using plain x character for better compatibility\n\t\t\treview: '?' 
// Using circled dot symbol\n\t\t};\n\t\tconst simpleIcon = simpleIcons[status.toLowerCase()] || 'x';\n\t\treturn config.color(`${simpleIcon} ${status}`);\n\t}\n\n\treturn config.color(`${config.icon} ${status}`);\n}\n\n/**\n * Format dependencies list with status indicators\n * @param {Array} dependencies - Array of dependency IDs\n * @param {Array} allTasks - Array of all tasks\n * @param {boolean} forConsole - Whether the output is for console display\n * @param {Object|null} complexityReport - Optional pre-loaded complexity report\n * @returns {string} Formatted dependencies string\n */\nfunction formatDependenciesWithStatus(\n\tdependencies,\n\tallTasks,\n\tforConsole = false,\n\tcomplexityReport = null // Add complexityReport parameter\n) {\n\tif (\n\t\t!dependencies ||\n\t\t!Array.isArray(dependencies) ||\n\t\tdependencies.length === 0\n\t) {\n\t\treturn forConsole ? chalk.gray('None') : 'None';\n\t}\n\n\tconst formattedDeps = dependencies.map((depId) => {\n\t\tconst depIdStr = depId.toString(); // Ensure string format for display\n\n\t\t// Check if it's already a fully qualified subtask ID (like \"22.1\")\n\t\tif (depIdStr.includes('.')) {\n\t\t\tconst [parentId, subtaskId] = depIdStr\n\t\t\t\t.split('.')\n\t\t\t\t.map((id) => parseInt(id, 10));\n\n\t\t\t// Find the parent task\n\t\t\tconst parentTask = allTasks.find((t) => t.id === parentId);\n\t\t\tif (!parentTask || !parentTask.subtasks) {\n\t\t\t\treturn forConsole\n\t\t\t\t\t? chalk.red(`${depIdStr} (Not found)`)\n\t\t\t\t\t: `${depIdStr} (Not found)`;\n\t\t\t}\n\n\t\t\t// Find the subtask\n\t\t\tconst subtask = parentTask.subtasks.find((st) => st.id === subtaskId);\n\t\t\tif (!subtask) {\n\t\t\t\treturn forConsole\n\t\t\t\t\t? 
chalk.red(`${depIdStr} (Not found)`)\n\t\t\t\t\t: `${depIdStr} (Not found)`;\n\t\t\t}\n\n\t\t\t// Format with status\n\t\t\tconst status = subtask.status || 'pending';\n\t\t\tconst isDone =\n\t\t\t\tstatus.toLowerCase() === 'done' || status.toLowerCase() === 'completed';\n\t\t\tconst isInProgress = status.toLowerCase() === 'in-progress';\n\n\t\t\tif (forConsole) {\n\t\t\t\tif (isDone) {\n\t\t\t\t\treturn chalk.green.bold(depIdStr);\n\t\t\t\t} else if (isInProgress) {\n\t\t\t\t\treturn chalk.hex('#FFA500').bold(depIdStr);\n\t\t\t\t} else {\n\t\t\t\t\treturn chalk.red.bold(depIdStr);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// For plain text output (task files), return just the ID without any formatting or emoji\n\t\t\treturn depIdStr;\n\t\t}\n\n\t\t// If depId is a number less than 100, it's likely a reference to a subtask ID in the current task\n\t\t// This case is typically handled elsewhere (in task-specific code) before calling this function\n\n\t\t// For regular task dependencies (not subtasks)\n\t\t// Convert string depId to number if needed\n\t\tconst numericDepId =\n\t\t\ttypeof depId === 'string' ? parseInt(depId, 10) : depId;\n\n\t\t// Look up the task using the numeric ID\n\t\tconst depTaskResult = findTaskById(\n\t\t\tallTasks,\n\t\t\tnumericDepId,\n\t\t\tcomplexityReport\n\t\t);\n\t\tconst depTask = depTaskResult.task; // Access the task object from the result\n\n\t\tif (!depTask) {\n\t\t\treturn forConsole\n\t\t\t\t? 
chalk.red(`${depIdStr} (Not found)`)\n\t\t\t\t: `${depIdStr} (Not found)`;\n\t\t}\n\n\t\t// Format with status\n\t\tconst status = depTask.status || 'pending';\n\t\tconst isDone =\n\t\t\tstatus.toLowerCase() === 'done' || status.toLowerCase() === 'completed';\n\t\tconst isInProgress = status.toLowerCase() === 'in-progress';\n\n\t\tif (forConsole) {\n\t\t\tif (isDone) {\n\t\t\t\treturn chalk.green.bold(depIdStr);\n\t\t\t} else if (isInProgress) {\n\t\t\t\treturn chalk.yellow.bold(depIdStr);\n\t\t\t} else {\n\t\t\t\treturn chalk.red.bold(depIdStr);\n\t\t\t}\n\t\t}\n\n\t\t// For plain text output (task files), return just the ID without any formatting or emoji\n\t\treturn depIdStr;\n\t});\n\n\treturn formattedDeps.join(', ');\n}\n\n/**\n * Display a comprehensive help guide\n */\nfunction displayHelp() {\n\t// Get terminal width - moved to top of function to make it available throughout\n\tconst terminalWidth = process.stdout.columns || 100; // Default to 100 if can't detect\n\n\tconsole.log(\n\t\tboxen(chalk.white.bold('Task Master CLI'), {\n\t\t\tpadding: 1,\n\t\t\tborderColor: 'blue',\n\t\t\tborderStyle: 'round',\n\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t})\n\t);\n\n\t// Command categories\n\tconst commandCategories = [\n\t\t{\n\t\t\ttitle: 'Project Setup & Configuration',\n\t\t\tcolor: 'blue',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'init',\n\t\t\t\t\targs: '[--name=<name>] [--description=<desc>] [-y]',\n\t\t\t\t\tdesc: 'Initialize a new project with Task Master structure'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'models',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'View current AI model configuration and available models'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'models --setup',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'Run interactive setup to configure AI models'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'models --set-main',\n\t\t\t\t\targs: '<model_id>',\n\t\t\t\t\tdesc: 'Set the primary model for task generation'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'models 
--set-research',\n\t\t\t\t\targs: '<model_id>',\n\t\t\t\t\tdesc: 'Set the model for research operations'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'models --set-fallback',\n\t\t\t\t\targs: '<model_id>',\n\t\t\t\t\tdesc: 'Set the fallback model (optional)'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Task Generation',\n\t\t\tcolor: 'cyan',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'parse-prd',\n\t\t\t\t\targs: '--input=<file.txt> [--num-tasks=10]',\n\t\t\t\t\tdesc: 'Generate tasks from a PRD document'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'generate',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'Create individual task files from tasks.json'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Task Management',\n\t\t\tcolor: 'green',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'list',\n\t\t\t\t\targs: '[--status=<status>] [--with-subtasks]',\n\t\t\t\t\tdesc: 'List all tasks with their status'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'set-status',\n\t\t\t\t\targs: '--id=<id> --status=<status>',\n\t\t\t\t\tdesc: `Update task status (${TASK_STATUS_OPTIONS.join(', ')})`\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'sync-readme',\n\t\t\t\t\targs: '[--with-subtasks] [--status=<status>]',\n\t\t\t\t\tdesc: 'Export tasks to README.md with professional formatting'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'update',\n\t\t\t\t\targs: '--from=<id> --prompt=\"<context>\"',\n\t\t\t\t\tdesc: 'Update multiple tasks based on new requirements'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'update-task',\n\t\t\t\t\targs: '--id=<id> --prompt=\"<context>\"',\n\t\t\t\t\tdesc: 'Update a single specific task with new information'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'update-subtask',\n\t\t\t\t\targs: '--id=<parentId.subtaskId> --prompt=\"<context>\"',\n\t\t\t\t\tdesc: 'Append additional information to a subtask'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'add-task',\n\t\t\t\t\targs: '--prompt=\"<text>\" [--dependencies=<ids>] [--priority=<priority>]',\n\t\t\t\t\tdesc: 'Add a new task using 
AI'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'remove-task',\n\t\t\t\t\targs: '--id=<id> [-y]',\n\t\t\t\t\tdesc: 'Permanently remove a task or subtask'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Subtask Management',\n\t\t\tcolor: 'yellow',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'add-subtask',\n\t\t\t\t\targs: '--parent=<id> --title=\"<title>\" [--description=\"<desc>\"]',\n\t\t\t\t\tdesc: 'Add a new subtask to a parent task'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'add-subtask',\n\t\t\t\t\targs: '--parent=<id> --task-id=<id>',\n\t\t\t\t\tdesc: 'Convert an existing task into a subtask'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'remove-subtask',\n\t\t\t\t\targs: '--id=<parentId.subtaskId> [--convert]',\n\t\t\t\t\tdesc: 'Remove a subtask (optionally convert to standalone task)'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'clear-subtasks',\n\t\t\t\t\targs: '--id=<id>',\n\t\t\t\t\tdesc: 'Remove all subtasks from specified tasks'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'clear-subtasks --all',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'Remove subtasks from all tasks'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Task Analysis & Breakdown',\n\t\t\tcolor: 'magenta',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'analyze-complexity',\n\t\t\t\t\targs: '[--research] [--threshold=5]',\n\t\t\t\t\tdesc: 'Analyze tasks and generate expansion recommendations'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'complexity-report',\n\t\t\t\t\targs: '[--file=<path>]',\n\t\t\t\t\tdesc: 'Display the complexity analysis report'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'expand',\n\t\t\t\t\targs: '--id=<id> [--num=5] [--research] [--prompt=\"<context>\"]',\n\t\t\t\t\tdesc: 'Break down tasks into detailed subtasks'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'expand --all',\n\t\t\t\t\targs: '[--force] [--research]',\n\t\t\t\t\tdesc: 'Expand all pending tasks with subtasks'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'research',\n\t\t\t\t\targs: '\"<prompt>\" [-i=<task_ids>] [-f=<file_paths>] 
[-c=\"<context>\"] [--tree] [-s=<save_file>] [-d=<detail_level>]',\n\t\t\t\t\tdesc: 'Perform AI-powered research queries with project context'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Task Navigation & Viewing',\n\t\t\tcolor: 'cyan',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'next',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'Show the next task to work on based on dependencies'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'show',\n\t\t\t\t\targs: '<id>',\n\t\t\t\t\tdesc: 'Display detailed information about a specific task'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Tag Management',\n\t\t\tcolor: 'magenta',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'tags',\n\t\t\t\t\targs: '[--show-metadata]',\n\t\t\t\t\tdesc: 'List all available tags with task counts'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'add-tag',\n\t\t\t\t\targs: '<tagName> [--copy-from-current] [--copy-from=<tag>] [-d=\"<desc>\"]',\n\t\t\t\t\tdesc: 'Create a new tag context for organizing tasks'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'use-tag',\n\t\t\t\t\targs: '<tagName>',\n\t\t\t\t\tdesc: 'Switch to a different tag context'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'delete-tag',\n\t\t\t\t\targs: '<tagName> [--yes]',\n\t\t\t\t\tdesc: 'Delete an existing tag and all its tasks'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'rename-tag',\n\t\t\t\t\targs: '<oldName> <newName>',\n\t\t\t\t\tdesc: 'Rename an existing tag'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'copy-tag',\n\t\t\t\t\targs: '<sourceName> <targetName> [-d=\"<desc>\"]',\n\t\t\t\t\tdesc: 'Copy an existing tag to create a new tag with the same tasks'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Dependency Management',\n\t\t\tcolor: 'blue',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'add-dependency',\n\t\t\t\t\targs: '--id=<id> --depends-on=<id>',\n\t\t\t\t\tdesc: 'Add a dependency to a task'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'remove-dependency',\n\t\t\t\t\targs: '--id=<id> --depends-on=<id>',\n\t\t\t\t\tdesc: 'Remove a dependency 
from a task'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'validate-dependencies',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'Identify invalid dependencies without fixing them'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'fix-dependencies',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'Fix invalid dependencies automatically'\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t];\n\n\t// Display each category\n\tcommandCategories.forEach((category) => {\n\t\tconsole.log(\n\t\t\tboxen(chalk[category.color].bold(category.title), {\n\t\t\t\tpadding: { left: 2, right: 2, top: 0, bottom: 0 },\n\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\tborderColor: category.color,\n\t\t\t\tborderStyle: 'round'\n\t\t\t})\n\t\t);\n\n\t\t// Calculate dynamic column widths - adjust ratios as needed\n\t\tconst nameWidth = Math.max(25, Math.floor(terminalWidth * 0.2)); // 20% of width but min 25\n\t\tconst argsWidth = Math.max(40, Math.floor(terminalWidth * 0.35)); // 35% of width but min 40\n\t\tconst descWidth = Math.max(45, Math.floor(terminalWidth * 0.45) - 10); // 45% of width but min 45, minus some buffer\n\n\t\tconst commandTable = new Table({\n\t\t\tcolWidths: [nameWidth, argsWidth, descWidth],\n\t\t\tchars: {\n\t\t\t\ttop: '',\n\t\t\t\t'top-mid': '',\n\t\t\t\t'top-left': '',\n\t\t\t\t'top-right': '',\n\t\t\t\tbottom: '',\n\t\t\t\t'bottom-mid': '',\n\t\t\t\t'bottom-left': '',\n\t\t\t\t'bottom-right': '',\n\t\t\t\tleft: '',\n\t\t\t\t'left-mid': '',\n\t\t\t\tmid: '',\n\t\t\t\t'mid-mid': '',\n\t\t\t\tright: '',\n\t\t\t\t'right-mid': '',\n\t\t\t\tmiddle: ' '\n\t\t\t},\n\t\t\tstyle: { border: [], 'padding-left': 4 },\n\t\t\twordWrap: true\n\t\t});\n\n\t\tcategory.commands.forEach((cmd, index) => {\n\t\t\tcommandTable.push([\n\t\t\t\t`${chalk.yellow.bold(cmd.name)}${chalk.reset('')}`,\n\t\t\t\t`${chalk.white(cmd.args)}${chalk.reset('')}`,\n\t\t\t\t`${chalk.dim(cmd.desc)}${chalk.reset('')}`\n\t\t\t]);\n\t\t});\n\n\t\tconsole.log(commandTable.toString());\n\t\tconsole.log('');\n\t});\n\n\t// Display configuration 
section\n\tconsole.log(\n\t\tboxen(chalk.cyan.bold('Configuration'), {\n\t\t\tpadding: { left: 2, right: 2, top: 0, bottom: 0 },\n\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\tborderColor: 'cyan',\n\t\t\tborderStyle: 'round'\n\t\t})\n\t);\n\n\t// Get terminal width if not already defined\n\tconst configTerminalWidth = terminalWidth || process.stdout.columns || 100;\n\n\t// Calculate dynamic column widths for config table\n\tconst configKeyWidth = Math.max(30, Math.floor(configTerminalWidth * 0.25));\n\tconst configDescWidth = Math.max(50, Math.floor(configTerminalWidth * 0.45));\n\tconst configValueWidth = Math.max(\n\t\t30,\n\t\tMath.floor(configTerminalWidth * 0.3) - 10\n\t);\n\n\tconst configTable = new Table({\n\t\tcolWidths: [configKeyWidth, configDescWidth, configValueWidth],\n\t\tchars: {\n\t\t\ttop: '',\n\t\t\t'top-mid': '',\n\t\t\t'top-left': '',\n\t\t\t'top-right': '',\n\t\t\tbottom: '',\n\t\t\t'bottom-mid': '',\n\t\t\t'bottom-left': '',\n\t\t\t'bottom-right': '',\n\t\t\tleft: '',\n\t\t\t'left-mid': '',\n\t\t\tmid: '',\n\t\t\t'mid-mid': '',\n\t\t\tright: '',\n\t\t\t'right-mid': '',\n\t\t\tmiddle: ' '\n\t\t},\n\t\tstyle: { border: [], 'padding-left': 4 },\n\t\twordWrap: true\n\t});\n\n\tconfigTable.push(\n\t\t[\n\t\t\t`${chalk.yellow(TASKMASTER_CONFIG_FILE)}${chalk.reset('')}`,\n\t\t\t`${chalk.white('AI model configuration file (project root)')}${chalk.reset('')}`,\n\t\t\t`${chalk.dim('Managed by models cmd')}${chalk.reset('')}`\n\t\t],\n\t\t[\n\t\t\t`${chalk.yellow('API Keys (.env)')}${chalk.reset('')}`,\n\t\t\t`${chalk.white('API keys for AI providers (ANTHROPIC_API_KEY, etc.)')}${chalk.reset('')}`,\n\t\t\t`${chalk.dim('Required in .env file')}${chalk.reset('')}`\n\t\t],\n\t\t[\n\t\t\t`${chalk.yellow('MCP Keys (mcp.json)')}${chalk.reset('')}`,\n\t\t\t`${chalk.white('API keys for Cursor integration')}${chalk.reset('')}`,\n\t\t\t`${chalk.dim('Required in 
.cursor/')}${chalk.reset('')}`\n\t\t]\n\t);\n\n\tconsole.log(configTable.toString());\n\tconsole.log('');\n\n\t// Show helpful hints\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white.bold('Quick Start:') +\n\t\t\t\t'\\n\\n' +\n\t\t\t\tchalk.cyan('1. Create Project: ') +\n\t\t\t\tchalk.white('task-master init') +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan('2. Setup Models: ') +\n\t\t\t\tchalk.white('task-master models --setup') +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan('3. Parse PRD: ') +\n\t\t\t\tchalk.white('task-master parse-prd --input=<prd-file>') +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan('4. List Tasks: ') +\n\t\t\t\tchalk.white('task-master list') +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan('5. Find Next Task: ') +\n\t\t\t\tchalk.white('task-master next'),\n\t\t\t{\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'yellow',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1 },\n\t\t\t\twidth: Math.min(configTerminalWidth - 10, 100) // Limit width to terminal width minus padding, max 100\n\t\t\t}\n\t\t)\n\t);\n}\n\n/**\n * Get colored complexity score\n * @param {number} score - Complexity score (1-10)\n * @returns {string} Colored complexity score\n */\nfunction getComplexityWithColor(score) {\n\tif (score <= 3) return chalk.green(`● ${score}`);\n\tif (score <= 6) return chalk.yellow(`● ${score}`);\n\treturn chalk.red(`● ${score}`);\n}\n\n/**\n * Truncate a string to a maximum length and add ellipsis if needed\n * @param {string} str - The string to truncate\n * @param {number} maxLength - Maximum length\n * @returns {string} Truncated string\n */\nfunction truncateString(str, maxLength) {\n\tif (!str) return '';\n\tif (str.length <= maxLength) return str;\n\treturn str.substring(0, maxLength - 3) + '...';\n}\n\n/**\n * Display the next task to work on\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} complexityReportPath - Path to the complexity report file\n * @param {string} tag - Optional tag to override current tag resolution\n */\nasync 
function displayNextTask(\n\ttasksPath,\n\tcomplexityReportPath = null,\n\tcontext = {}\n) {\n\t// Extract parameters from context\n\tconst { projectRoot, tag } = context;\n\n\t// Read the tasks file with proper projectRoot for tag resolution\n\tconst data = readJSON(tasksPath, projectRoot, tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found.');\n\t\t// NOTE(review): hard process exit — this display helper assumes a CLI context\n\t\tprocess.exit(1);\n\t}\n\n\t// Read complexity report once\n\tconst complexityReport = readComplexityReport(complexityReportPath);\n\n\t// Find the next task\n\tconst nextTask = findNextTask(data.tasks, complexityReport);\n\n\tif (!nextTask) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.yellow('No eligible tasks found!\\n\\n') +\n\t\t\t\t\t'All pending tasks have unsatisfied dependencies, or all tasks are completed.',\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t\treturn;\n\t}\n\n\t// Display the task in a nice format\n\tconsole.log(\n\t\tboxen(chalk.white.bold(`Next Task: #${nextTask.id} - ${nextTask.title}`), {\n\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\tborderColor: 'blue',\n\t\t\tborderStyle: 'round',\n\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t})\n\t);\n\n\t// Create a table with task details\n\tconst taskTable = new Table({\n\t\tstyle: {\n\t\t\thead: [],\n\t\t\tborder: [],\n\t\t\t'padding-top': 0,\n\t\t\t'padding-bottom': 0,\n\t\t\tcompact: true\n\t\t},\n\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },\n\t\t// columns is undefined when stdout is not a TTY: (undefined - 20) is NaN, so || 60 applies\n\t\tcolWidths: [15, Math.min(75, process.stdout.columns - 20 || 60)],\n\t\twordWrap: true\n\t});\n\n\t// Priority with color\n\tconst priorityColors = {\n\t\thigh: chalk.red.bold,\n\t\tmedium: chalk.yellow,\n\t\tlow: chalk.gray\n\t};\n\tconst priorityColor =\n\t\tpriorityColors[nextTask.priority || 'medium'] || chalk.white; // unknown priority strings render white\n\n\t// Add task details to 
table\n\ttaskTable.push(\n\t\t[chalk.cyan.bold('ID:'), nextTask.id.toString()],\n\t\t[chalk.cyan.bold('Title:'), nextTask.title],\n\t\t[\n\t\t\tchalk.cyan.bold('Priority:'),\n\t\t\tpriorityColor(nextTask.priority || 'medium')\n\t\t],\n\t\t[\n\t\t\tchalk.cyan.bold('Dependencies:'),\n\t\t\tformatDependenciesWithStatus(\n\t\t\t\tnextTask.dependencies,\n\t\t\t\tdata.tasks,\n\t\t\t\ttrue,\n\t\t\t\tcomplexityReport\n\t\t\t)\n\t\t],\n\t\t[\n\t\t\tchalk.cyan.bold('Complexity:'),\n\t\t\tnextTask.complexityScore\n\t\t\t\t? getComplexityWithColor(nextTask.complexityScore)\n\t\t\t\t: chalk.gray('N/A')\n\t\t],\n\t\t[chalk.cyan.bold('Description:'), nextTask.description]\n\t);\n\n\tconsole.log(taskTable.toString());\n\n\t// If task has details, show them in a separate box\n\tif (nextTask.details && nextTask.details.trim().length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Implementation Details:') + '\\n\\n' + nextTask.details,\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n\n\t// Determine if the nextTask is a subtask\n\tconst isSubtask = !!nextTask.parentId;\n\n\t// Show subtasks if they exist (only for parent tasks)\n\tif (!isSubtask && nextTask.subtasks && nextTask.subtasks.length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.white.bold('Subtasks'), {\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\tborderColor: 'magenta',\n\t\t\t\tborderStyle: 'round'\n\t\t\t})\n\t\t);\n\n\t\t// Calculate available width for the subtask table\n\t\tconst availableWidth = process.stdout.columns - 10 || 100; // Default to 100 if can't detect\n\n\t\t// Define percentage-based column widths\n\t\tconst idWidthPct = 8;\n\t\tconst statusWidthPct = 15;\n\t\tconst depsWidthPct = 25;\n\t\tconst titleWidthPct = 100 - idWidthPct - statusWidthPct - 
depsWidthPct;\n\n\t\t// Calculate actual column widths\n\t\tconst idWidth = Math.floor(availableWidth * (idWidthPct / 100));\n\t\tconst statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));\n\t\tconst depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));\n\t\tconst titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));\n\n\t\t// Create a table for subtasks with improved handling\n\t\tconst subtaskTable = new Table({\n\t\t\thead: [\n\t\t\t\tchalk.magenta.bold('ID'),\n\t\t\t\tchalk.magenta.bold('Status'),\n\t\t\t\tchalk.magenta.bold('Title'),\n\t\t\t\tchalk.magenta.bold('Deps')\n\t\t\t],\n\t\t\tcolWidths: [idWidth, statusWidth, titleWidth, depsWidth],\n\t\t\tstyle: {\n\t\t\t\thead: [],\n\t\t\t\tborder: [],\n\t\t\t\t'padding-top': 0,\n\t\t\t\t'padding-bottom': 0,\n\t\t\t\tcompact: true\n\t\t\t},\n\t\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },\n\t\t\twordWrap: true\n\t\t});\n\n\t\t// Add subtasks to table\n\t\tnextTask.subtasks.forEach((st) => {\n\t\t\tconst statusColor =\n\t\t\t\t{\n\t\t\t\t\tdone: chalk.green,\n\t\t\t\t\tcompleted: chalk.green,\n\t\t\t\t\tpending: chalk.yellow,\n\t\t\t\t\t'in-progress': chalk.blue\n\t\t\t\t}[st.status || 'pending'] || chalk.white;\n\n\t\t\t// Format subtask dependencies\n\t\t\tlet subtaskDeps = 'None';\n\t\t\tif (st.dependencies && st.dependencies.length > 0) {\n\t\t\t\t// Format dependencies with correct notation\n\t\t\t\tconst formattedDeps = st.dependencies.map((depId) => {\n\t\t\t\t\tif (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\tconst foundSubtask = nextTask.subtasks.find(\n\t\t\t\t\t\t\t(st) => st.id === depId\n\t\t\t\t\t\t);\n\t\t\t\t\t\tif (foundSubtask) {\n\t\t\t\t\t\t\tconst isDone =\n\t\t\t\t\t\t\t\tfoundSubtask.status === 'done' ||\n\t\t\t\t\t\t\t\tfoundSubtask.status === 'completed';\n\t\t\t\t\t\t\tconst isInProgress = foundSubtask.status === 'in-progress';\n\n\t\t\t\t\t\t\t// Use consistent color formatting instead of emojis\n\t\t\t\t\t\t\tif 
(isDone) {\n\t\t\t\t\t\t\t\treturn chalk.green.bold(`${nextTask.id}.${depId}`);\n\t\t\t\t\t\t\t} else if (isInProgress) {\n\t\t\t\t\t\t\t\treturn chalk.hex('#FFA500').bold(`${nextTask.id}.${depId}`);\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\treturn chalk.red.bold(`${nextTask.id}.${depId}`);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn chalk.red(`${nextTask.id}.${depId} (Not found)`);\n\t\t\t\t\t}\n\t\t\t\t\treturn depId;\n\t\t\t\t});\n\n\t\t\t\t// Join the formatted dependencies directly instead of passing to formatDependenciesWithStatus again\n\t\t\t\tsubtaskDeps =\n\t\t\t\t\tformattedDeps.length === 1\n\t\t\t\t\t\t? formattedDeps[0]\n\t\t\t\t\t\t: formattedDeps.join(chalk.white(', '));\n\t\t\t}\n\n\t\t\tsubtaskTable.push([\n\t\t\t\t`${nextTask.id}.${st.id}`,\n\t\t\t\tstatusColor(st.status || 'pending'),\n\t\t\t\tst.title,\n\t\t\t\tsubtaskDeps\n\t\t\t]);\n\t\t});\n\n\t\tconsole.log(subtaskTable.toString());\n\t}\n\n\t// Suggest expanding if no subtasks (only for parent tasks without subtasks)\n\tif (!isSubtask && (!nextTask.subtasks || nextTask.subtasks.length === 0)) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.yellow('No subtasks found. 
Consider breaking down this task:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t`Run: ${chalk.cyan(`task-master expand --id=${nextTask.id}`)}`\n\t\t\t\t\t),\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n\n\t// Show action suggestions\n\tlet suggestedActionsContent = chalk.white.bold('Suggested Actions:') + '\\n';\n\tif (isSubtask) {\n\t\t// Suggested actions for a subtask\n\t\tsuggestedActionsContent +=\n\t\t\t`${chalk.cyan('1.')} Mark as in-progress: ${chalk.yellow(`task-master set-status --id=${nextTask.id} --status=in-progress`)}\\n` +\n\t\t\t`${chalk.cyan('2.')} Mark as done when completed: ${chalk.yellow(`task-master set-status --id=${nextTask.id} --status=done`)}\\n` +\n\t\t\t`${chalk.cyan('3.')} View parent task: ${chalk.yellow(`task-master show --id=${nextTask.parentId}`)}`;\n\t} else {\n\t\t// Suggested actions for a parent task\n\t\tsuggestedActionsContent +=\n\t\t\t`${chalk.cyan('1.')} Mark as in-progress: ${chalk.yellow(`task-master set-status --id=${nextTask.id} --status=in-progress`)}\\n` +\n\t\t\t`${chalk.cyan('2.')} Mark as done when completed: ${chalk.yellow(`task-master set-status --id=${nextTask.id} --status=done`)}\\n` +\n\t\t\t(nextTask.subtasks && nextTask.subtasks.length > 0\n\t\t\t\t? 
`${chalk.cyan('3.')} Update subtask status: ${chalk.yellow(`task-master set-status --id=${nextTask.id}.1 --status=done`)}` // Example: first subtask\n\t\t\t\t: `${chalk.cyan('3.')} Break down into subtasks: ${chalk.yellow(`task-master expand --id=${nextTask.id}`)}`);\n\t}\n\n\tconsole.log(\n\t\tboxen(suggestedActionsContent, {\n\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\tborderColor: 'green',\n\t\t\tborderStyle: 'round',\n\t\t\tmargin: { top: 1 }\n\t\t})\n\t);\n\n\t// Show FYI notice if migration occurred\n\tdisplayTaggedTasksFYI(data);\n}\n\n/**\n * Display a specific task by ID\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string|number} taskId - The ID of the task to display\n * @param {string} complexityReportPath - Path to the complexity report file\n * @param {string} [statusFilter] - Optional status to filter subtasks by\n * @param {object} context - Context object containing projectRoot and tag\n * @param {string} context.projectRoot - Project root path\n * @param {string} context.tag - Tag for the task\n */\nasync function displayTaskById(\n\ttasksPath,\n\ttaskId,\n\tcomplexityReportPath = null,\n\tstatusFilter = null,\n\tcontext = {}\n) {\n\tconst { projectRoot, tag } = context;\n\n\t// Read the tasks file with proper projectRoot for tag resolution\n\tconst data = readJSON(tasksPath, projectRoot, tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found.');\n\t\tprocess.exit(1);\n\t}\n\n\t// Read complexity report once\n\tconst complexityReport = readComplexityReport(complexityReportPath);\n\n\t// Find the task by ID, applying the status filter if provided\n\t// Returns { task, originalSubtaskCount, originalSubtasks }\n\tconst { task, originalSubtaskCount, originalSubtasks } = findTaskById(\n\t\tdata.tasks,\n\t\ttaskId,\n\t\tcomplexityReport,\n\t\tstatusFilter\n\t);\n\n\tif (!task) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.yellow(`Task with ID ${taskId} not found!`), {\n\t\t\t\tpadding: { top: 
0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tborderColor: 'yellow',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1 }\n\t\t\t})\n\t\t);\n\t\treturn;\n\t}\n\n\t// Handle subtask display specially (This logic remains the same)\n\tif (task.isSubtask || task.parentTask) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold(\n\t\t\t\t\t`Subtask: #${task.parentTask.id}.${task.id} - ${task.title}`\n\t\t\t\t),\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'magenta',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\n\t\tconst subtaskTable = new Table({\n\t\t\tstyle: {\n\t\t\t\thead: [],\n\t\t\t\tborder: [],\n\t\t\t\t'padding-top': 0,\n\t\t\t\t'padding-bottom': 0,\n\t\t\t\tcompact: true\n\t\t\t},\n\t\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },\n\t\t\tcolWidths: [15, Math.min(75, process.stdout.columns - 20 || 60)],\n\t\t\twordWrap: true\n\t\t});\n\t\tsubtaskTable.push(\n\t\t\t[chalk.cyan.bold('ID:'), `${task.parentTask.id}.${task.id}`],\n\t\t\t[\n\t\t\t\tchalk.cyan.bold('Parent Task:'),\n\t\t\t\t`#${task.parentTask.id} - ${task.parentTask.title}`\n\t\t\t],\n\t\t\t[chalk.cyan.bold('Title:'), task.title],\n\t\t\t[\n\t\t\t\tchalk.cyan.bold('Status:'),\n\t\t\t\tgetStatusWithColor(task.status || 'pending', true)\n\t\t\t],\n\t\t\t[\n\t\t\t\tchalk.cyan.bold('Complexity:'),\n\t\t\t\ttask.complexityScore\n\t\t\t\t\t? 
getComplexityWithColor(task.complexityScore)\n\t\t\t\t\t: chalk.gray('N/A')\n\t\t\t],\n\t\t\t[\n\t\t\t\tchalk.cyan.bold('Description:'),\n\t\t\t\ttask.description || 'No description provided.'\n\t\t\t]\n\t\t);\n\t\tconsole.log(subtaskTable.toString());\n\n\t\tif (task.details && task.details.trim().length > 0) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.white.bold('Implementation Details:') + '\\n\\n' + task.details,\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Suggested Actions:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t`${chalk.cyan('1.')} Mark as in-progress: ${chalk.yellow(`task-master set-status --id=${task.parentTask.id}.${task.id} --status=in-progress`)}\\n` +\n\t\t\t\t\t`${chalk.cyan('2.')} Mark as done when completed: ${chalk.yellow(`task-master set-status --id=${task.parentTask.id}.${task.id} --status=done`)}\\n` +\n\t\t\t\t\t`${chalk.cyan('3.')} View parent task: ${chalk.yellow(`task-master show --id=${task.parentTask.id}`)}`,\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t\treturn; // Exit after displaying subtask details\n\t}\n\n\t// --- Display Regular Task Details ---\n\tconsole.log(\n\t\tboxen(chalk.white.bold(`Task: #${task.id} - ${task.title}`), {\n\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\tborderColor: 'blue',\n\t\t\tborderStyle: 'round',\n\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t})\n\t);\n\n\tconst taskTable = new Table({\n\t\tstyle: {\n\t\t\thead: [],\n\t\t\tborder: [],\n\t\t\t'padding-top': 0,\n\t\t\t'padding-bottom': 0,\n\t\t\tcompact: true\n\t\t},\n\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' 
},\n\t\tcolWidths: [15, Math.min(75, process.stdout.columns - 20 || 60)],\n\t\twordWrap: true\n\t});\n\tconst priorityColors = {\n\t\thigh: chalk.red.bold,\n\t\tmedium: chalk.yellow,\n\t\tlow: chalk.gray\n\t};\n\tconst priorityColor =\n\t\tpriorityColors[task.priority || 'medium'] || chalk.white;\n\ttaskTable.push(\n\t\t[chalk.cyan.bold('ID:'), task.id.toString()],\n\t\t[chalk.cyan.bold('Title:'), task.title],\n\t\t[\n\t\t\tchalk.cyan.bold('Status:'),\n\t\t\tgetStatusWithColor(task.status || 'pending', true)\n\t\t],\n\t\t[chalk.cyan.bold('Priority:'), priorityColor(task.priority || 'medium')],\n\t\t[\n\t\t\tchalk.cyan.bold('Dependencies:'),\n\t\t\tformatDependenciesWithStatus(\n\t\t\t\ttask.dependencies,\n\t\t\t\tdata.tasks,\n\t\t\t\ttrue,\n\t\t\t\tcomplexityReport\n\t\t\t)\n\t\t],\n\t\t[\n\t\t\tchalk.cyan.bold('Complexity:'),\n\t\t\ttask.complexityScore\n\t\t\t\t? getComplexityWithColor(task.complexityScore)\n\t\t\t\t: chalk.gray('N/A')\n\t\t],\n\t\t[chalk.cyan.bold('Description:'), task.description]\n\t);\n\tconsole.log(taskTable.toString());\n\n\tif (task.details && task.details.trim().length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Implementation Details:') + '\\n\\n' + task.details,\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n\tif (task.testStrategy && task.testStrategy.trim().length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.white.bold('Test Strategy:') + '\\n\\n' + task.testStrategy, {\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tborderColor: 'cyan',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t})\n\t\t);\n\t}\n\n\t// --- Subtask Table Display (uses filtered list: task.subtasks) ---\n\tif (task.subtasks && task.subtasks.length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.white.bold('Subtasks'), 
{\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\tborderColor: 'magenta',\n\t\t\t\tborderStyle: 'round'\n\t\t\t})\n\t\t);\n\n\t\tconst availableWidth = process.stdout.columns - 10 || 100;\n\t\tconst idWidthPct = 10;\n\t\tconst statusWidthPct = 15;\n\t\tconst depsWidthPct = 25;\n\t\tconst titleWidthPct = 100 - idWidthPct - statusWidthPct - depsWidthPct;\n\t\tconst idWidth = Math.floor(availableWidth * (idWidthPct / 100));\n\t\tconst statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));\n\t\tconst depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));\n\t\tconst titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));\n\n\t\tconst subtaskTable = new Table({\n\t\t\thead: [\n\t\t\t\tchalk.magenta.bold('ID'),\n\t\t\t\tchalk.magenta.bold('Status'),\n\t\t\t\tchalk.magenta.bold('Title'),\n\t\t\t\tchalk.magenta.bold('Deps')\n\t\t\t],\n\t\t\tcolWidths: [idWidth, statusWidth, titleWidth, depsWidth],\n\t\t\tstyle: {\n\t\t\t\thead: [],\n\t\t\t\tborder: [],\n\t\t\t\t'padding-top': 0,\n\t\t\t\t'padding-bottom': 0,\n\t\t\t\tcompact: true\n\t\t\t},\n\t\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },\n\t\t\twordWrap: true\n\t\t});\n\n\t\t// Populate table with the potentially filtered subtasks\n\t\ttask.subtasks.forEach((st) => {\n\t\t\tconst statusColorMap = {\n\t\t\t\tdone: chalk.green,\n\t\t\t\tcompleted: chalk.green,\n\t\t\t\tpending: chalk.yellow,\n\t\t\t\t'in-progress': chalk.blue\n\t\t\t};\n\t\t\tconst statusColor = statusColorMap[st.status || 'pending'] || chalk.white;\n\t\t\tlet subtaskDeps = 'None';\n\t\t\tif (st.dependencies && st.dependencies.length > 0) {\n\t\t\t\tconst formattedDeps = st.dependencies.map((depId) => {\n\t\t\t\t\t// Use the original, unfiltered list for dependency status lookup\n\t\t\t\t\tconst sourceListForDeps = originalSubtasks || task.subtasks;\n\t\t\t\t\tconst foundDepSubtask =\n\t\t\t\t\t\ttypeof depId === 'number' && depId < 
100\n\t\t\t\t\t\t\t? sourceListForDeps.find((sub) => sub.id === depId)\n\t\t\t\t\t\t\t: null;\n\n\t\t\t\t\tif (foundDepSubtask) {\n\t\t\t\t\t\tconst isDone =\n\t\t\t\t\t\t\tfoundDepSubtask.status === 'done' ||\n\t\t\t\t\t\t\tfoundDepSubtask.status === 'completed';\n\t\t\t\t\t\tconst isInProgress = foundDepSubtask.status === 'in-progress';\n\t\t\t\t\t\tconst color = isDone\n\t\t\t\t\t\t\t? chalk.green.bold\n\t\t\t\t\t\t\t: isInProgress\n\t\t\t\t\t\t\t\t? chalk.hex('#FFA500').bold\n\t\t\t\t\t\t\t\t: chalk.red.bold;\n\t\t\t\t\t\treturn color(`${task.id}.${depId}`);\n\t\t\t\t\t} else if (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\treturn chalk.red(`${task.id}.${depId} (Not found)`);\n\t\t\t\t\t}\n\t\t\t\t\treturn depId; // Assume it's a top-level task ID if not a number < 100\n\t\t\t\t});\n\t\t\t\tsubtaskDeps =\n\t\t\t\t\tformattedDeps.length === 1\n\t\t\t\t\t\t? formattedDeps[0]\n\t\t\t\t\t\t: formattedDeps.join(chalk.white(', '));\n\t\t\t}\n\t\t\tsubtaskTable.push([\n\t\t\t\t`${task.id}.${st.id}`,\n\t\t\t\tstatusColor(st.status || 'pending'),\n\t\t\t\tst.title,\n\t\t\t\tsubtaskDeps\n\t\t\t]);\n\t\t});\n\t\tconsole.log(subtaskTable.toString());\n\n\t\t// Display filter summary line *immediately after the table* if a filter was applied\n\t\tif (statusFilter && originalSubtaskCount !== null) {\n\t\t\tconsole.log(\n\t\t\t\tchalk.cyan(\n\t\t\t\t\t` Filtered by status: ${chalk.bold(statusFilter)}. 
Showing ${chalk.bold(task.subtasks.length)} of ${chalk.bold(originalSubtaskCount)} subtasks.`\n\t\t\t\t)\n\t\t\t);\n\t\t\t// Add a newline for spacing before the progress bar if the filter line was shown\n\t\t\tconsole.log();\n\t\t}\n\t\t// --- Conditional Messages for No Subtasks Shown ---\n\t} else if (statusFilter && originalSubtaskCount === 0) {\n\t\t// Case where filter applied, but the parent task had 0 subtasks originally\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.yellow(\n\t\t\t\t\t`No subtasks found matching status: ${statusFilter} (Task has no subtasks)`\n\t\t\t\t),\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\tborderStyle: 'round'\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t} else if (\n\t\tstatusFilter &&\n\t\toriginalSubtaskCount > 0 &&\n\t\ttask.subtasks.length === 0\n\t) {\n\t\t// Case where filter applied, original subtasks existed, but none matched\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.yellow(\n\t\t\t\t\t`No subtasks found matching status: ${statusFilter} (out of ${originalSubtaskCount} total)`\n\t\t\t\t),\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\tborderStyle: 'round'\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t} else if (\n\t\t!statusFilter &&\n\t\t(!originalSubtasks || originalSubtasks.length === 0)\n\t) {\n\t\t// Case where NO filter applied AND the task genuinely has no subtasks\n\t\t// Use the authoritative originalSubtasks if it exists (from filtering), else check task.subtasks\n\t\tconst actualSubtasks = originalSubtasks || task.subtasks;\n\t\tif (!actualSubtasks || actualSubtasks.length === 0) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.yellow('No subtasks found. 
Consider breaking down this task:') +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t`Run: ${chalk.cyan(`task-master expand --id=${task.id}`)}`\n\t\t\t\t\t\t),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\t}\n\n\t// --- Subtask Progress Bar Display (uses originalSubtasks or task.subtasks) ---\n\t// Determine the list to use for progress calculation (always the original if available and filtering happened)\n\tconst subtasksForProgress = originalSubtasks || task.subtasks; // Use original if filtering occurred, else the potentially empty task.subtasks\n\n\t// Only show progress if there are actually subtasks\n\tif (subtasksForProgress && subtasksForProgress.length > 0) {\n\t\tconst totalSubtasks = subtasksForProgress.length;\n\t\tconst completedSubtasks = subtasksForProgress.filter(\n\t\t\t(st) => st.status === 'done' || st.status === 'completed'\n\t\t).length;\n\n\t\t// Count other statuses from the original/complete list\n\t\tconst inProgressSubtasks = subtasksForProgress.filter(\n\t\t\t(st) => st.status === 'in-progress'\n\t\t).length;\n\t\tconst pendingSubtasks = subtasksForProgress.filter(\n\t\t\t(st) => st.status === 'pending'\n\t\t).length;\n\t\tconst blockedSubtasks = subtasksForProgress.filter(\n\t\t\t(st) => st.status === 'blocked'\n\t\t).length;\n\t\tconst deferredSubtasks = subtasksForProgress.filter(\n\t\t\t(st) => st.status === 'deferred'\n\t\t).length;\n\t\tconst cancelledSubtasks = subtasksForProgress.filter(\n\t\t\t(st) => st.status === 'cancelled'\n\t\t).length;\n\n\t\tconst statusBreakdown = {\n\t\t\t// Calculate breakdown based on the complete list\n\t\t\t'in-progress': (inProgressSubtasks / totalSubtasks) * 100,\n\t\t\tpending: (pendingSubtasks / totalSubtasks) * 100,\n\t\t\tblocked: (blockedSubtasks / totalSubtasks) * 100,\n\t\t\tdeferred: 
(deferredSubtasks / totalSubtasks) * 100,\n\t\t\tcancelled: (cancelledSubtasks / totalSubtasks) * 100\n\t\t};\n\t\tconst completionPercentage = (completedSubtasks / totalSubtasks) * 100;\n\n\t\tconst availableWidth = process.stdout.columns || 80;\n\t\tconst boxPadding = 2;\n\t\tconst boxBorders = 2;\n\t\tconst percentTextLength = 5;\n\t\tconst progressBarLength = Math.max(\n\t\t\t20,\n\t\t\tMath.min(\n\t\t\t\t60,\n\t\t\t\tavailableWidth - boxPadding - boxBorders - percentTextLength - 35\n\t\t\t)\n\t\t);\n\n\t\tconst statusCounts =\n\t\t\t`${chalk.green('✓ Done:')} ${completedSubtasks} ${chalk.hex('#FFA500')('► In Progress:')} ${inProgressSubtasks} ${chalk.yellow('○ Pending:')} ${pendingSubtasks}\\n` +\n\t\t\t`${chalk.red('! Blocked:')} ${blockedSubtasks} ${chalk.gray('⏱ Deferred:')} ${deferredSubtasks} ${chalk.gray('✗ Cancelled:')} ${cancelledSubtasks}`;\n\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Subtask Progress:') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t`${chalk.cyan('Completed:')} ${completedSubtasks}/${totalSubtasks} (${completionPercentage.toFixed(1)}%)\\n` +\n\t\t\t\t\t`${statusCounts}\\n` +\n\t\t\t\t\t`${chalk.cyan('Progress:')} ${createProgressBar(completionPercentage, progressBarLength, statusBreakdown)}`,\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\t\twidth: Math.min(availableWidth - 10, 100),\n\t\t\t\t\ttextAlignment: 'left'\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n\n\t// --- Suggested Actions ---\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white.bold('Suggested Actions:') +\n\t\t\t\t'\\n' +\n\t\t\t\t`${chalk.cyan('1.')} Mark as in-progress: ${chalk.yellow(`task-master set-status --id=${task.id} --status=in-progress`)}\\n` +\n\t\t\t\t`${chalk.cyan('2.')} Mark as done when completed: ${chalk.yellow(`task-master set-status --id=${task.id} --status=done`)}\\n` +\n\t\t\t\t// Determine action 3 based on whether 
subtasks *exist* (use the source list for progress)\n\t\t\t\t(subtasksForProgress && subtasksForProgress.length > 0\n\t\t\t\t\t? `${chalk.cyan('3.')} Update subtask status: ${chalk.yellow(`task-master set-status --id=${task.id}.1 --status=done`)}` // Example uses .1\n\t\t\t\t\t: `${chalk.cyan('3.')} Break down into subtasks: ${chalk.yellow(`task-master expand --id=${task.id}`)}`),\n\t\t\t{\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tborderColor: 'green',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1 }\n\t\t\t}\n\t\t)\n\t);\n\n\t// Show FYI notice if migration occurred\n\tdisplayTaggedTasksFYI(data);\n}\n\n/**\n * Display the complexity analysis report in a nice format\n * @param {string} reportPath - Path to the complexity report file\n */\nasync function displayComplexityReport(reportPath) {\n\t// Check if the report exists\n\tif (!fs.existsSync(reportPath)) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.yellow(`No complexity report found at ${reportPath}\\n\\n`) +\n\t\t\t\t\t'Would you like to generate one now?',\n\t\t\t\t{\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\n\t\tconst rl = readline.createInterface({\n\t\t\tinput: process.stdin,\n\t\t\toutput: process.stdout\n\t\t});\n\n\t\tconst answer = await new Promise((resolve) => {\n\t\t\trl.question(chalk.cyan('Generate complexity report? (y/n): '), resolve);\n\t\t});\n\t\trl.close();\n\n\t\tif (answer.toLowerCase() === 'y' || answer.toLowerCase() === 'yes') {\n\t\t\t// Call the analyze-complexity command\n\t\t\tconsole.log(chalk.blue('Generating complexity report...'));\n\t\t\tconst tasksPath = TASKMASTER_TASKS_FILE;\n\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\tconsole.error(\n\t\t\t\t\t'❌ No tasks.json file found. 
Please run \"task-master init\" or create a tasks.json file.'\n\t\t\t\t);\n\t\t\t\treturn null;\n\t\t\t}\n\n\t\t\tawait analyzeTaskComplexity({\n\t\t\t\toutput: reportPath,\n\t\t\t\tresearch: false, // Default to no research for speed\n\t\t\t\tfile: tasksPath\n\t\t\t});\n\t\t\t// Read the newly generated report\n\t\t\treturn displayComplexityReport(reportPath);\n\t\t} else {\n\t\t\tconsole.log(chalk.yellow('Report generation cancelled.'));\n\t\t\treturn;\n\t\t}\n\t}\n\n\t// Read the report\n\tlet report;\n\ttry {\n\t\treport = JSON.parse(fs.readFileSync(reportPath, 'utf8'));\n\t} catch (error) {\n\t\tlog('error', `Error reading complexity report: ${error.message}`);\n\t\treturn;\n\t}\n\n\t// Display report header\n\tconsole.log(\n\t\tboxen(chalk.white.bold('Task Complexity Analysis Report'), {\n\t\t\tpadding: 1,\n\t\t\tborderColor: 'blue',\n\t\t\tborderStyle: 'round',\n\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t})\n\t);\n\n\t// Display metadata\n\tconst metaTable = new Table({\n\t\tstyle: {\n\t\t\thead: [],\n\t\t\tborder: [],\n\t\t\t'padding-top': 0,\n\t\t\t'padding-bottom': 0,\n\t\t\tcompact: true\n\t\t},\n\t\tchars: {\n\t\t\tmid: '',\n\t\t\t'left-mid': '',\n\t\t\t'mid-mid': '',\n\t\t\t'right-mid': ''\n\t\t},\n\t\tcolWidths: [20, 50]\n\t});\n\n\tmetaTable.push(\n\t\t[\n\t\t\tchalk.cyan.bold('Generated:'),\n\t\t\tnew Date(report.meta.generatedAt).toLocaleString()\n\t\t],\n\t\t[chalk.cyan.bold('Tasks Analyzed:'), report.meta.tasksAnalyzed],\n\t\t[chalk.cyan.bold('Threshold Score:'), report.meta.thresholdScore],\n\t\t[chalk.cyan.bold('Project:'), report.meta.projectName],\n\t\t[\n\t\t\tchalk.cyan.bold('Research-backed:'),\n\t\t\treport.meta.usedResearch ? 
'Yes' : 'No'\n\t\t]\n\t);\n\n\tconsole.log(metaTable.toString());\n\n\t// Sort tasks by complexity score (highest first)\n\tconst sortedTasks = [...report.complexityAnalysis].sort(\n\t\t(a, b) => b.complexityScore - a.complexityScore\n\t);\n\n\t// Determine which tasks need expansion based on threshold\n\tconst tasksNeedingExpansion = sortedTasks.filter(\n\t\t(task) => task.complexityScore >= report.meta.thresholdScore\n\t);\n\tconst simpleTasks = sortedTasks.filter(\n\t\t(task) => task.complexityScore < report.meta.thresholdScore\n\t);\n\n\t// Create progress bar to show complexity distribution\n\tconst complexityDistribution = [0, 0, 0]; // Low (0-4), Medium (5-7), High (8-10)\n\tsortedTasks.forEach((task) => {\n\t\tif (task.complexityScore < 5) complexityDistribution[0]++;\n\t\telse if (task.complexityScore < 8) complexityDistribution[1]++;\n\t\telse complexityDistribution[2]++;\n\t});\n\n\tconst percentLow = Math.round(\n\t\t(complexityDistribution[0] / sortedTasks.length) * 100\n\t);\n\tconst percentMedium = Math.round(\n\t\t(complexityDistribution[1] / sortedTasks.length) * 100\n\t);\n\tconst percentHigh = Math.round(\n\t\t(complexityDistribution[2] / sortedTasks.length) * 100\n\t);\n\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white.bold('Complexity Distribution\\n\\n') +\n\t\t\t\t`${chalk.green.bold('Low (1-4):')} ${complexityDistribution[0]} tasks (${percentLow}%)\\n` +\n\t\t\t\t`${chalk.yellow.bold('Medium (5-7):')} ${complexityDistribution[1]} tasks (${percentMedium}%)\\n` +\n\t\t\t\t`${chalk.red.bold('High (8-10):')} ${complexityDistribution[2]} tasks (${percentHigh}%)`,\n\t\t\t{\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'cyan',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t}\n\t\t)\n\t);\n\n\t// Get terminal width\n\tconst terminalWidth = process.stdout.columns || 100; // Default to 100 if can't detect\n\n\t// Calculate dynamic column widths\n\tconst idWidth = 12;\n\tconst titleWidth = Math.floor(terminalWidth * 0.25); // 
25% of width\n\tconst scoreWidth = 8;\n\tconst subtasksWidth = 8;\n\t// Command column gets the remaining space (minus some buffer for borders)\n\tconst commandWidth =\n\t\tterminalWidth - idWidth - titleWidth - scoreWidth - subtasksWidth - 10;\n\n\t// Create table with new column widths and word wrapping\n\tconst complexTable = new Table({\n\t\thead: [\n\t\t\tchalk.yellow.bold('ID'),\n\t\t\tchalk.yellow.bold('Title'),\n\t\t\tchalk.yellow.bold('Score'),\n\t\t\tchalk.yellow.bold('Subtasks'),\n\t\t\tchalk.yellow.bold('Expansion Command')\n\t\t],\n\t\tcolWidths: [idWidth, titleWidth, scoreWidth, subtasksWidth, commandWidth],\n\t\tstyle: { head: [], border: [] },\n\t\twordWrap: true,\n\t\twrapOnWordBoundary: true\n\t});\n\n\t// When adding rows, don't truncate the expansion command\n\ttasksNeedingExpansion.forEach((task) => {\n\t\tconst expansionCommand = `task-master expand --id=${task.taskId} --num=${task.recommendedSubtasks}${task.expansionPrompt ? ` --prompt=\"${task.expansionPrompt}\"` : ''}`;\n\n\t\tcomplexTable.push([\n\t\t\ttask.taskId,\n\t\t\ttruncate(task.taskTitle, titleWidth - 3), // Still truncate title for readability\n\t\t\tgetComplexityWithColor(task.complexityScore),\n\t\t\ttask.recommendedSubtasks,\n\t\t\tchalk.cyan(expansionCommand) // Don't truncate - allow wrapping\n\t\t]);\n\t});\n\n\tconsole.log(complexTable.toString());\n\n\t// Create table for simple tasks\n\tif (simpleTasks.length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.green.bold(`Simple Tasks (${simpleTasks.length})`), {\n\t\t\t\tpadding: { left: 2, right: 2, top: 0, bottom: 0 },\n\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\tborderColor: 'green',\n\t\t\t\tborderStyle: 'round'\n\t\t\t})\n\t\t);\n\n\t\tconst simpleTable = new Table({\n\t\t\thead: [\n\t\t\t\tchalk.green.bold('ID'),\n\t\t\t\tchalk.green.bold('Title'),\n\t\t\t\tchalk.green.bold('Score'),\n\t\t\t\tchalk.green.bold('Reasoning')\n\t\t\t],\n\t\t\tcolWidths: [5, 40, 8, 50],\n\t\t\tstyle: { head: [], border: [] 
}\n\t\t});\n\n\t\tsimpleTasks.forEach((task) => {\n\t\t\tsimpleTable.push([\n\t\t\t\ttask.taskId,\n\t\t\t\ttruncate(task.taskTitle, 37),\n\t\t\t\tgetComplexityWithColor(task.complexityScore),\n\t\t\t\ttruncate(task.reasoning, 47)\n\t\t\t]);\n\t\t});\n\n\t\tconsole.log(simpleTable.toString());\n\t}\n\n\t// Show action suggestions\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white.bold('Suggested Actions:') +\n\t\t\t\t'\\n\\n' +\n\t\t\t\t`${chalk.cyan('1.')} Expand all complex tasks: ${chalk.yellow(`task-master expand --all`)}\\n` +\n\t\t\t\t`${chalk.cyan('2.')} Expand a specific task: ${chalk.yellow(`task-master expand --id=<id>`)}\\n` +\n\t\t\t\t`${chalk.cyan('3.')} Regenerate with research: ${chalk.yellow(`task-master analyze-complexity --research`)}`,\n\t\t\t{\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'cyan',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1 }\n\t\t\t}\n\t\t)\n\t);\n}\n\n/**\n * Generate a prompt for complexity analysis\n * @param {Object} tasksData - Tasks data object containing tasks array\n * @returns {string} Generated prompt\n */\nfunction generateComplexityAnalysisPrompt(tasksData) {\n\tconst defaultSubtasks = getDefaultSubtasks(null); // Use the getter\n\treturn `Analyze the complexity of the following tasks and provide recommendations for subtask breakdown:\n\n${tasksData.tasks\n\t.map(\n\t\t(task) => `\nTask ID: ${task.id}\nTitle: ${task.title}\nDescription: ${task.description}\nDetails: ${task.details}\nDependencies: ${JSON.stringify(task.dependencies || [])}\nPriority: ${task.priority || 'medium'}\n`\n\t)\n\t.join('\\n---\\n')}\n\nAnalyze each task and return a JSON array with the following structure for each task:\n[\n {\n \"taskId\": number,\n \"taskTitle\": string,\n \"complexityScore\": number (1-10),\n \"recommendedSubtasks\": number (${Math.max(3, defaultSubtasks - 1)}-${Math.min(8, defaultSubtasks + 2)}),\n \"expansionPrompt\": string (a specific prompt for generating good subtasks),\n \"reasoning\": string (brief 
explanation of your assessment)\n },\n ...\n]\n\nIMPORTANT: Make sure to include an analysis for EVERY task listed above, with the correct taskId matching each task's ID.\n`;\n}\n\n/**\n * Confirm overwriting existing tasks.json file\n * @param {string} tasksPath - Path to the tasks.json file\n * @returns {Promise<boolean>} - Promise resolving to true if user confirms, false otherwise\n */\nasync function confirmTaskOverwrite(tasksPath) {\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.yellow(\n\t\t\t\t\"It looks like you've already generated tasks for this project.\\n\"\n\t\t\t) +\n\t\t\t\tchalk.yellow(\n\t\t\t\t\t'Executing this command will overwrite any existing tasks.'\n\t\t\t\t),\n\t\t\t{\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'yellow',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1 }\n\t\t\t}\n\t\t)\n\t);\n\n\tconst rl = readline.createInterface({\n\t\tinput: process.stdin,\n\t\toutput: process.stdout\n\t});\n\n\tconst answer = await new Promise((resolve) => {\n\t\trl.question(\n\t\t\tchalk.cyan('Are you sure you wish to continue? (y/N): '),\n\t\t\tresolve\n\t\t);\n\t});\n\trl.close();\n\n\treturn answer.toLowerCase() === 'y' || answer.toLowerCase() === 'yes';\n}\n\n/**\n * Displays the API key status for different providers.\n * @param {Array<{provider: string, cli: boolean, mcp: boolean}>} statusReport - The report generated by getApiKeyStatusReport.\n */\nfunction displayApiKeyStatus(statusReport) {\n\tif (!statusReport || statusReport.length === 0) {\n\t\tconsole.log(chalk.yellow('No API key status information available.'));\n\t\treturn;\n\t}\n\n\tconst table = new Table({\n\t\thead: [\n\t\t\tchalk.cyan('Provider'),\n\t\t\tchalk.cyan('CLI Key (.env)'),\n\t\t\tchalk.cyan('MCP Key (mcp.json)')\n\t\t],\n\t\tcolWidths: [15, 20, 25],\n\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' }\n\t});\n\n\tstatusReport.forEach(({ provider, cli, mcp }) => {\n\t\tconst cliStatus = cli ? 
chalk.green('✅ Found') : chalk.red('❌ Missing');\n\t\tconst mcpStatus = mcp ? chalk.green('✅ Found') : chalk.red('❌ Missing');\n\t\t// Capitalize provider name for display\n\t\tconst providerName = provider.charAt(0).toUpperCase() + provider.slice(1);\n\t\ttable.push([providerName, cliStatus, mcpStatus]);\n\t});\n\n\tconsole.log(chalk.bold('\\n🔑 API Key Status:'));\n\tconsole.log(table.toString());\n\tconsole.log(\n\t\tchalk.gray(\n\t\t\t` Note: Some providers (e.g., Azure, Ollama) may require additional endpoint configuration in ${TASKMASTER_CONFIG_FILE}.`\n\t\t)\n\t);\n}\n\n// --- Formatting Helpers (Potentially move some to utils.js if reusable) ---\n\nconst formatSweScoreWithTertileStars = (score, allModels) => {\n\t// ... (Implementation from previous version or refine) ...\n\tif (score === null || score === undefined || score <= 0) return 'N/A';\n\tconst formattedPercentage = `${(score * 100).toFixed(1)}%`;\n\n\tconst validScores = allModels\n\t\t.map((m) => m.sweScore)\n\t\t.filter((s) => s !== null && s !== undefined && s > 0);\n\tconst sortedScores = [...validScores].sort((a, b) => b - a);\n\tconst n = sortedScores.length;\n\tlet stars = chalk.gray('☆☆☆');\n\n\tif (n > 0) {\n\t\tconst topThirdIndex = Math.max(0, Math.floor(n / 3) - 1);\n\t\tconst midThirdIndex = Math.max(0, Math.floor((2 * n) / 3) - 1);\n\t\tif (score >= sortedScores[topThirdIndex]) stars = chalk.yellow('★★★');\n\t\telse if (score >= sortedScores[midThirdIndex])\n\t\t\tstars = chalk.yellow('★★') + chalk.gray('☆');\n\t\telse stars = chalk.yellow('★') + chalk.gray('☆☆');\n\t}\n\treturn `${formattedPercentage} ${stars}`;\n};\n\nconst formatCost = (costObj) => {\n\t// ... 
(Implementation from previous version or refine) ...\n\tif (!costObj) return 'N/A';\n\tif (costObj.input === 0 && costObj.output === 0) {\n\t\treturn chalk.green('Free');\n\t}\n\tconst formatSingleCost = (costValue) => {\n\t\tif (costValue === null || costValue === undefined) return 'N/A';\n\t\tconst isInteger = Number.isInteger(costValue);\n\t\treturn `$${costValue.toFixed(isInteger ? 0 : 2)}`;\n\t};\n\treturn `${formatSingleCost(costObj.input)} in, ${formatSingleCost(costObj.output)} out`;\n};\n\n// --- Display Functions ---\n\n/**\n * Displays the currently configured active models.\n * @param {ConfigData} configData - The active configuration data.\n * @param {AvailableModel[]} allAvailableModels - Needed for SWE score tertiles.\n */\nfunction displayModelConfiguration(configData, allAvailableModels = []) {\n\tconsole.log(chalk.cyan.bold('\\nActive Model Configuration:'));\n\tconst active = configData.activeModels;\n\tconst activeTable = new Table({\n\t\thead: [\n\t\t\t'Role',\n\t\t\t'Provider',\n\t\t\t'Model ID',\n\t\t\t'SWE Score',\n\t\t\t'Cost ($/1M tkns)'\n\t\t\t// 'API Key Status' // Removed, handled by separate displayApiKeyStatus\n\t\t].map((h) => chalk.cyan.bold(h)),\n\t\tcolWidths: [10, 14, 30, 18, 20 /*, 28 */], // Adjusted widths\n\t\tstyle: { head: ['cyan', 'bold'] }\n\t});\n\n\tactiveTable.push([\n\t\tchalk.white('Main'),\n\t\tactive.main.provider,\n\t\tactive.main.modelId,\n\t\tformatSweScoreWithTertileStars(active.main.sweScore, allAvailableModels),\n\t\tformatCost(active.main.cost)\n\t\t// getCombinedStatus(active.main.keyStatus) // Removed\n\t]);\n\tactiveTable.push([\n\t\tchalk.white('Research'),\n\t\tactive.research.provider,\n\t\tactive.research.modelId,\n\t\tformatSweScoreWithTertileStars(\n\t\t\tactive.research.sweScore,\n\t\t\tallAvailableModels\n\t\t),\n\t\tformatCost(active.research.cost)\n\t\t// getCombinedStatus(active.research.keyStatus) // Removed\n\t]);\n\tif (active.fallback && active.fallback.provider && active.fallback.modelId) 
{\n\t\tactiveTable.push([\n\t\t\tchalk.white('Fallback'),\n\t\t\tactive.fallback.provider,\n\t\t\tactive.fallback.modelId,\n\t\t\tformatSweScoreWithTertileStars(\n\t\t\t\tactive.fallback.sweScore,\n\t\t\t\tallAvailableModels\n\t\t\t),\n\t\t\tformatCost(active.fallback.cost)\n\t\t\t// getCombinedStatus(active.fallback.keyStatus) // Removed\n\t\t]);\n\t} else {\n\t\tactiveTable.push([\n\t\t\tchalk.white('Fallback'),\n\t\t\tchalk.gray('-'),\n\t\t\tchalk.gray('(Not Set)'),\n\t\t\tchalk.gray('-'),\n\t\t\tchalk.gray('-')\n\t\t\t// chalk.gray('-') // Removed\n\t\t]);\n\t}\n\tconsole.log(activeTable.toString());\n}\n\n/**\n * Displays the list of available models not currently configured.\n * @param {AvailableModel[]} availableModels - List of available models.\n */\nfunction displayAvailableModels(availableModels) {\n\tif (!availableModels || availableModels.length === 0) {\n\t\tconsole.log(\n\t\t\tchalk.gray('\\n(No other models available or all are configured)')\n\t\t);\n\t\treturn;\n\t}\n\n\tconsole.log(chalk.cyan.bold('\\nOther Available Models:'));\n\tconst availableTable = new Table({\n\t\thead: ['Provider', 'Model ID', 'SWE Score', 'Cost ($/1M tkns)'].map((h) =>\n\t\t\tchalk.cyan.bold(h)\n\t\t),\n\t\tcolWidths: [15, 40, 18, 25],\n\t\tstyle: { head: ['cyan', 'bold'] }\n\t});\n\n\tavailableModels.forEach((model) => {\n\t\tavailableTable.push([\n\t\t\tmodel.provider,\n\t\t\tmodel.modelId,\n\t\t\tformatSweScoreWithTertileStars(model.sweScore, availableModels), // Pass itself for comparison\n\t\t\tformatCost(model.cost)\n\t\t]);\n\t});\n\tconsole.log(availableTable.toString());\n\n\t// --- Suggested Actions Section (moved here from models command) ---\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white.bold('Next Steps:') +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan(\n\t\t\t\t\t`1. Set main model: ${chalk.yellow('task-master models --set-main <model_id>')}`\n\t\t\t\t) +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan(\n\t\t\t\t\t`2. 
Set research model: ${chalk.yellow('task-master models --set-research <model_id>')}`\n\t\t\t\t) +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan(\n\t\t\t\t\t`3. Set fallback model: ${chalk.yellow('task-master models --set-fallback <model_id>')}`\n\t\t\t\t) +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan(\n\t\t\t\t\t`4. Run interactive setup: ${chalk.yellow('task-master models --setup')}`\n\t\t\t\t) +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan(\n\t\t\t\t\t`5. Use custom ollama/openrouter models: ${chalk.yellow('task-master models --openrouter|ollama --set-main|research|fallback <model_id>')}`\n\t\t\t\t),\n\t\t\t{\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'yellow',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1 }\n\t\t\t}\n\t\t)\n\t);\n}\n\n/**\n * Displays AI usage telemetry summary in the CLI.\n * @param {object} telemetryData - The telemetry data object.\n * @param {string} outputType - 'cli' or 'mcp' (though typically only called for 'cli').\n */\nfunction displayAiUsageSummary(telemetryData, outputType = 'cli') {\n\tif (\n\t\t(outputType !== 'cli' && outputType !== 'text') ||\n\t\t!telemetryData ||\n\t\tisSilentMode()\n\t) {\n\t\treturn; // Only display for CLI and if data exists and not in silent mode\n\t}\n\n\tconst {\n\t\tmodelUsed,\n\t\tproviderName,\n\t\tinputTokens,\n\t\toutputTokens,\n\t\ttotalTokens,\n\t\ttotalCost,\n\t\tcommandName\n\t} = telemetryData;\n\n\tlet summary = chalk.bold.blue('AI Usage Summary:') + '\\n';\n\tsummary += chalk.gray(` Command: ${commandName}\\n`);\n\tsummary += chalk.gray(` Provider: ${providerName}\\n`);\n\tsummary += chalk.gray(` Model: ${modelUsed}\\n`);\n\tsummary += chalk.gray(\n\t\t` Tokens: ${totalTokens} (Input: ${inputTokens}, Output: ${outputTokens})\\n`\n\t);\n\tsummary += chalk.gray(` Est. 
Cost: $${totalCost.toFixed(6)}`);\n\n\tconsole.log(\n\t\tboxen(summary, {\n\t\t\tpadding: 1,\n\t\t\tmargin: { top: 1 },\n\t\t\tborderColor: 'blue',\n\t\t\tborderStyle: 'round',\n\t\t\ttitle: '💡 Telemetry',\n\t\t\ttitleAlignment: 'center'\n\t\t})\n\t);\n}\n\n/**\n * Display multiple tasks in a compact summary format with interactive drill-down\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {Array<string>} taskIds - Array of task IDs to display\n * @param {string} complexityReportPath - Path to complexity report\n * @param {string} statusFilter - Optional status filter for subtasks\n * @param {Object} context - Context object containing projectRoot and tag\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n */\nasync function displayMultipleTasksSummary(\n\ttasksPath,\n\ttaskIds,\n\tcomplexityReportPath = null,\n\tstatusFilter = null,\n\tcontext = {}\n) {\n\tdisplayBanner();\n\n\t// Extract projectRoot and tag from context\n\tconst projectRoot = context.projectRoot || null;\n\tconst tag = context.tag || null;\n\n\t// Read the tasks file with proper projectRoot for tag resolution\n\tconst data = readJSON(tasksPath, projectRoot, tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found.');\n\t\tprocess.exit(1);\n\t}\n\n\t// Read complexity report once\n\tconst complexityReport = readComplexityReport(complexityReportPath);\n\n\t// Find all requested tasks\n\tconst foundTasks = [];\n\tconst notFoundIds = [];\n\n\ttaskIds.forEach((id) => {\n\t\tconst { task } = findTaskById(\n\t\t\tdata.tasks,\n\t\t\tid,\n\t\t\tcomplexityReport,\n\t\t\tstatusFilter\n\t\t);\n\t\tif (task) {\n\t\t\tfoundTasks.push(task);\n\t\t} else {\n\t\t\tnotFoundIds.push(id);\n\t\t}\n\t});\n\n\t// Show not found tasks\n\tif (notFoundIds.length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.yellow(`Tasks not found: ${notFoundIds.join(', ')}`), {\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 
},\n\t\t\t\tborderColor: 'yellow',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t})\n\t\t);\n\t}\n\n\tif (foundTasks.length === 0) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.red('No valid tasks found to display'), {\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tborderColor: 'red',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1 }\n\t\t\t})\n\t\t);\n\t\treturn;\n\t}\n\n\t// Display header\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white.bold(\n\t\t\t\t`Task Summary (${foundTasks.length} task${foundTasks.length === 1 ? '' : 's'})`\n\t\t\t),\n\t\t\t{\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tborderColor: 'blue',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t}\n\t\t)\n\t);\n\n\t// Calculate terminal width for responsive layout\n\tconst terminalWidth = process.stdout.columns || 100;\n\tconst availableWidth = terminalWidth - 10;\n\n\t// Create compact summary table\n\tconst summaryTable = new Table({\n\t\thead: [\n\t\t\tchalk.cyan.bold('ID'),\n\t\t\tchalk.cyan.bold('Title'),\n\t\t\tchalk.cyan.bold('Status'),\n\t\t\tchalk.cyan.bold('Priority'),\n\t\t\tchalk.cyan.bold('Subtasks'),\n\t\t\tchalk.cyan.bold('Progress')\n\t\t],\n\t\tcolWidths: [\n\t\t\tMath.floor(availableWidth * 0.08), // ID: 8%\n\t\t\tMath.floor(availableWidth * 0.35), // Title: 35%\n\t\t\tMath.floor(availableWidth * 0.12), // Status: 12%\n\t\t\tMath.floor(availableWidth * 0.1), // Priority: 10%\n\t\t\tMath.floor(availableWidth * 0.15), // Subtasks: 15%\n\t\t\tMath.floor(availableWidth * 0.2) // Progress: 20%\n\t\t],\n\t\tstyle: {\n\t\t\thead: [],\n\t\t\tborder: [],\n\t\t\t'padding-top': 0,\n\t\t\t'padding-bottom': 0,\n\t\t\tcompact: true\n\t\t},\n\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },\n\t\twordWrap: true\n\t});\n\n\t// Add each task to the summary table\n\tfoundTasks.forEach((task) => {\n\t\t// Handle subtask case\n\t\tif (task.isSubtask || task.parentTask) 
{\n\t\t\tconst parentId = task.parentTask ? task.parentTask.id : 'Unknown';\n\t\t\tsummaryTable.push([\n\t\t\t\t`${parentId}.${task.id}`,\n\t\t\t\ttruncate(task.title, Math.floor(availableWidth * 0.35) - 3),\n\t\t\t\tgetStatusWithColor(task.status || 'pending', true),\n\t\t\t\tchalk.gray('(subtask)'),\n\t\t\t\tchalk.gray('N/A'),\n\t\t\t\tchalk.gray('N/A')\n\t\t\t]);\n\t\t\treturn;\n\t\t}\n\n\t\t// Handle regular task\n\t\tconst priorityColors = {\n\t\t\thigh: chalk.red.bold,\n\t\t\tmedium: chalk.yellow,\n\t\t\tlow: chalk.gray\n\t\t};\n\t\tconst priorityColor =\n\t\t\tpriorityColors[task.priority || 'medium'] || chalk.white;\n\n\t\t// Calculate subtask summary\n\t\tlet subtaskSummary = chalk.gray('None');\n\t\tlet progressBar = chalk.gray('N/A');\n\n\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\tconst total = task.subtasks.length;\n\t\t\tconst completed = task.subtasks.filter(\n\t\t\t\t(st) => st.status === 'done' || st.status === 'completed'\n\t\t\t).length;\n\t\t\tconst inProgress = task.subtasks.filter(\n\t\t\t\t(st) => st.status === 'in-progress'\n\t\t\t).length;\n\t\t\tconst pending = task.subtasks.filter(\n\t\t\t\t(st) => st.status === 'pending'\n\t\t\t).length;\n\n\t\t\t// Compact subtask count with status indicators\n\t\t\tsubtaskSummary = `${chalk.green(completed)}/${total}`;\n\t\t\tif (inProgress > 0)\n\t\t\t\tsubtaskSummary += ` ${chalk.hex('#FFA500')(`+${inProgress}`)}`;\n\t\t\tif (pending > 0) subtaskSummary += ` ${chalk.yellow(`(${pending})`)}`;\n\n\t\t\t// Mini progress bar (shorter than usual)\n\t\t\tconst completionPercentage = (completed / total) * 100;\n\t\t\tconst barLength = 8; // Compact bar\n\t\t\tconst statusBreakdown = {\n\t\t\t\t'in-progress': (inProgress / total) * 100,\n\t\t\t\tpending: (pending / total) * 100\n\t\t\t};\n\t\t\tprogressBar = 
createProgressBar(\n\t\t\t\tcompletionPercentage,\n\t\t\t\tbarLength,\n\t\t\t\tstatusBreakdown\n\t\t\t);\n\t\t}\n\n\t\tsummaryTable.push([\n\t\t\ttask.id.toString(),\n\t\t\ttruncate(task.title, Math.floor(availableWidth * 0.35) - 3),\n\t\t\tgetStatusWithColor(task.status || 'pending', true),\n\t\t\tpriorityColor(task.priority || 'medium'),\n\t\t\tsubtaskSummary,\n\t\t\tprogressBar\n\t\t]);\n\t});\n\n\tconsole.log(summaryTable.toString());\n\n\t// Interactive drill-down prompt\n\tif (foundTasks.length > 1) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Interactive Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\tchalk.cyan('• Press Enter to view available actions for all tasks') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t'• Type a task ID (e.g., \"3\" or \"3.2\") to view that specific task'\n\t\t\t\t\t) +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\tchalk.cyan('• Type \"q\" to quit'),\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\n\t\tconst rl = readline.createInterface({\n\t\t\tinput: process.stdin,\n\t\t\toutput: process.stdout\n\t\t});\n\n\t\tconst choice = await new Promise((resolve) => {\n\t\t\trl.question(chalk.cyan('Your choice: '), resolve);\n\t\t});\n\t\trl.close();\n\n\t\tif (choice.toLowerCase() === 'q') {\n\t\t\treturn;\n\t\t} else if (choice.trim() === '') {\n\t\t\t// Show action menu for selected tasks\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.white.bold('Available Actions for Selected Tasks:') +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan('1.') +\n\t\t\t\t\t\t' Mark all as in-progress' +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan('2.') +\n\t\t\t\t\t\t' Mark all as done' +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan('3.') +\n\t\t\t\t\t\t' Show next available task' +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan('4.') +\n\t\t\t\t\t\t' Expand all tasks (generate subtasks)' 
+\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan('5.') +\n\t\t\t\t\t\t' View dependency relationships' +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan('6.') +\n\t\t\t\t\t\t' Generate task files' +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.gray('Or type a task ID to view details'),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\n\t\t\tconst rl2 = readline.createInterface({\n\t\t\t\tinput: process.stdin,\n\t\t\t\toutput: process.stdout\n\t\t\t});\n\n\t\t\tconst actionChoice = await new Promise((resolve) => {\n\t\t\t\trl2.question(chalk.cyan('Choose action (1-6): '), resolve);\n\t\t\t});\n\t\t\trl2.close();\n\n\t\t\tconst taskIdList = foundTasks.map((t) => t.id).join(',');\n\n\t\t\tswitch (actionChoice.trim()) {\n\t\t\t\tcase '1':\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t`\\n→ Command: task-master set-status --id=${taskIdList} --status=in-progress`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t'✓ Copy and run this command to mark all tasks as in-progress'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase '2':\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t`\\n→ Command: task-master set-status --id=${taskIdList} --status=done`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green('✓ Copy and run this command to mark all tasks as done')\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase '3':\n\t\t\t\t\tconsole.log(chalk.blue(`\\n→ Command: task-master next`));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t'✓ Copy and run this command to see the next available task'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase '4':\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t`\\n→ Command: task-master expand --id=${taskIdList} 
--research`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t'✓ Copy and run this command to expand all selected tasks into subtasks'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase '5': {\n\t\t\t\t\t// Show dependency visualization\n\t\t\t\t\tconsole.log(chalk.white.bold('\\nDependency Relationships:'));\n\t\t\t\t\tlet hasDependencies = false;\n\t\t\t\t\tfoundTasks.forEach((task) => {\n\t\t\t\t\t\tif (task.dependencies && task.dependencies.length > 0) {\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t\t\t`Task ${task.id} depends on: ${task.dependencies.join(', ')}`\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\thasDependencies = true;\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t\tif (!hasDependencies) {\n\t\t\t\t\t\tconsole.log(chalk.gray('No dependencies found for selected tasks'));\n\t\t\t\t\t}\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase '6':\n\t\t\t\t\tconsole.log(chalk.blue(`\\n→ Command: task-master generate`));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green('✓ Copy and run this command to generate task files')\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tdefault:\n\t\t\t\t\tif (actionChoice.trim().length > 0) {\n\t\t\t\t\t\tconsole.log(chalk.yellow(`Invalid choice: ${actionChoice.trim()}`));\n\t\t\t\t\t\tconsole.log(chalk.gray('Please choose 1-6 or type a task ID'));\n\t\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// Show specific task\n\t\t\tawait displayTaskById(\n\t\t\t\ttasksPath,\n\t\t\t\tchoice.trim(),\n\t\t\t\tcomplexityReportPath,\n\t\t\t\tstatusFilter,\n\t\t\t\tcontext\n\t\t\t);\n\t\t}\n\t} else {\n\t\t// Single task - show suggested actions\n\t\tconst task = foundTasks[0];\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Suggested Actions:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t`${chalk.cyan('1.')} View full details: ${chalk.yellow(`task-master show ${task.id}`)}\\n` +\n\t\t\t\t\t`${chalk.cyan('2.')} Mark as in-progress: ${chalk.yellow(`task-master set-status 
--id=${task.id} --status=in-progress`)}\\n` +\n\t\t\t\t\t`${chalk.cyan('3.')} Mark as done: ${chalk.yellow(`task-master set-status --id=${task.id} --status=done`)}`,\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n}\n\n/**\n * Display context analysis results with beautiful formatting\n * @param {Object} analysisData - Analysis data from ContextGatherer\n * @param {string} semanticQuery - The original query used for semantic search\n * @param {number} contextSize - Size of gathered context in characters\n */\nfunction displayContextAnalysis(analysisData, semanticQuery, contextSize) {\n\tif (isSilentMode() || !analysisData) return;\n\n\tconst { highRelevance, mediumRelevance, recentTasks, allRelevantTasks } =\n\t\tanalysisData;\n\n\t// Create the context analysis display\n\tlet analysisContent = chalk.white.bold('Context Analysis') + '\\n\\n';\n\n\t// Query info\n\tanalysisContent +=\n\t\tchalk.gray('Query: ') + chalk.white(`\"${semanticQuery}\"`) + '\\n';\n\tanalysisContent +=\n\t\tchalk.gray('Context size: ') +\n\t\tchalk.cyan(`${contextSize.toLocaleString()} characters`) +\n\t\t'\\n';\n\tanalysisContent +=\n\t\tchalk.gray('Tasks found: ') +\n\t\tchalk.yellow(`${allRelevantTasks.length} relevant tasks`) +\n\t\t'\\n\\n';\n\n\t// High relevance matches\n\tif (highRelevance.length > 0) {\n\t\tanalysisContent += chalk.green.bold('🎯 High Relevance Matches:') + '\\n';\n\t\thighRelevance.slice(0, 3).forEach((task) => {\n\t\t\tanalysisContent +=\n\t\t\t\tchalk.green(` • Task ${task.id}: ${truncate(task.title, 50)}`) + '\\n';\n\t\t});\n\t\tif (highRelevance.length > 3) {\n\t\t\tanalysisContent +=\n\t\t\t\tchalk.green(\n\t\t\t\t\t` • ... 
and ${highRelevance.length - 3} more high relevance tasks`\n\t\t\t\t) + '\\n';\n\t\t}\n\t\tanalysisContent += '\\n';\n\t}\n\n\t// Medium relevance matches\n\tif (mediumRelevance.length > 0) {\n\t\tanalysisContent += chalk.yellow.bold('📋 Medium Relevance Matches:') + '\\n';\n\t\tmediumRelevance.slice(0, 3).forEach((task) => {\n\t\t\tanalysisContent +=\n\t\t\t\tchalk.yellow(` • Task ${task.id}: ${truncate(task.title, 50)}`) + '\\n';\n\t\t});\n\t\tif (mediumRelevance.length > 3) {\n\t\t\tanalysisContent +=\n\t\t\t\tchalk.yellow(\n\t\t\t\t\t` • ... and ${mediumRelevance.length - 3} more medium relevance tasks`\n\t\t\t\t) + '\\n';\n\t\t}\n\t\tanalysisContent += '\\n';\n\t}\n\n\t// Recent tasks (if they contributed)\n\tconst recentTasksNotInRelevance = recentTasks.filter(\n\t\t(task) =>\n\t\t\t!highRelevance.some((hr) => hr.id === task.id) &&\n\t\t\t!mediumRelevance.some((mr) => mr.id === task.id)\n\t);\n\n\tif (recentTasksNotInRelevance.length > 0) {\n\t\tanalysisContent += chalk.cyan.bold('🕒 Recent Tasks (for context):') + '\\n';\n\t\trecentTasksNotInRelevance.slice(0, 2).forEach((task) => {\n\t\t\tanalysisContent +=\n\t\t\t\tchalk.cyan(` • Task ${task.id}: ${truncate(task.title, 50)}`) + '\\n';\n\t\t});\n\t\tif (recentTasksNotInRelevance.length > 2) {\n\t\t\tanalysisContent +=\n\t\t\t\tchalk.cyan(\n\t\t\t\t\t` • ... 
and ${recentTasksNotInRelevance.length - 2} more recent tasks`\n\t\t\t\t) + '\\n';\n\t\t}\n\t}\n\n\tconsole.log(\n\t\tboxen(analysisContent, {\n\t\t\tpadding: { top: 1, bottom: 1, left: 2, right: 2 },\n\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\tborderStyle: 'round',\n\t\t\tborderColor: 'blue',\n\t\t\ttitle: chalk.blue('🔍 Context Gathering'),\n\t\t\ttitleAlignment: 'center'\n\t\t})\n\t);\n}\n\n// Export UI functions\nexport {\n\tdisplayBanner,\n\tdisplayTaggedTasksFYI,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tcreateProgressBar,\n\tgetStatusWithColor,\n\tformatDependenciesWithStatus,\n\tdisplayHelp,\n\tgetComplexityWithColor,\n\tdisplayNextTask,\n\tdisplayTaskById,\n\tdisplayComplexityReport,\n\tgenerateComplexityAnalysisPrompt,\n\tconfirmTaskOverwrite,\n\tdisplayApiKeyStatus,\n\tdisplayModelConfiguration,\n\tdisplayAvailableModels,\n\tdisplayAiUsageSummary,\n\tdisplayMultipleTasksSummary,\n\tsucceedLoadingIndicator,\n\tfailLoadingIndicator,\n\twarnLoadingIndicator,\n\tinfoLoadingIndicator,\n\tdisplayContextAnalysis,\n\tdisplayCurrentTagIndicator\n};\n"], ["/claude-task-master/scripts/modules/task-manager/remove-task.js", "import path from 'path';\nimport * as fs from 'fs';\nimport { readJSON, writeJSON, log, findTaskById } from '../utils.js';\nimport generateTaskFiles from './generate-task-files.js';\nimport taskExists from './task-exists.js';\n\n/**\n * Removes one or more tasks or subtasks from the tasks file\n * @param {string} tasksPath - Path to the tasks file\n * @param {string} taskIds - Comma-separated string of task/subtask IDs to remove (e.g., '5,6.1,7')\n * @param {Object} context - Context object containing projectRoot and tag information\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n * @returns {Object} Result object with success status, messages, and removed task info\n */\nasync function removeTask(tasksPath, taskIds, context = {}) {\n\tconst { projectRoot, tag } = 
context;\n\tconst results = {\n\t\tsuccess: true,\n\t\tmessages: [],\n\t\terrors: [],\n\t\tremovedTasks: []\n\t};\n\tconst taskIdsToRemove = taskIds\n\t\t.split(',')\n\t\t.map((id) => id.trim())\n\t\t.filter(Boolean); // Remove empty strings if any\n\n\tif (taskIdsToRemove.length === 0) {\n\t\tresults.success = false;\n\t\tresults.errors.push('No valid task IDs provided.');\n\t\treturn results;\n\t}\n\n\ttry {\n\t\t// Read the tasks file ONCE before the loop, preserving the full tagged structure\n\t\tconst rawData = readJSON(tasksPath, projectRoot, tag); // Read raw data\n\t\tif (!rawData) {\n\t\t\tthrow new Error(`Could not read tasks file at ${tasksPath}`);\n\t\t}\n\n\t\t// Use the full tagged data if available, otherwise use the data as is\n\t\tconst fullTaggedData = rawData._rawTaggedData || rawData;\n\n\t\tif (!fullTaggedData[tag] || !fullTaggedData[tag].tasks) {\n\t\t\tthrow new Error(`Tag '${tag}' not found or has no tasks.`);\n\t\t}\n\n\t\tconst tasks = fullTaggedData[tag].tasks; // Work with tasks from the correct tag\n\n\t\tconst tasksToDeleteFiles = []; // Collect IDs of main tasks whose files should be deleted\n\n\t\tfor (const taskId of taskIdsToRemove) {\n\t\t\t// Check if the task ID exists *before* attempting removal\n\t\t\tif (!taskExists(tasks, taskId)) {\n\t\t\t\tconst errorMsg = `Task with ID ${taskId} in tag '${tag}' not found or already removed.`;\n\t\t\t\tresults.errors.push(errorMsg);\n\t\t\t\tresults.success = false; // Mark overall success as false if any error occurs\n\t\t\t\tcontinue; // Skip to the next ID\n\t\t\t}\n\n\t\t\ttry {\n\t\t\t\t// Handle subtask removal (e.g., '5.2')\n\t\t\t\tif (typeof taskId === 'string' && taskId.includes('.')) {\n\t\t\t\t\tconst [parentTaskId, subtaskId] = taskId\n\t\t\t\t\t\t.split('.')\n\t\t\t\t\t\t.map((id) => parseInt(id, 10));\n\n\t\t\t\t\t// Find the parent task\n\t\t\t\t\tconst parentTask = tasks.find((t) => t.id === parentTaskId);\n\t\t\t\t\tif (!parentTask || !parentTask.subtasks) 
{\n\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t`Parent task ${parentTaskId} or its subtasks not found for subtask ${taskId}`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Find the subtask to remove\n\t\t\t\t\tconst subtaskIndex = parentTask.subtasks.findIndex(\n\t\t\t\t\t\t(st) => st.id === subtaskId\n\t\t\t\t\t);\n\t\t\t\t\tif (subtaskIndex === -1) {\n\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t`Subtask ${subtaskId} not found in parent task ${parentTaskId}`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Store the subtask info before removal\n\t\t\t\t\tconst removedSubtask = {\n\t\t\t\t\t\t...parentTask.subtasks[subtaskIndex],\n\t\t\t\t\t\tparentTaskId: parentTaskId\n\t\t\t\t\t};\n\t\t\t\t\tresults.removedTasks.push(removedSubtask);\n\n\t\t\t\t\t// Remove the subtask from the parent\n\t\t\t\t\tparentTask.subtasks.splice(subtaskIndex, 1);\n\n\t\t\t\t\tresults.messages.push(\n\t\t\t\t\t\t`Successfully removed subtask ${taskId} from tag '${tag}'`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\t// Handle main task removal\n\t\t\t\telse {\n\t\t\t\t\tconst taskIdNum = parseInt(taskId, 10);\n\t\t\t\t\tconst taskIndex = tasks.findIndex((t) => t.id === taskIdNum);\n\t\t\t\t\tif (taskIndex === -1) {\n\t\t\t\t\t\tthrow new Error(`Task with ID ${taskId} not found in tag '${tag}'`);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Store the task info before removal\n\t\t\t\t\tconst removedTask = tasks[taskIndex];\n\t\t\t\t\tresults.removedTasks.push(removedTask);\n\t\t\t\t\ttasksToDeleteFiles.push(taskIdNum); // Add to list for file deletion\n\n\t\t\t\t\t// Remove the task from the main array\n\t\t\t\t\ttasks.splice(taskIndex, 1);\n\n\t\t\t\t\tresults.messages.push(\n\t\t\t\t\t\t`Successfully removed task ${taskId} from tag '${tag}'`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} catch (innerError) {\n\t\t\t\t// Catch errors specific to processing *this* ID\n\t\t\t\tconst errorMsg = `Error processing ID ${taskId}: ${innerError.message}`;\n\t\t\t\tresults.errors.push(errorMsg);\n\t\t\t\tresults.success = 
false;\n\t\t\t\tlog('warn', errorMsg); // Log as warning and continue with next ID\n\t\t\t}\n\t\t} // End of loop through taskIdsToRemove\n\n\t\t// --- Post-Loop Operations ---\n\n\t\t// Only proceed with cleanup and saving if at least one task was potentially removed\n\t\tif (results.removedTasks.length > 0) {\n\t\t\tconst allRemovedIds = new Set(\n\t\t\t\ttaskIdsToRemove.map((id) =>\n\t\t\t\t\ttypeof id === 'string' && id.includes('.') ? id : parseInt(id, 10)\n\t\t\t\t)\n\t\t\t);\n\n\t\t\t// Update the tasks in the current tag of the full data structure\n\t\t\tfullTaggedData[tag].tasks = tasks;\n\n\t\t\t// Remove dependencies from all tags\n\t\t\tfor (const tagName in fullTaggedData) {\n\t\t\t\tif (\n\t\t\t\t\tObject.prototype.hasOwnProperty.call(fullTaggedData, tagName) &&\n\t\t\t\t\tfullTaggedData[tagName] &&\n\t\t\t\t\tfullTaggedData[tagName].tasks\n\t\t\t\t) {\n\t\t\t\t\tconst currentTagTasks = fullTaggedData[tagName].tasks;\n\t\t\t\t\tcurrentTagTasks.forEach((task) => {\n\t\t\t\t\t\tif (task.dependencies) {\n\t\t\t\t\t\t\ttask.dependencies = task.dependencies.filter(\n\t\t\t\t\t\t\t\t(depId) => !allRemovedIds.has(depId)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (task.subtasks) {\n\t\t\t\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\t\t\t\tif (subtask.dependencies) {\n\t\t\t\t\t\t\t\t\tsubtask.dependencies = subtask.dependencies.filter(\n\t\t\t\t\t\t\t\t\t\t(depId) =>\n\t\t\t\t\t\t\t\t\t\t\t!allRemovedIds.has(`${task.id}.${depId}`) &&\n\t\t\t\t\t\t\t\t\t\t\t!allRemovedIds.has(depId)\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Save the updated raw data structure\n\t\t\twriteJSON(tasksPath, fullTaggedData, projectRoot, tag);\n\n\t\t\t// Delete task files AFTER saving tasks.json\n\t\t\tfor (const taskIdNum of tasksToDeleteFiles) {\n\t\t\t\tconst taskFileName = path.join(\n\t\t\t\t\tpath.dirname(tasksPath),\n\t\t\t\t\t`task_${taskIdNum.toString().padStart(3, 
'0')}.txt`\n\t\t\t\t);\n\t\t\t\tif (fs.existsSync(taskFileName)) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tfs.unlinkSync(taskFileName);\n\t\t\t\t\t\tresults.messages.push(`Deleted task file: ${taskFileName}`);\n\t\t\t\t\t} catch (unlinkError) {\n\t\t\t\t\t\tconst unlinkMsg = `Failed to delete task file ${taskFileName}: ${unlinkError.message}`;\n\t\t\t\t\t\tresults.errors.push(unlinkMsg);\n\t\t\t\t\t\tresults.success = false;\n\t\t\t\t\t\tlog('warn', unlinkMsg);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Generate updated task files ONCE, with context\n\t\t\t// try {\n\t\t\t// \tawait generateTaskFiles(tasksPath, path.dirname(tasksPath), {\n\t\t\t// \t\tprojectRoot,\n\t\t\t// \t\ttag\n\t\t\t// \t});\n\t\t\t// \tresults.messages.push('Task files regenerated successfully.');\n\t\t\t// } catch (genError) {\n\t\t\t// \tconst genErrMsg = `Failed to regenerate task files: ${genError.message}`;\n\t\t\t// \tresults.errors.push(genErrMsg);\n\t\t\t// \tresults.success = false;\n\t\t\t// \tlog('warn', genErrMsg);\n\t\t\t// }\n\t\t} else if (results.errors.length === 0) {\n\t\t\tresults.messages.push('No tasks found matching the provided IDs.');\n\t\t}\n\n\t\t// Consolidate messages for final output\n\t\tconst finalMessage = results.messages.join('\\n');\n\t\tconst finalError = results.errors.join('\\n');\n\n\t\treturn {\n\t\t\tsuccess: results.success,\n\t\t\tmessage: finalMessage || 'No tasks were removed.',\n\t\t\terror: finalError || null,\n\t\t\tremovedTasks: results.removedTasks\n\t\t};\n\t} catch (error) {\n\t\t// Catch errors from reading file or other initial setup\n\t\tlog('error', `Error removing tasks: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\tmessage: '',\n\t\t\terror: `Operation failed: ${error.message}`,\n\t\t\tremovedTasks: []\n\t\t};\n\t}\n}\n\nexport default removeTask;\n"], ["/claude-task-master/scripts/modules/task-manager/move-task.js", "import path from 'path';\nimport { log, readJSON, writeJSON, setTasksForTag } from '../utils.js';\nimport { 
isTaskDependentOn } from '../task-manager.js';\nimport generateTaskFiles from './generate-task-files.js';\n\n/**\n * Move one or more tasks/subtasks to new positions\n * @param {string} tasksPath - Path to tasks.json file\n * @param {string} sourceId - ID(s) of the task/subtask to move (e.g., '5' or '5.2' or '5,6,7')\n * @param {string} destinationId - ID(s) of the destination (e.g., '7' or '7.3' or '7,8,9')\n * @param {boolean} generateFiles - Whether to regenerate task files after moving\n * @param {Object} options - Additional options\n * @param {string} options.projectRoot - Project root directory for tag resolution\n * @param {string} options.tag - Explicit tag to use (optional)\n * @returns {Object} Result object with moved task details\n */\nasync function moveTask(\n\ttasksPath,\n\tsourceId,\n\tdestinationId,\n\tgenerateFiles = false,\n\toptions = {}\n) {\n\tconst { projectRoot, tag } = options;\n\t// Check if we have comma-separated IDs (batch move)\n\tconst sourceIds = sourceId.split(',').map((id) => id.trim());\n\tconst destinationIds = destinationId.split(',').map((id) => id.trim());\n\n\tif (sourceIds.length !== destinationIds.length) {\n\t\tthrow new Error(\n\t\t\t`Number of source IDs (${sourceIds.length}) must match number of destination IDs (${destinationIds.length})`\n\t\t);\n\t}\n\n\t// For batch moves, process each pair sequentially\n\tif (sourceIds.length > 1) {\n\t\tconst results = [];\n\t\tfor (let i = 0; i < sourceIds.length; i++) {\n\t\t\tconst result = await moveTask(\n\t\t\t\ttasksPath,\n\t\t\t\tsourceIds[i],\n\t\t\t\tdestinationIds[i],\n\t\t\t\tfalse, // Don't generate files for each individual move\n\t\t\t\toptions\n\t\t\t);\n\t\t\tresults.push(result);\n\t\t}\n\n\t\t// Generate files once at the end if requested\n\t\tif (generateFiles) {\n\t\t\tawait generateTaskFiles(tasksPath, path.dirname(tasksPath), {\n\t\t\t\ttag: tag,\n\t\t\t\tprojectRoot: projectRoot\n\t\t\t});\n\t\t}\n\n\t\treturn {\n\t\t\tmessage: `Successfully moved 
${sourceIds.length} tasks/subtasks`,\n\t\t\tmoves: results\n\t\t};\n\t}\n\n\t// Single move logic\n\t// Read the raw data without tag resolution to preserve tagged structure\n\tlet rawData = readJSON(tasksPath, projectRoot, tag);\n\n\t// Handle the case where readJSON returns resolved data with _rawTaggedData\n\tif (rawData && rawData._rawTaggedData) {\n\t\t// Use the raw tagged data and discard the resolved view\n\t\trawData = rawData._rawTaggedData;\n\t}\n\n\t// Ensure the tag exists in the raw data\n\tif (!rawData || !rawData[tag] || !Array.isArray(rawData[tag].tasks)) {\n\t\tthrow new Error(\n\t\t\t`Invalid tasks file or tag \"${tag}\" not found at ${tasksPath}`\n\t\t);\n\t}\n\n\t// Get the tasks for the current tag\n\tconst tasks = rawData[tag].tasks;\n\n\tlog(\n\t\t'info',\n\t\t`Moving task/subtask ${sourceId} to ${destinationId} (tag: ${tag})`\n\t);\n\n\t// Parse source and destination IDs\n\tconst isSourceSubtask = sourceId.includes('.');\n\tconst isDestSubtask = destinationId.includes('.');\n\n\tlet result;\n\n\tif (isSourceSubtask && isDestSubtask) {\n\t\t// Subtask to subtask\n\t\tresult = moveSubtaskToSubtask(tasks, sourceId, destinationId);\n\t} else if (isSourceSubtask && !isDestSubtask) {\n\t\t// Subtask to task\n\t\tresult = moveSubtaskToTask(tasks, sourceId, destinationId);\n\t} else if (!isSourceSubtask && isDestSubtask) {\n\t\t// Task to subtask\n\t\tresult = moveTaskToSubtask(tasks, sourceId, destinationId);\n\t} else {\n\t\t// Task to task\n\t\tresult = moveTaskToTask(tasks, sourceId, destinationId);\n\t}\n\n\t// Update the data structure with the modified tasks\n\trawData[tag].tasks = tasks;\n\n\t// Always write the data object, never the _rawTaggedData directly\n\t// The writeJSON function will filter out _rawTaggedData automatically\n\twriteJSON(tasksPath, rawData, options.projectRoot, tag);\n\n\tif (generateFiles) {\n\t\tawait generateTaskFiles(tasksPath, path.dirname(tasksPath), {\n\t\t\ttag: tag,\n\t\t\tprojectRoot: 
projectRoot\n\t\t});\n\t}\n\n\treturn result;\n}\n\n// Helper functions for different move scenarios\nfunction moveSubtaskToSubtask(tasks, sourceId, destinationId) {\n\t// Parse IDs\n\tconst [sourceParentId, sourceSubtaskId] = sourceId\n\t\t.split('.')\n\t\t.map((id) => parseInt(id, 10));\n\tconst [destParentId, destSubtaskId] = destinationId\n\t\t.split('.')\n\t\t.map((id) => parseInt(id, 10));\n\n\t// Find source and destination parent tasks\n\tconst sourceParentTask = tasks.find((t) => t.id === sourceParentId);\n\tconst destParentTask = tasks.find((t) => t.id === destParentId);\n\n\tif (!sourceParentTask) {\n\t\tthrow new Error(`Source parent task with ID ${sourceParentId} not found`);\n\t}\n\tif (!destParentTask) {\n\t\tthrow new Error(\n\t\t\t`Destination parent task with ID ${destParentId} not found`\n\t\t);\n\t}\n\n\t// Initialize subtasks arrays if they don't exist (based on commit fixes)\n\tif (!sourceParentTask.subtasks) {\n\t\tsourceParentTask.subtasks = [];\n\t}\n\tif (!destParentTask.subtasks) {\n\t\tdestParentTask.subtasks = [];\n\t}\n\n\t// Find source subtask\n\tconst sourceSubtaskIndex = sourceParentTask.subtasks.findIndex(\n\t\t(st) => st.id === sourceSubtaskId\n\t);\n\tif (sourceSubtaskIndex === -1) {\n\t\tthrow new Error(`Source subtask ${sourceId} not found`);\n\t}\n\n\tconst sourceSubtask = sourceParentTask.subtasks[sourceSubtaskIndex];\n\n\tif (sourceParentId === destParentId) {\n\t\t// Moving within the same parent\n\t\tif (destParentTask.subtasks.length > 0) {\n\t\t\tconst destSubtaskIndex = destParentTask.subtasks.findIndex(\n\t\t\t\t(st) => st.id === destSubtaskId\n\t\t\t);\n\t\t\tif (destSubtaskIndex !== -1) {\n\t\t\t\t// Remove from old position\n\t\t\t\tsourceParentTask.subtasks.splice(sourceSubtaskIndex, 1);\n\t\t\t\t// Insert at new position (adjust index if moving within same array)\n\t\t\t\tconst adjustedIndex =\n\t\t\t\t\tsourceSubtaskIndex < destSubtaskIndex\n\t\t\t\t\t\t? 
destSubtaskIndex - 1\n\t\t\t\t\t\t: destSubtaskIndex;\n\t\t\t\tdestParentTask.subtasks.splice(adjustedIndex + 1, 0, sourceSubtask);\n\t\t\t} else {\n\t\t\t\t// Destination subtask doesn't exist, insert at end\n\t\t\t\tsourceParentTask.subtasks.splice(sourceSubtaskIndex, 1);\n\t\t\t\tdestParentTask.subtasks.push(sourceSubtask);\n\t\t\t}\n\t\t} else {\n\t\t\t// No existing subtasks, this will be the first one\n\t\t\tsourceParentTask.subtasks.splice(sourceSubtaskIndex, 1);\n\t\t\tdestParentTask.subtasks.push(sourceSubtask);\n\t\t}\n\t} else {\n\t\t// Moving between different parents\n\t\tmoveSubtaskToAnotherParent(\n\t\t\tsourceSubtask,\n\t\t\tsourceParentTask,\n\t\t\tsourceSubtaskIndex,\n\t\t\tdestParentTask,\n\t\t\tdestSubtaskId\n\t\t);\n\t}\n\n\treturn {\n\t\tmessage: `Moved subtask ${sourceId} to ${destinationId}`,\n\t\tmovedItem: sourceSubtask\n\t};\n}\n\nfunction moveSubtaskToTask(tasks, sourceId, destinationId) {\n\t// Parse source ID\n\tconst [sourceParentId, sourceSubtaskId] = sourceId\n\t\t.split('.')\n\t\t.map((id) => parseInt(id, 10));\n\tconst destTaskId = parseInt(destinationId, 10);\n\n\t// Find source parent and destination task\n\tconst sourceParentTask = tasks.find((t) => t.id === sourceParentId);\n\n\tif (!sourceParentTask) {\n\t\tthrow new Error(`Source parent task with ID ${sourceParentId} not found`);\n\t}\n\tif (!sourceParentTask.subtasks) {\n\t\tthrow new Error(`Source parent task ${sourceParentId} has no subtasks`);\n\t}\n\n\t// Find source subtask\n\tconst sourceSubtaskIndex = sourceParentTask.subtasks.findIndex(\n\t\t(st) => st.id === sourceSubtaskId\n\t);\n\tif (sourceSubtaskIndex === -1) {\n\t\tthrow new Error(`Source subtask ${sourceId} not found`);\n\t}\n\n\tconst sourceSubtask = sourceParentTask.subtasks[sourceSubtaskIndex];\n\n\t// Check if destination task exists\n\tconst existingDestTask = tasks.find((t) => t.id === destTaskId);\n\tif (existingDestTask) {\n\t\tthrow new Error(\n\t\t\t`Cannot move to existing task ID ${destTaskId}. 
Choose a different ID or use subtask destination.`\n\t\t);\n\t}\n\n\t// Create new task from subtask\n\tconst newTask = {\n\t\tid: destTaskId,\n\t\ttitle: sourceSubtask.title,\n\t\tdescription: sourceSubtask.description,\n\t\tstatus: sourceSubtask.status || 'pending',\n\t\tdependencies: sourceSubtask.dependencies || [],\n\t\tpriority: sourceSubtask.priority || 'medium',\n\t\tdetails: sourceSubtask.details || '',\n\t\ttestStrategy: sourceSubtask.testStrategy || '',\n\t\tsubtasks: []\n\t};\n\n\t// Remove subtask from source parent\n\tsourceParentTask.subtasks.splice(sourceSubtaskIndex, 1);\n\n\t// Insert new task in correct position\n\tconst insertIndex = tasks.findIndex((t) => t.id > destTaskId);\n\tif (insertIndex === -1) {\n\t\ttasks.push(newTask);\n\t} else {\n\t\ttasks.splice(insertIndex, 0, newTask);\n\t}\n\n\treturn {\n\t\tmessage: `Converted subtask ${sourceId} to task ${destinationId}`,\n\t\tmovedItem: newTask\n\t};\n}\n\nfunction moveTaskToSubtask(tasks, sourceId, destinationId) {\n\t// Parse IDs\n\tconst sourceTaskId = parseInt(sourceId, 10);\n\tconst [destParentId, destSubtaskId] = destinationId\n\t\t.split('.')\n\t\t.map((id) => parseInt(id, 10));\n\n\t// Find source task and destination parent\n\tconst sourceTaskIndex = tasks.findIndex((t) => t.id === sourceTaskId);\n\tconst destParentTask = tasks.find((t) => t.id === destParentId);\n\n\tif (sourceTaskIndex === -1) {\n\t\tthrow new Error(`Source task with ID ${sourceTaskId} not found`);\n\t}\n\tif (!destParentTask) {\n\t\tthrow new Error(\n\t\t\t`Destination parent task with ID ${destParentId} not found`\n\t\t);\n\t}\n\n\tconst sourceTask = tasks[sourceTaskIndex];\n\n\t// Initialize subtasks array if it doesn't exist (based on commit fixes)\n\tif (!destParentTask.subtasks) {\n\t\tdestParentTask.subtasks = [];\n\t}\n\n\t// Create new subtask from task\n\tconst newSubtask = {\n\t\tid: destSubtaskId,\n\t\ttitle: sourceTask.title,\n\t\tdescription: sourceTask.description,\n\t\tstatus: sourceTask.status || 
'pending',\n\t\tdependencies: sourceTask.dependencies || [],\n\t\tdetails: sourceTask.details || '',\n\t\ttestStrategy: sourceTask.testStrategy || ''\n\t};\n\n\t// Find insertion position (based on commit fixes)\n\tlet destSubtaskIndex = -1;\n\tif (destParentTask.subtasks.length > 0) {\n\t\tdestSubtaskIndex = destParentTask.subtasks.findIndex(\n\t\t\t(st) => st.id === destSubtaskId\n\t\t);\n\t\tif (destSubtaskIndex === -1) {\n\t\t\t// Subtask doesn't exist, we'll insert at the end\n\t\t\tdestSubtaskIndex = destParentTask.subtasks.length - 1;\n\t\t}\n\t}\n\n\t// Insert at specific position (based on commit fixes)\n\tconst insertPosition = destSubtaskIndex === -1 ? 0 : destSubtaskIndex + 1;\n\tdestParentTask.subtasks.splice(insertPosition, 0, newSubtask);\n\n\t// Remove the original task from the tasks array\n\ttasks.splice(sourceTaskIndex, 1);\n\n\treturn {\n\t\tmessage: `Converted task ${sourceId} to subtask ${destinationId}`,\n\t\tmovedItem: newSubtask\n\t};\n}\n\nfunction moveTaskToTask(tasks, sourceId, destinationId) {\n\tconst sourceTaskId = parseInt(sourceId, 10);\n\tconst destTaskId = parseInt(destinationId, 10);\n\n\t// Find source task\n\tconst sourceTaskIndex = tasks.findIndex((t) => t.id === sourceTaskId);\n\tif (sourceTaskIndex === -1) {\n\t\tthrow new Error(`Source task with ID ${sourceTaskId} not found`);\n\t}\n\n\tconst sourceTask = tasks[sourceTaskIndex];\n\n\t// Check if destination exists\n\tconst destTaskIndex = tasks.findIndex((t) => t.id === destTaskId);\n\n\tif (destTaskIndex !== -1) {\n\t\t// Destination exists - this could be overwriting or swapping\n\t\tconst destTask = tasks[destTaskIndex];\n\n\t\t// For now, throw an error to avoid accidental overwrites\n\t\tthrow new Error(\n\t\t\t`Task with ID ${destTaskId} already exists. 
Use a different destination ID.`\n\t\t);\n\t} else {\n\t\t// Destination doesn't exist - create new task ID\n\t\treturn moveTaskToNewId(tasks, sourceTaskIndex, sourceTask, destTaskId);\n\t}\n}\n\nfunction moveSubtaskToAnotherParent(\n\tsourceSubtask,\n\tsourceParentTask,\n\tsourceSubtaskIndex,\n\tdestParentTask,\n\tdestSubtaskId\n) {\n\tconst destSubtaskId_num = parseInt(destSubtaskId, 10);\n\n\t// Create new subtask with destination ID\n\tconst newSubtask = {\n\t\t...sourceSubtask,\n\t\tid: destSubtaskId_num\n\t};\n\n\t// Initialize subtasks array if it doesn't exist (based on commit fixes)\n\tif (!destParentTask.subtasks) {\n\t\tdestParentTask.subtasks = [];\n\t}\n\n\t// Find insertion position\n\tlet destSubtaskIndex = -1;\n\tif (destParentTask.subtasks.length > 0) {\n\t\tdestSubtaskIndex = destParentTask.subtasks.findIndex(\n\t\t\t(st) => st.id === destSubtaskId_num\n\t\t);\n\t\tif (destSubtaskIndex === -1) {\n\t\t\t// Subtask doesn't exist, we'll insert at the end\n\t\t\tdestSubtaskIndex = destParentTask.subtasks.length - 1;\n\t\t}\n\t}\n\n\t// Insert at the destination position (based on commit fixes)\n\tconst insertPosition = destSubtaskIndex === -1 ? 
0 : destSubtaskIndex + 1;\n\tdestParentTask.subtasks.splice(insertPosition, 0, newSubtask);\n\n\t// Remove the subtask from the original parent\n\tsourceParentTask.subtasks.splice(sourceSubtaskIndex, 1);\n\n\treturn newSubtask;\n}\n\nfunction moveTaskToNewId(tasks, sourceTaskIndex, sourceTask, destTaskId) {\n\tconst destTaskIndex = tasks.findIndex((t) => t.id === destTaskId);\n\n\t// Create moved task with new ID\n\tconst movedTask = {\n\t\t...sourceTask,\n\t\tid: destTaskId\n\t};\n\n\t// Update any dependencies that reference the old task ID\n\ttasks.forEach((task) => {\n\t\tif (task.dependencies && task.dependencies.includes(sourceTask.id)) {\n\t\t\tconst depIndex = task.dependencies.indexOf(sourceTask.id);\n\t\t\ttask.dependencies[depIndex] = destTaskId;\n\t\t}\n\t\tif (task.subtasks) {\n\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\tif (\n\t\t\t\t\tsubtask.dependencies &&\n\t\t\t\t\tsubtask.dependencies.includes(sourceTask.id)\n\t\t\t\t) {\n\t\t\t\t\tconst depIndex = subtask.dependencies.indexOf(sourceTask.id);\n\t\t\t\t\tsubtask.dependencies[depIndex] = destTaskId;\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\n\t// Update dependencies within movedTask's subtasks that reference sibling subtasks\n\tif (Array.isArray(movedTask.subtasks)) {\n\t\tmovedTask.subtasks.forEach((subtask) => {\n\t\t\tif (Array.isArray(subtask.dependencies)) {\n\t\t\t\tsubtask.dependencies = subtask.dependencies.map((dep) => {\n\t\t\t\t\t// If dependency is a string like \"oldParent.subId\", update to \"newParent.subId\"\n\t\t\t\t\tif (typeof dep === 'string' && dep.includes('.')) {\n\t\t\t\t\t\tconst [depParent, depSub] = dep.split('.');\n\t\t\t\t\t\tif (parseInt(depParent, 10) === sourceTask.id) {\n\t\t\t\t\t\t\treturn `${destTaskId}.${depSub}`;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t// If dependency is a number, and matches a subtask ID in the moved task, leave as is (context is implied)\n\t\t\t\t\treturn dep;\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\t}\n\n\t// Strategy based on commit fixes: 
remove source first, then replace destination\n\t// This avoids index shifting problems\n\n\t// Remove the source task first\n\ttasks.splice(sourceTaskIndex, 1);\n\n\t// Adjust the destination index if the source was before the destination\n\t// Since we removed the source, indices after it shift down by 1\n\tconst adjustedDestIndex =\n\t\tsourceTaskIndex < destTaskIndex ? destTaskIndex - 1 : destTaskIndex;\n\n\t// Replace the placeholder destination task with the moved task (based on commit fixes)\n\tif (adjustedDestIndex >= 0 && adjustedDestIndex < tasks.length) {\n\t\ttasks[adjustedDestIndex] = movedTask;\n\t} else {\n\t\t// Insert at the end if index is out of bounds\n\t\ttasks.push(movedTask);\n\t}\n\n\tlog('info', `Moved task ${sourceTask.id} to new ID ${destTaskId}`);\n\n\treturn {\n\t\tmessage: `Moved task ${sourceTask.id} to new ID ${destTaskId}`,\n\t\tmovedItem: movedTask\n\t};\n}\n\nexport default moveTask;\n"], ["/claude-task-master/scripts/modules/task-manager/analyze-task-complexity.js", "import chalk from 'chalk';\nimport boxen from 'boxen';\nimport readline from 'readline';\nimport fs from 'fs';\n\nimport { log, readJSON, writeJSON, isSilentMode } from '../utils.js';\n\nimport {\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayAiUsageSummary\n} from '../ui.js';\n\nimport { generateTextService } from '../ai-services-unified.js';\n\nimport { getDebugFlag, getProjectName } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport {\n\tCOMPLEXITY_REPORT_FILE,\n\tLEGACY_TASKS_FILE\n} from '../../../src/constants/paths.js';\nimport { resolveComplexityReportOutputPath } from '../../../src/utils/path-utils.js';\nimport { ContextGatherer } from '../utils/contextGatherer.js';\nimport { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';\nimport { flattenTasksWithSubtasks } from '../utils.js';\n\n/**\n * Generates the prompt for complexity analysis.\n * (Moved from ai-services.js and simplified)\n * @param 
{Object} tasksData - The tasks data object.\n * @param {string} [gatheredContext] - The gathered context for the analysis.\n * @returns {string} The generated prompt.\n */\nfunction generateInternalComplexityAnalysisPrompt(\n\ttasksData,\n\tgatheredContext = ''\n) {\n\tconst tasksString = JSON.stringify(tasksData.tasks, null, 2);\n\tlet prompt = `Analyze the following tasks to determine their complexity (1-10 scale) and recommend the number of subtasks for expansion. Provide a brief reasoning and an initial expansion prompt for each.\n\nTasks:\n${tasksString}`;\n\n\tif (gatheredContext) {\n\t\tprompt += `\\n\\n# Project Context\\n\\n${gatheredContext}`;\n\t}\n\n\tprompt += `\n\nRespond ONLY with a valid JSON array matching the schema:\n[\n {\n \"taskId\": <number>,\n \"taskTitle\": \"<string>\",\n \"complexityScore\": <number 1-10>,\n \"recommendedSubtasks\": <number>,\n \"expansionPrompt\": \"<string>\",\n \"reasoning\": \"<string>\"\n },\n ...\n]\n\nDo not include any explanatory text, markdown formatting, or code block markers before or after the JSON array.`;\n\treturn prompt;\n}\n\n/**\n * Analyzes task complexity and generates expansion recommendations\n * @param {Object} options Command options\n * @param {string} options.file - Path to tasks file\n * @param {string} options.output - Path to report output file\n * @param {string|number} [options.threshold] - Complexity threshold\n * @param {boolean} [options.research] - Use research role\n * @param {string} [options.projectRoot] - Project root path (for MCP/env fallback).\n * @param {string} [options.tag] - Tag for the task\n * @param {string} [options.id] - Comma-separated list of task IDs to analyze specifically\n * @param {number} [options.from] - Starting task ID in a range to analyze\n * @param {number} [options.to] - Ending task ID in a range to analyze\n * @param {Object} [options._filteredTasksData] - Pre-filtered task data (internal use)\n * @param {number} [options._originalTaskCount] - Original 
task count (internal use)\n * @param {Object} context - Context object, potentially containing session and mcpLog\n * @param {Object} [context.session] - Session object from MCP server (optional)\n * @param {Object} [context.mcpLog] - MCP logger object (optional)\n * @param {function} [context.reportProgress] - Deprecated: Function to report progress (ignored)\n */\nasync function analyzeTaskComplexity(options, context = {}) {\n\tconst { session, mcpLog } = context;\n\tconst tasksPath = options.file || LEGACY_TASKS_FILE;\n\tconst thresholdScore = parseFloat(options.threshold || '5');\n\tconst useResearch = options.research || false;\n\tconst projectRoot = options.projectRoot;\n\tconst tag = options.tag;\n\t// New parameters for task ID filtering\n\tconst specificIds = options.id\n\t\t? options.id\n\t\t\t\t.split(',')\n\t\t\t\t.map((id) => parseInt(id.trim(), 10))\n\t\t\t\t.filter((id) => !Number.isNaN(id))\n\t\t: null;\n\tconst fromId = options.from !== undefined ? parseInt(options.from, 10) : null;\n\tconst toId = options.to !== undefined ? parseInt(options.to, 10) : null;\n\n\tconst outputFormat = mcpLog ? 
'json' : 'text';\n\n\tconst reportLog = (message, level = 'info') => {\n\t\tif (mcpLog) {\n\t\t\tmcpLog[level](message);\n\t\t} else if (!isSilentMode() && outputFormat === 'text') {\n\t\t\tlog(level, message);\n\t\t}\n\t};\n\n\t// Resolve output path using tag-aware resolution\n\tconst outputPath = resolveComplexityReportOutputPath(\n\t\toptions.output,\n\t\t{ projectRoot, tag },\n\t\treportLog\n\t);\n\n\tif (outputFormat === 'text') {\n\t\tconsole.log(\n\t\t\tchalk.blue(\n\t\t\t\t'Analyzing task complexity and generating expansion recommendations...'\n\t\t\t)\n\t\t);\n\t}\n\n\ttry {\n\t\treportLog(`Reading tasks from ${tasksPath}...`, 'info');\n\t\tlet tasksData;\n\t\tlet originalTaskCount = 0;\n\t\tlet originalData = null;\n\n\t\tif (options._filteredTasksData) {\n\t\t\ttasksData = options._filteredTasksData;\n\t\t\toriginalTaskCount = options._originalTaskCount || tasksData.tasks.length;\n\t\t\tif (!options._originalTaskCount) {\n\t\t\t\ttry {\n\t\t\t\t\toriginalData = readJSON(tasksPath, projectRoot, tag);\n\t\t\t\t\tif (originalData && originalData.tasks) {\n\t\t\t\t\t\toriginalTaskCount = originalData.tasks.length;\n\t\t\t\t\t}\n\t\t\t\t} catch (e) {\n\t\t\t\t\tlog('warn', `Could not read original tasks file: ${e.message}`);\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\toriginalData = readJSON(tasksPath, projectRoot, tag);\n\t\t\tif (\n\t\t\t\t!originalData ||\n\t\t\t\t!originalData.tasks ||\n\t\t\t\t!Array.isArray(originalData.tasks) ||\n\t\t\t\toriginalData.tasks.length === 0\n\t\t\t) {\n\t\t\t\tthrow new Error('No tasks found in the tasks file');\n\t\t\t}\n\t\t\toriginalTaskCount = originalData.tasks.length;\n\n\t\t\t// Filter tasks based on active status\n\t\t\tconst activeStatuses = ['pending', 'blocked', 'in-progress'];\n\t\t\tlet filteredTasks = originalData.tasks.filter((task) =>\n\t\t\t\tactiveStatuses.includes(task.status?.toLowerCase() || 'pending')\n\t\t\t);\n\n\t\t\t// Apply ID filtering if specified\n\t\t\tif (specificIds && specificIds.length > 0) 
{\n\t\t\t\treportLog(\n\t\t\t\t\t`Filtering tasks by specific IDs: ${specificIds.join(', ')}`,\n\t\t\t\t\t'info'\n\t\t\t\t);\n\t\t\t\tfilteredTasks = filteredTasks.filter((task) =>\n\t\t\t\t\tspecificIds.includes(task.id)\n\t\t\t\t);\n\n\t\t\t\tif (outputFormat === 'text') {\n\t\t\t\t\tif (filteredTasks.length === 0 && specificIds.length > 0) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t`Warning: No active tasks found with IDs: ${specificIds.join(', ')}`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t} else if (filteredTasks.length < specificIds.length) {\n\t\t\t\t\t\tconst foundIds = filteredTasks.map((t) => t.id);\n\t\t\t\t\t\tconst missingIds = specificIds.filter(\n\t\t\t\t\t\t\t(id) => !foundIds.includes(id)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t`Warning: Some requested task IDs were not found or are not active: ${missingIds.join(', ')}`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Apply range filtering if specified\n\t\t\telse if (fromId !== null || toId !== null) {\n\t\t\t\tconst effectiveFromId = fromId !== null ? fromId : 1;\n\t\t\t\tconst effectiveToId =\n\t\t\t\t\ttoId !== null\n\t\t\t\t\t\t? 
toId\n\t\t\t\t\t\t: Math.max(...originalData.tasks.map((t) => t.id));\n\n\t\t\t\treportLog(\n\t\t\t\t\t`Filtering tasks by ID range: ${effectiveFromId} to ${effectiveToId}`,\n\t\t\t\t\t'info'\n\t\t\t\t);\n\t\t\t\tfilteredTasks = filteredTasks.filter(\n\t\t\t\t\t(task) => task.id >= effectiveFromId && task.id <= effectiveToId\n\t\t\t\t);\n\n\t\t\t\tif (outputFormat === 'text' && filteredTasks.length === 0) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`Warning: No active tasks found in range: ${effectiveFromId}-${effectiveToId}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttasksData = {\n\t\t\t\t...originalData,\n\t\t\t\ttasks: filteredTasks,\n\t\t\t\t_originalTaskCount: originalTaskCount\n\t\t\t};\n\t\t}\n\n\t\t// --- Context Gathering ---\n\t\tlet gatheredContext = '';\n\t\tif (originalData && originalData.tasks.length > 0) {\n\t\t\ttry {\n\t\t\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\t\t\t\tconst allTasksFlat = flattenTasksWithSubtasks(originalData.tasks);\n\t\t\t\tconst fuzzySearch = new FuzzyTaskSearch(\n\t\t\t\t\tallTasksFlat,\n\t\t\t\t\t'analyze-complexity'\n\t\t\t\t);\n\t\t\t\t// Create a query from the tasks being analyzed\n\t\t\t\tconst searchQuery = tasksData.tasks\n\t\t\t\t\t.map((t) => `${t.title} ${t.description}`)\n\t\t\t\t\t.join(' ');\n\t\t\t\tconst searchResults = fuzzySearch.findRelevantTasks(searchQuery, {\n\t\t\t\t\tmaxResults: 10\n\t\t\t\t});\n\t\t\t\tconst relevantTaskIds = fuzzySearch.getTaskIds(searchResults);\n\n\t\t\t\tif (relevantTaskIds.length > 0) {\n\t\t\t\t\tconst contextResult = await contextGatherer.gather({\n\t\t\t\t\t\ttasks: relevantTaskIds,\n\t\t\t\t\t\tformat: 'research'\n\t\t\t\t\t});\n\t\t\t\t\tgatheredContext = contextResult.context || '';\n\t\t\t\t}\n\t\t\t} catch (contextError) {\n\t\t\t\treportLog(\n\t\t\t\t\t`Could not gather additional context: ${contextError.message}`,\n\t\t\t\t\t'warn'\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t\t// --- End Context Gathering 
---\n\n\t\tconst skippedCount = originalTaskCount - tasksData.tasks.length;\n\t\treportLog(\n\t\t\t`Found ${originalTaskCount} total tasks in the task file.`,\n\t\t\t'info'\n\t\t);\n\n\t\t// Updated messaging to reflect filtering logic\n\t\tif (specificIds || fromId !== null || toId !== null) {\n\t\t\tconst filterMsg = specificIds\n\t\t\t\t? `Analyzing ${tasksData.tasks.length} tasks with specific IDs: ${specificIds.join(', ')}`\n\t\t\t\t: `Analyzing ${tasksData.tasks.length} tasks in range: ${fromId || 1} to ${toId || 'end'}`;\n\n\t\t\treportLog(filterMsg, 'info');\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsole.log(chalk.blue(filterMsg));\n\t\t\t}\n\t\t} else if (skippedCount > 0) {\n\t\t\tconst skipMessage = `Skipping ${skippedCount} tasks marked as done/cancelled/deferred. Analyzing ${tasksData.tasks.length} active tasks.`;\n\t\t\treportLog(skipMessage, 'info');\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsole.log(chalk.yellow(skipMessage));\n\t\t\t}\n\t\t}\n\n\t\t// Check for existing report before doing analysis\n\t\tlet existingReport = null;\n\t\tconst existingAnalysisMap = new Map(); // For quick lookups by task ID\n\t\ttry {\n\t\t\tif (fs.existsSync(outputPath)) {\n\t\t\t\texistingReport = JSON.parse(fs.readFileSync(outputPath, 'utf8'));\n\t\t\t\treportLog(`Found existing complexity report at ${outputPath}`, 'info');\n\n\t\t\t\tif (\n\t\t\t\t\texistingReport &&\n\t\t\t\t\texistingReport.complexityAnalysis &&\n\t\t\t\t\tArray.isArray(existingReport.complexityAnalysis)\n\t\t\t\t) {\n\t\t\t\t\t// Create lookup map of existing analysis entries\n\t\t\t\t\texistingReport.complexityAnalysis.forEach((item) => {\n\t\t\t\t\t\texistingAnalysisMap.set(item.taskId, item);\n\t\t\t\t\t});\n\t\t\t\t\treportLog(\n\t\t\t\t\t\t`Existing report contains ${existingReport.complexityAnalysis.length} task analyses`,\n\t\t\t\t\t\t'info'\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (readError) {\n\t\t\treportLog(\n\t\t\t\t`Warning: Could not read existing report: 
${readError.message}`,\n\t\t\t\t'warn'\n\t\t\t);\n\t\t\texistingReport = null;\n\t\t\texistingAnalysisMap.clear();\n\t\t}\n\n\t\tif (tasksData.tasks.length === 0) {\n\t\t\t// If using ID filtering but no matching tasks, return existing report or empty\n\t\t\tif (existingReport && (specificIds || fromId !== null || toId !== null)) {\n\t\t\t\treportLog(\n\t\t\t\t\t'No matching tasks found for analysis. Keeping existing report.',\n\t\t\t\t\t'info'\n\t\t\t\t);\n\t\t\t\tif (outputFormat === 'text') {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'No matching tasks found for analysis. Keeping existing report.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\treturn {\n\t\t\t\t\treport: existingReport,\n\t\t\t\t\ttelemetryData: null\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Otherwise create empty report\n\t\t\tconst emptyReport = {\n\t\t\t\tmeta: {\n\t\t\t\t\tgeneratedAt: new Date().toISOString(),\n\t\t\t\t\ttasksAnalyzed: 0,\n\t\t\t\t\tthresholdScore: thresholdScore,\n\t\t\t\t\tprojectName: getProjectName(session),\n\t\t\t\t\tusedResearch: useResearch\n\t\t\t\t},\n\t\t\t\tcomplexityAnalysis: existingReport?.complexityAnalysis || []\n\t\t\t};\n\t\t\treportLog(`Writing complexity report to ${outputPath}...`, 'info');\n\t\t\tfs.writeFileSync(\n\t\t\t\toutputPath,\n\t\t\t\tJSON.stringify(emptyReport, null, '\\t'),\n\t\t\t\t'utf8'\n\t\t\t);\n\t\t\treportLog(\n\t\t\t\t`Task complexity analysis complete. Report written to ${outputPath}`,\n\t\t\t\t'success'\n\t\t\t);\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t`Task complexity analysis complete. 
Report written to ${outputPath}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconst highComplexity = 0;\n\t\t\t\tconst mediumComplexity = 0;\n\t\t\t\tconst lowComplexity = 0;\n\t\t\t\tconst totalAnalyzed = 0;\n\n\t\t\t\tconsole.log('\\nComplexity Analysis Summary:');\n\t\t\t\tconsole.log('----------------------------');\n\t\t\t\tconsole.log(`Tasks in input file: ${originalTaskCount}`);\n\t\t\t\tconsole.log(`Tasks successfully analyzed: ${totalAnalyzed}`);\n\t\t\t\tconsole.log(`High complexity tasks: ${highComplexity}`);\n\t\t\t\tconsole.log(`Medium complexity tasks: ${mediumComplexity}`);\n\t\t\t\tconsole.log(`Low complexity tasks: ${lowComplexity}`);\n\t\t\t\tconsole.log(\n\t\t\t\t\t`Sum verification: ${highComplexity + mediumComplexity + lowComplexity} (should equal ${totalAnalyzed})`\n\t\t\t\t);\n\t\t\t\tconsole.log(`Research-backed analysis: ${useResearch ? 'Yes' : 'No'}`);\n\t\t\t\tconsole.log(\n\t\t\t\t\t`\\nSee ${outputPath} for the full report and expansion commands.`\n\t\t\t\t);\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.white.bold('Suggested Next Steps:') +\n\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t`${chalk.cyan('1.')} Run ${chalk.yellow('task-master complexity-report')} to review detailed findings\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down complex tasks\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('3.')} Run ${chalk.yellow('task-master expand --all')} to expand all pending tasks based on complexity`,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t\treturn {\n\t\t\t\treport: emptyReport,\n\t\t\t\ttelemetryData: null\n\t\t\t};\n\t\t}\n\n\t\t// Continue with regular analysis path\n\t\t// Load prompts using PromptManager\n\t\tconst promptManager = getPromptManager();\n\n\t\tconst promptParams = {\n\t\t\ttasks: 
tasksData.tasks,\n\t\t\tgatheredContext: gatheredContext || '',\n\t\t\tuseResearch: useResearch\n\t\t};\n\n\t\tconst { systemPrompt, userPrompt: prompt } = await promptManager.loadPrompt(\n\t\t\t'analyze-complexity',\n\t\t\tpromptParams,\n\t\t\t'default'\n\t\t);\n\n\t\tlet loadingIndicator = null;\n\t\tif (outputFormat === 'text') {\n\t\t\tloadingIndicator = startLoadingIndicator(\n\t\t\t\t`${useResearch ? 'Researching' : 'Analyzing'} the complexity of your tasks with AI...\\n`\n\t\t\t);\n\t\t}\n\n\t\tlet aiServiceResponse = null;\n\t\tlet complexityAnalysis = null;\n\n\t\ttry {\n\t\t\tconst role = useResearch ? 'research' : 'main';\n\n\t\t\taiServiceResponse = await generateTextService({\n\t\t\t\tprompt,\n\t\t\t\tsystemPrompt,\n\t\t\t\trole,\n\t\t\t\tsession,\n\t\t\t\tprojectRoot,\n\t\t\t\tcommandName: 'analyze-complexity',\n\t\t\t\toutputType: mcpLog ? 'mcp' : 'cli'\n\t\t\t});\n\n\t\t\tif (loadingIndicator) {\n\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t\tloadingIndicator = null;\n\t\t\t}\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\treadline.clearLine(process.stdout, 0);\n\t\t\t\treadline.cursorTo(process.stdout, 0);\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.green('AI service call complete. 
Parsing response...')\n\t\t\t\t);\n\t\t\t}\n\n\t\t\treportLog('Parsing complexity analysis from text response...', 'info');\n\t\t\ttry {\n\t\t\t\tlet cleanedResponse = aiServiceResponse.mainResult;\n\t\t\t\tcleanedResponse = cleanedResponse.trim();\n\n\t\t\t\tconst codeBlockMatch = cleanedResponse.match(\n\t\t\t\t\t/```(?:json)?\\s*([\\s\\S]*?)\\s*```/\n\t\t\t\t);\n\t\t\t\tif (codeBlockMatch) {\n\t\t\t\t\tcleanedResponse = codeBlockMatch[1].trim();\n\t\t\t\t} else {\n\t\t\t\t\tconst firstBracket = cleanedResponse.indexOf('[');\n\t\t\t\t\tconst lastBracket = cleanedResponse.lastIndexOf(']');\n\t\t\t\t\tif (firstBracket !== -1 && lastBracket > firstBracket) {\n\t\t\t\t\t\tcleanedResponse = cleanedResponse.substring(\n\t\t\t\t\t\t\tfirstBracket,\n\t\t\t\t\t\t\tlastBracket + 1\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\treportLog(\n\t\t\t\t\t\t\t'Warning: Response does not appear to be a JSON array.',\n\t\t\t\t\t\t\t'warn'\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\t\t\tconsole.log(chalk.gray('Attempting to parse cleaned JSON...'));\n\t\t\t\t\tconsole.log(chalk.gray('Cleaned response (first 100 chars):'));\n\t\t\t\t\tconsole.log(chalk.gray(cleanedResponse.substring(0, 100)));\n\t\t\t\t\tconsole.log(chalk.gray('Last 100 chars:'));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.gray(cleanedResponse.substring(cleanedResponse.length - 100))\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tcomplexityAnalysis = JSON.parse(cleanedResponse);\n\t\t\t} catch (parseError) {\n\t\t\t\tif (loadingIndicator) stopLoadingIndicator(loadingIndicator);\n\t\t\t\treportLog(\n\t\t\t\t\t`Error parsing complexity analysis JSON: ${parseError.message}`,\n\t\t\t\t\t'error'\n\t\t\t\t);\n\t\t\t\tif (outputFormat === 'text') {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t`Error parsing complexity analysis JSON: ${parseError.message}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tthrow parseError;\n\t\t\t}\n\n\t\t\tconst 
taskIds = tasksData.tasks.map((t) => t.id);\n\t\t\tconst analysisTaskIds = complexityAnalysis.map((a) => a.taskId);\n\t\t\tconst missingTaskIds = taskIds.filter(\n\t\t\t\t(id) => !analysisTaskIds.includes(id)\n\t\t\t);\n\n\t\t\tif (missingTaskIds.length > 0) {\n\t\t\t\treportLog(\n\t\t\t\t\t`Missing analysis for ${missingTaskIds.length} tasks: ${missingTaskIds.join(', ')}`,\n\t\t\t\t\t'warn'\n\t\t\t\t);\n\t\t\t\tif (outputFormat === 'text') {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`Missing analysis for ${missingTaskIds.length} tasks: ${missingTaskIds.join(', ')}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tfor (const missingId of missingTaskIds) {\n\t\t\t\t\tconst missingTask = tasksData.tasks.find((t) => t.id === missingId);\n\t\t\t\t\tif (missingTask) {\n\t\t\t\t\t\treportLog(`Adding default analysis for task ${missingId}`, 'info');\n\t\t\t\t\t\tcomplexityAnalysis.push({\n\t\t\t\t\t\t\ttaskId: missingId,\n\t\t\t\t\t\t\ttaskTitle: missingTask.title,\n\t\t\t\t\t\t\tcomplexityScore: 5,\n\t\t\t\t\t\t\trecommendedSubtasks: 3,\n\t\t\t\t\t\t\texpansionPrompt: `Break down this task with a focus on ${missingTask.title.toLowerCase()}.`,\n\t\t\t\t\t\t\treasoning:\n\t\t\t\t\t\t\t\t'Automatically added due to missing analysis in AI response.'\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Merge with existing report - only keep entries from the current tag\n\t\t\tlet finalComplexityAnalysis = [];\n\n\t\t\tif (existingReport && Array.isArray(existingReport.complexityAnalysis)) {\n\t\t\t\t// Create a map of task IDs that we just analyzed\n\t\t\t\tconst analyzedTaskIds = new Set(\n\t\t\t\t\tcomplexityAnalysis.map((item) => item.taskId)\n\t\t\t\t);\n\n\t\t\t\t// Keep existing entries that weren't in this analysis run AND belong to the current tag\n\t\t\t\t// We determine tag membership by checking if the task ID exists in the current tag's tasks\n\t\t\t\tconst currentTagTaskIds = new Set(tasksData.tasks.map((t) => 
t.id));\n\t\t\t\tconst existingEntriesNotAnalyzed =\n\t\t\t\t\texistingReport.complexityAnalysis.filter(\n\t\t\t\t\t\t(item) =>\n\t\t\t\t\t\t\t!analyzedTaskIds.has(item.taskId) &&\n\t\t\t\t\t\t\tcurrentTagTaskIds.has(item.taskId) // Only keep entries for tasks in current tag\n\t\t\t\t\t);\n\n\t\t\t\t// Combine with new analysis\n\t\t\t\tfinalComplexityAnalysis = [\n\t\t\t\t\t...existingEntriesNotAnalyzed,\n\t\t\t\t\t...complexityAnalysis\n\t\t\t\t];\n\n\t\t\t\treportLog(\n\t\t\t\t\t`Merged ${complexityAnalysis.length} new analyses with ${existingEntriesNotAnalyzed.length} existing entries from current tag`,\n\t\t\t\t\t'info'\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// No existing report or invalid format, just use the new analysis\n\t\t\t\tfinalComplexityAnalysis = complexityAnalysis;\n\t\t\t}\n\n\t\t\tconst report = {\n\t\t\t\tmeta: {\n\t\t\t\t\tgeneratedAt: new Date().toISOString(),\n\t\t\t\t\ttasksAnalyzed: tasksData.tasks.length,\n\t\t\t\t\ttotalTasks: originalTaskCount,\n\t\t\t\t\tanalysisCount: finalComplexityAnalysis.length,\n\t\t\t\t\tthresholdScore: thresholdScore,\n\t\t\t\t\tprojectName: getProjectName(session),\n\t\t\t\t\tusedResearch: useResearch\n\t\t\t\t},\n\t\t\t\tcomplexityAnalysis: finalComplexityAnalysis\n\t\t\t};\n\t\t\treportLog(`Writing complexity report to ${outputPath}...`, 'info');\n\t\t\tfs.writeFileSync(outputPath, JSON.stringify(report, null, '\\t'), 'utf8');\n\n\t\t\treportLog(\n\t\t\t\t`Task complexity analysis complete. Report written to ${outputPath}`,\n\t\t\t\t'success'\n\t\t\t);\n\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t`Task complexity analysis complete. 
Report written to ${outputPath}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\t// Calculate statistics specifically for this analysis run\n\t\t\t\tconst highComplexity = complexityAnalysis.filter(\n\t\t\t\t\t(t) => t.complexityScore >= 8\n\t\t\t\t).length;\n\t\t\t\tconst mediumComplexity = complexityAnalysis.filter(\n\t\t\t\t\t(t) => t.complexityScore >= 5 && t.complexityScore < 8\n\t\t\t\t).length;\n\t\t\t\tconst lowComplexity = complexityAnalysis.filter(\n\t\t\t\t\t(t) => t.complexityScore < 5\n\t\t\t\t).length;\n\t\t\t\tconst totalAnalyzed = complexityAnalysis.length;\n\n\t\t\t\tconsole.log('\\nCurrent Analysis Summary:');\n\t\t\t\tconsole.log('----------------------------');\n\t\t\t\tconsole.log(`Tasks analyzed in this run: ${totalAnalyzed}`);\n\t\t\t\tconsole.log(`High complexity tasks: ${highComplexity}`);\n\t\t\t\tconsole.log(`Medium complexity tasks: ${mediumComplexity}`);\n\t\t\t\tconsole.log(`Low complexity tasks: ${lowComplexity}`);\n\n\t\t\t\tif (existingReport) {\n\t\t\t\t\tconsole.log('\\nUpdated Report Summary:');\n\t\t\t\t\tconsole.log('----------------------------');\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t`Total analyses in report: ${finalComplexityAnalysis.length}`\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t`Analyses from previous runs: ${finalComplexityAnalysis.length - totalAnalyzed}`\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(`New/updated analyses: ${totalAnalyzed}`);\n\t\t\t\t}\n\n\t\t\t\tconsole.log(`Research-backed analysis: ${useResearch ? 
'Yes' : 'No'}`);\n\t\t\t\tconsole.log(\n\t\t\t\t\t`\\nSee ${outputPath} for the full report and expansion commands.`\n\t\t\t\t);\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.white.bold('Suggested Next Steps:') +\n\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t`${chalk.cyan('1.')} Run ${chalk.yellow('task-master complexity-report')} to review detailed findings\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down complex tasks\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('3.')} Run ${chalk.yellow('task-master expand --all')} to expand all pending tasks based on complexity`,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t);\n\n\t\t\t\tif (getDebugFlag(session)) {\n\t\t\t\t\tconsole.debug(\n\t\t\t\t\t\tchalk.gray(\n\t\t\t\t\t\t\t`Final analysis object: ${JSON.stringify(report, null, 2)}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tif (aiServiceResponse.telemetryData) {\n\t\t\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn {\n\t\t\t\treport: report,\n\t\t\t\ttelemetryData: aiServiceResponse?.telemetryData,\n\t\t\t\ttagInfo: aiServiceResponse?.tagInfo\n\t\t\t};\n\t\t} catch (aiError) {\n\t\t\tif (loadingIndicator) stopLoadingIndicator(loadingIndicator);\n\t\t\treportLog(`Error during AI service call: ${aiError.message}`, 'error');\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(`Error during AI service call: ${aiError.message}`)\n\t\t\t\t);\n\t\t\t\tif (aiError.message.includes('API key')) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'\\nPlease ensure your API keys are correctly configured in .env or ~/.taskmaster/.env'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\"Run 'task-master models --setup' if 
needed.\")\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t\tthrow aiError;\n\t\t}\n\t} catch (error) {\n\t\treportLog(`Error analyzing task complexity: ${error.message}`, 'error');\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(\n\t\t\t\tchalk.red(`Error analyzing task complexity: ${error.message}`)\n\t\t\t);\n\t\t\tif (getDebugFlag(session)) {\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\t\t\tprocess.exit(1);\n\t\t} else {\n\t\t\tthrow error;\n\t\t}\n\t}\n}\n\nexport default analyzeTaskComplexity;\n"], ["/claude-task-master/scripts/modules/task-manager/research.js", "/**\n * research.js\n * Core research functionality for AI-powered queries with project context\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport inquirer from 'inquirer';\nimport { highlight } from 'cli-highlight';\nimport { ContextGatherer } from '../utils/contextGatherer.js';\nimport { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';\nimport { generateTextService } from '../ai-services-unified.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport {\n\tlog as consoleLog,\n\tfindProjectRoot,\n\treadJSON,\n\tflattenTasksWithSubtasks\n} from '../utils.js';\nimport {\n\tdisplayAiUsageSummary,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator\n} from '../ui.js';\n\n/**\n * Perform AI-powered research with project context\n * @param {string} query - Research query/prompt\n * @param {Object} options - Research options\n * @param {Array<string>} [options.taskIds] - Task/subtask IDs for context\n * @param {Array<string>} [options.filePaths] - File paths for context\n * @param {string} [options.customContext] - Additional custom context\n * @param {boolean} [options.includeProjectTree] - Include project file tree\n * @param {string} [options.detailLevel] - Detail level: 'low', 'medium', 'high'\n * @param {string} [options.projectRoot] - Project root directory\n * @param {string} [options.tag] - Tag for the task\n * 
@param {boolean} [options.saveToFile] - Whether to save results to file (MCP mode)\n * @param {Object} [context] - Execution context\n * @param {Object} [context.session] - MCP session object\n * @param {Object} [context.mcpLog] - MCP logger object\n * @param {string} [context.commandName] - Command name for telemetry\n * @param {string} [context.outputType] - Output type ('cli' or 'mcp')\n * @param {string} [outputFormat] - Output format ('text' or 'json')\n * @param {boolean} [allowFollowUp] - Whether to allow follow-up questions (default: true)\n * @returns {Promise<Object>} Research results with telemetry data\n */\nasync function performResearch(\n\tquery,\n\toptions = {},\n\tcontext = {},\n\toutputFormat = 'text',\n\tallowFollowUp = true\n) {\n\tconst {\n\t\ttaskIds = [],\n\t\tfilePaths = [],\n\t\tcustomContext = '',\n\t\tincludeProjectTree = false,\n\t\tdetailLevel = 'medium',\n\t\tprojectRoot: providedProjectRoot,\n\t\ttag,\n\t\tsaveToFile = false\n\t} = options;\n\n\tconst {\n\t\tsession,\n\t\tmcpLog,\n\t\tcommandName = 'research',\n\t\toutputType = 'cli'\n\t} = context;\n\tconst isMCP = !!mcpLog;\n\n\t// Determine project root\n\tconst projectRoot = providedProjectRoot || findProjectRoot();\n\tif (!projectRoot) {\n\t\tthrow new Error('Could not determine project root directory');\n\t}\n\n\t// Create consistent logger\n\tconst logFn = isMCP\n\t\t? 
mcpLog\n\t\t: {\n\t\t\t\tinfo: (...args) => consoleLog('info', ...args),\n\t\t\t\twarn: (...args) => consoleLog('warn', ...args),\n\t\t\t\terror: (...args) => consoleLog('error', ...args),\n\t\t\t\tdebug: (...args) => consoleLog('debug', ...args),\n\t\t\t\tsuccess: (...args) => consoleLog('success', ...args)\n\t\t\t};\n\n\t// Show UI banner for CLI mode\n\tif (outputFormat === 'text') {\n\t\tconsole.log(\n\t\t\tboxen(chalk.cyan.bold(`🔍 AI Research Query`), {\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'cyan',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t})\n\t\t);\n\t}\n\n\ttry {\n\t\t// Initialize context gatherer\n\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\n\t\t// Auto-discover relevant tasks using fuzzy search to supplement provided tasks\n\t\tlet finalTaskIds = [...taskIds]; // Start with explicitly provided tasks\n\t\tlet autoDiscoveredIds = [];\n\n\t\ttry {\n\t\t\tconst tasksPath = path.join(\n\t\t\t\tprojectRoot,\n\t\t\t\t'.taskmaster',\n\t\t\t\t'tasks',\n\t\t\t\t'tasks.json'\n\t\t\t);\n\t\t\tconst tasksData = await readJSON(tasksPath, projectRoot, tag);\n\n\t\t\tif (tasksData && tasksData.tasks && tasksData.tasks.length > 0) {\n\t\t\t\t// Flatten tasks to include subtasks for fuzzy search\n\t\t\t\tconst flattenedTasks = flattenTasksWithSubtasks(tasksData.tasks);\n\t\t\t\tconst fuzzySearch = new FuzzyTaskSearch(flattenedTasks, 'research');\n\t\t\t\tconst searchResults = fuzzySearch.findRelevantTasks(query, {\n\t\t\t\t\tmaxResults: 8,\n\t\t\t\t\tincludeRecent: true,\n\t\t\t\t\tincludeCategoryMatches: true\n\t\t\t\t});\n\n\t\t\t\tautoDiscoveredIds = fuzzySearch.getTaskIds(searchResults);\n\n\t\t\t\t// Remove any auto-discovered tasks that were already explicitly provided\n\t\t\t\tconst uniqueAutoDiscovered = autoDiscoveredIds.filter(\n\t\t\t\t\t(id) => !finalTaskIds.includes(id)\n\t\t\t\t);\n\n\t\t\t\t// Add unique auto-discovered tasks to the final list\n\t\t\t\tfinalTaskIds = [...finalTaskIds, 
...uniqueAutoDiscovered];\n\n\t\t\t\tif (outputFormat === 'text' && finalTaskIds.length > 0) {\n\t\t\t\t\t// Sort task IDs numerically for better display\n\t\t\t\t\tconst sortedTaskIds = finalTaskIds\n\t\t\t\t\t\t.map((id) => parseInt(id))\n\t\t\t\t\t\t.sort((a, b) => a - b)\n\t\t\t\t\t\t.map((id) => id.toString());\n\n\t\t\t\t\t// Show different messages based on whether tasks were explicitly provided\n\t\t\t\t\tif (taskIds.length > 0) {\n\t\t\t\t\t\tconst sortedProvidedIds = taskIds\n\t\t\t\t\t\t\t.map((id) => parseInt(id))\n\t\t\t\t\t\t\t.sort((a, b) => a - b)\n\t\t\t\t\t\t\t.map((id) => id.toString());\n\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.gray('Provided tasks: ') +\n\t\t\t\t\t\t\t\tchalk.cyan(sortedProvidedIds.join(', '))\n\t\t\t\t\t\t);\n\n\t\t\t\t\t\tif (uniqueAutoDiscovered.length > 0) {\n\t\t\t\t\t\t\tconst sortedAutoIds = uniqueAutoDiscovered\n\t\t\t\t\t\t\t\t.map((id) => parseInt(id))\n\t\t\t\t\t\t\t\t.sort((a, b) => a - b)\n\t\t\t\t\t\t\t\t.map((id) => id.toString());\n\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.gray('+ Auto-discovered related tasks: ') +\n\t\t\t\t\t\t\t\t\tchalk.cyan(sortedAutoIds.join(', '))\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.gray('Auto-discovered relevant tasks: ') +\n\t\t\t\t\t\t\t\tchalk.cyan(sortedTaskIds.join(', '))\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Silently continue without auto-discovered tasks if there's an error\n\t\t\tlogFn.debug(`Could not auto-discover tasks: ${error.message}`);\n\t\t}\n\n\t\tconst contextResult = await contextGatherer.gather({\n\t\t\ttasks: finalTaskIds,\n\t\t\tfiles: filePaths,\n\t\t\tcustomContext,\n\t\t\tincludeProjectTree,\n\t\t\tformat: 'research', // Use research format for AI consumption\n\t\t\tincludeTokenCounts: true\n\t\t});\n\n\t\tconst gatheredContext = contextResult.context;\n\t\tconst tokenBreakdown = contextResult.tokenBreakdown;\n\n\t\t// Load prompts using 
PromptManager\n\t\tconst promptManager = getPromptManager();\n\n\t\tconst promptParams = {\n\t\t\tquery: query,\n\t\t\tgatheredContext: gatheredContext || '',\n\t\t\tdetailLevel: detailLevel,\n\t\t\tprojectInfo: {\n\t\t\t\troot: projectRoot,\n\t\t\t\ttaskCount: finalTaskIds.length,\n\t\t\t\tfileCount: filePaths.length\n\t\t\t}\n\t\t};\n\n\t\t// Load prompts - the research template handles detail level internally\n\t\tconst { systemPrompt, userPrompt } = await promptManager.loadPrompt(\n\t\t\t'research',\n\t\t\tpromptParams\n\t\t);\n\n\t\t// Count tokens for system and user prompts\n\t\tconst systemPromptTokens = contextGatherer.countTokens(systemPrompt);\n\t\tconst userPromptTokens = contextGatherer.countTokens(userPrompt);\n\t\tconst totalInputTokens = systemPromptTokens + userPromptTokens;\n\n\t\tif (outputFormat === 'text') {\n\t\t\t// Display detailed token breakdown in a clean box\n\t\t\tdisplayDetailedTokenBreakdown(\n\t\t\t\ttokenBreakdown,\n\t\t\t\tsystemPromptTokens,\n\t\t\t\tuserPromptTokens\n\t\t\t);\n\t\t}\n\n\t\t// Only log detailed info in debug mode or MCP\n\t\tif (outputFormat !== 'text') {\n\t\t\tlogFn.info(\n\t\t\t\t`Calling AI service with research role, context size: ${tokenBreakdown.total} tokens (${gatheredContext.length} characters)`\n\t\t\t);\n\t\t}\n\n\t\t// Start loading indicator for CLI mode\n\t\tlet loadingIndicator = null;\n\t\tif (outputFormat === 'text') {\n\t\t\tloadingIndicator = startLoadingIndicator('Researching with AI...\\n');\n\t\t}\n\n\t\tlet aiResult;\n\t\ttry {\n\t\t\t// Call AI service with research role\n\t\t\taiResult = await generateTextService({\n\t\t\t\trole: 'research', // Always use research role for research command\n\t\t\t\tsession,\n\t\t\t\tprojectRoot,\n\t\t\t\tsystemPrompt,\n\t\t\t\tprompt: userPrompt,\n\t\t\t\tcommandName,\n\t\t\t\toutputType\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tif (loadingIndicator) {\n\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t}\n\t\t\tthrow error;\n\t\t} finally {\n\t\t\tif 
(loadingIndicator) {\n\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t}\n\t\t}\n\n\t\tconst researchResult = aiResult.mainResult;\n\t\tconst telemetryData = aiResult.telemetryData;\n\t\tconst tagInfo = aiResult.tagInfo;\n\n\t\t// Format and display results\n\t\t// Initialize interactive save tracking\n\t\tlet interactiveSaveInfo = { interactiveSaveOccurred: false };\n\n\t\tif (outputFormat === 'text') {\n\t\t\tdisplayResearchResults(\n\t\t\t\tresearchResult,\n\t\t\t\tquery,\n\t\t\t\tdetailLevel,\n\t\t\t\ttokenBreakdown\n\t\t\t);\n\n\t\t\t// Display AI usage telemetry for CLI users\n\t\t\tif (telemetryData) {\n\t\t\t\tdisplayAiUsageSummary(telemetryData, 'cli');\n\t\t\t}\n\n\t\t\t// Offer follow-up question option (only for initial CLI queries, not MCP)\n\t\t\tif (allowFollowUp && !isMCP) {\n\t\t\t\tinteractiveSaveInfo = await handleFollowUpQuestions(\n\t\t\t\t\toptions,\n\t\t\t\t\tcontext,\n\t\t\t\t\toutputFormat,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tlogFn,\n\t\t\t\t\tquery,\n\t\t\t\t\tresearchResult\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\t// Handle MCP save-to-file request\n\t\tif (saveToFile && isMCP) {\n\t\t\tconst conversationHistory = [\n\t\t\t\t{\n\t\t\t\t\tquestion: query,\n\t\t\t\t\tanswer: researchResult,\n\t\t\t\t\ttype: 'initial',\n\t\t\t\t\ttimestamp: new Date().toISOString()\n\t\t\t\t}\n\t\t\t];\n\n\t\t\tconst savedFilePath = await handleSaveToFile(\n\t\t\t\tconversationHistory,\n\t\t\t\tprojectRoot,\n\t\t\t\tcontext,\n\t\t\t\tlogFn\n\t\t\t);\n\n\t\t\t// Add saved file path to return data\n\t\t\treturn {\n\t\t\t\tquery,\n\t\t\t\tresult: researchResult,\n\t\t\t\tcontextSize: gatheredContext.length,\n\t\t\t\tcontextTokens: tokenBreakdown.total,\n\t\t\t\ttokenBreakdown,\n\t\t\t\tsystemPromptTokens,\n\t\t\t\tuserPromptTokens,\n\t\t\t\ttotalInputTokens,\n\t\t\t\tdetailLevel,\n\t\t\t\ttelemetryData,\n\t\t\t\ttagInfo,\n\t\t\t\tsavedFilePath,\n\t\t\t\tinteractiveSaveOccurred: false // MCP save-to-file doesn't count as interactive 
save\n\t\t\t};\n\t\t}\n\n\t\tlogFn.success('Research query completed successfully');\n\n\t\treturn {\n\t\t\tquery,\n\t\t\tresult: researchResult,\n\t\t\tcontextSize: gatheredContext.length,\n\t\t\tcontextTokens: tokenBreakdown.total,\n\t\t\ttokenBreakdown,\n\t\t\tsystemPromptTokens,\n\t\t\tuserPromptTokens,\n\t\t\ttotalInputTokens,\n\t\t\tdetailLevel,\n\t\t\ttelemetryData,\n\t\t\ttagInfo,\n\t\t\tinteractiveSaveOccurred:\n\t\t\t\tinteractiveSaveInfo?.interactiveSaveOccurred || false\n\t\t};\n\t} catch (error) {\n\t\tlogFn.error(`Research query failed: ${error.message}`);\n\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(chalk.red(`\\n❌ Research failed: ${error.message}`));\n\t\t}\n\n\t\tthrow error;\n\t}\n}\n\n/**\n * Display detailed token breakdown for context and prompts\n * @param {Object} tokenBreakdown - Token breakdown from context gatherer\n * @param {number} systemPromptTokens - System prompt token count\n * @param {number} userPromptTokens - User prompt token count\n */\nfunction displayDetailedTokenBreakdown(\n\ttokenBreakdown,\n\tsystemPromptTokens,\n\tuserPromptTokens\n) {\n\tconst parts = [];\n\n\t// Custom context\n\tif (tokenBreakdown.customContext) {\n\t\tparts.push(\n\t\t\tchalk.cyan('Custom: ') +\n\t\t\t\tchalk.yellow(tokenBreakdown.customContext.tokens.toLocaleString())\n\t\t);\n\t}\n\n\t// Tasks breakdown\n\tif (tokenBreakdown.tasks && tokenBreakdown.tasks.length > 0) {\n\t\tconst totalTaskTokens = tokenBreakdown.tasks.reduce(\n\t\t\t(sum, task) => sum + task.tokens,\n\t\t\t0\n\t\t);\n\t\tconst taskDetails = tokenBreakdown.tasks\n\t\t\t.map((task) => {\n\t\t\t\tconst titleDisplay =\n\t\t\t\t\ttask.title.length > 30\n\t\t\t\t\t\t? 
task.title.substring(0, 30) + '...'\n\t\t\t\t\t\t: task.title;\n\t\t\t\treturn ` ${chalk.gray(task.id)} ${chalk.white(titleDisplay)} ${chalk.yellow(task.tokens.toLocaleString())} tokens`;\n\t\t\t})\n\t\t\t.join('\\n');\n\n\t\tparts.push(\n\t\t\tchalk.cyan('Tasks: ') +\n\t\t\t\tchalk.yellow(totalTaskTokens.toLocaleString()) +\n\t\t\t\tchalk.gray(` (${tokenBreakdown.tasks.length} items)`) +\n\t\t\t\t'\\n' +\n\t\t\t\ttaskDetails\n\t\t);\n\t}\n\n\t// Files breakdown\n\tif (tokenBreakdown.files && tokenBreakdown.files.length > 0) {\n\t\tconst totalFileTokens = tokenBreakdown.files.reduce(\n\t\t\t(sum, file) => sum + file.tokens,\n\t\t\t0\n\t\t);\n\t\tconst fileDetails = tokenBreakdown.files\n\t\t\t.map((file) => {\n\t\t\t\tconst pathDisplay =\n\t\t\t\t\tfile.path.length > 40\n\t\t\t\t\t\t? '...' + file.path.substring(file.path.length - 37)\n\t\t\t\t\t\t: file.path;\n\t\t\t\treturn ` ${chalk.gray(pathDisplay)} ${chalk.yellow(file.tokens.toLocaleString())} tokens ${chalk.gray(`(${file.sizeKB}KB)`)}`;\n\t\t\t})\n\t\t\t.join('\\n');\n\n\t\tparts.push(\n\t\t\tchalk.cyan('Files: ') +\n\t\t\t\tchalk.yellow(totalFileTokens.toLocaleString()) +\n\t\t\t\tchalk.gray(` (${tokenBreakdown.files.length} files)`) +\n\t\t\t\t'\\n' +\n\t\t\t\tfileDetails\n\t\t);\n\t}\n\n\t// Project tree\n\tif (tokenBreakdown.projectTree) {\n\t\tparts.push(\n\t\t\tchalk.cyan('Project Tree: ') +\n\t\t\t\tchalk.yellow(tokenBreakdown.projectTree.tokens.toLocaleString()) +\n\t\t\t\tchalk.gray(\n\t\t\t\t\t` (${tokenBreakdown.projectTree.fileCount} files, ${tokenBreakdown.projectTree.dirCount} dirs)`\n\t\t\t\t)\n\t\t);\n\t}\n\n\t// Prompts breakdown\n\tconst totalPromptTokens = systemPromptTokens + userPromptTokens;\n\tconst promptDetails = [\n\t\t` ${chalk.gray('System:')} ${chalk.yellow(systemPromptTokens.toLocaleString())} tokens`,\n\t\t` ${chalk.gray('User:')} ${chalk.yellow(userPromptTokens.toLocaleString())} tokens`\n\t].join('\\n');\n\n\tparts.push(\n\t\tchalk.cyan('Prompts: ') 
+\n\t\t\tchalk.yellow(totalPromptTokens.toLocaleString()) +\n\t\t\tchalk.gray(' (generated)') +\n\t\t\t'\\n' +\n\t\t\tpromptDetails\n\t);\n\n\t// Display the breakdown in a clean box\n\tif (parts.length > 0) {\n\t\tconst content = parts.join('\\n\\n');\n\t\tconst tokenBox = boxen(content, {\n\t\t\ttitle: chalk.blue.bold('Context Analysis'),\n\t\t\ttitleAlignment: 'left',\n\t\t\tpadding: { top: 1, bottom: 1, left: 2, right: 2 },\n\t\t\tmargin: { top: 0, bottom: 1 },\n\t\t\tborderStyle: 'single',\n\t\t\tborderColor: 'blue'\n\t\t});\n\t\tconsole.log(tokenBox);\n\t}\n}\n\n/**\n * Process research result text to highlight code blocks\n * @param {string} text - Raw research result text\n * @returns {string} Processed text with highlighted code blocks\n */\nfunction processCodeBlocks(text) {\n\t// Regex to match code blocks with optional language specification\n\tconst codeBlockRegex = /```(\\w+)?\\n([\\s\\S]*?)```/g;\n\n\treturn text.replace(codeBlockRegex, (match, language, code) => {\n\t\ttry {\n\t\t\t// Default to javascript if no language specified\n\t\t\tconst lang = language || 'javascript';\n\n\t\t\t// Highlight the code using cli-highlight\n\t\t\tconst highlightedCode = highlight(code.trim(), {\n\t\t\t\tlanguage: lang,\n\t\t\t\tignoreIllegals: true // Don't fail on unrecognized syntax\n\t\t\t});\n\n\t\t\t// Add a subtle border around code blocks\n\t\t\tconst codeBox = boxen(highlightedCode, {\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tmargin: { top: 0, bottom: 0 },\n\t\t\t\tborderStyle: 'single',\n\t\t\t\tborderColor: 'dim'\n\t\t\t});\n\n\t\t\treturn '\\n' + codeBox + '\\n';\n\t\t} catch (error) {\n\t\t\t// If highlighting fails, return the original code block with basic formatting\n\t\t\treturn (\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.gray('```' + (language || '')) +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.white(code.trim()) +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.gray('```') +\n\t\t\t\t'\\n'\n\t\t\t);\n\t\t}\n\t});\n}\n\n/**\n * Display research results in 
formatted output\n * @param {string} result - AI research result\n * @param {string} query - Original query\n * @param {string} detailLevel - Detail level used\n * @param {Object} tokenBreakdown - Detailed token usage\n */\nfunction displayResearchResults(result, query, detailLevel, tokenBreakdown) {\n\t// Header with query info\n\tconst header = boxen(\n\t\tchalk.green.bold('Research Results') +\n\t\t\t'\\n\\n' +\n\t\t\tchalk.gray('Query: ') +\n\t\t\tchalk.white(query) +\n\t\t\t'\\n' +\n\t\t\tchalk.gray('Detail Level: ') +\n\t\t\tchalk.cyan(detailLevel),\n\t\t{\n\t\t\tpadding: { top: 1, bottom: 1, left: 2, right: 2 },\n\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\tborderStyle: 'round',\n\t\t\tborderColor: 'green'\n\t\t}\n\t);\n\tconsole.log(header);\n\n\t// Process the result to highlight code blocks\n\tconst processedResult = processCodeBlocks(result);\n\n\t// Main research content in a clean box\n\tconst contentBox = boxen(processedResult, {\n\t\tpadding: { top: 1, bottom: 1, left: 2, right: 2 },\n\t\tmargin: { top: 0, bottom: 1 },\n\t\tborderStyle: 'single',\n\t\tborderColor: 'gray'\n\t});\n\tconsole.log(contentBox);\n\n\t// Success footer\n\tconsole.log(chalk.green('✅ Research completed'));\n}\n\n/**\n * Handle follow-up questions and save functionality in interactive mode\n * @param {Object} originalOptions - Original research options\n * @param {Object} context - Execution context\n * @param {string} outputFormat - Output format\n * @param {string} projectRoot - Project root directory\n * @param {Object} logFn - Logger function\n * @param {string} initialQuery - Initial query for context\n * @param {string} initialResult - Initial AI result for context\n */\nasync function handleFollowUpQuestions(\n\toriginalOptions,\n\tcontext,\n\toutputFormat,\n\tprojectRoot,\n\tlogFn,\n\tinitialQuery,\n\tinitialResult\n) {\n\tlet interactiveSaveOccurred = false;\n\n\ttry {\n\t\t// Import required modules for saving\n\t\tconst { readJSON } = await 
import('../utils.js');\n\t\tconst updateTaskById = (await import('./update-task-by-id.js')).default;\n\t\tconst { updateSubtaskById } = await import('./update-subtask-by-id.js');\n\n\t\t// Initialize conversation history with the initial Q&A\n\t\tconst conversationHistory = [\n\t\t\t{\n\t\t\t\tquestion: initialQuery,\n\t\t\t\tanswer: initialResult,\n\t\t\t\ttype: 'initial',\n\t\t\t\ttimestamp: new Date().toISOString()\n\t\t\t}\n\t\t];\n\n\t\twhile (true) {\n\t\t\t// Get user choice\n\t\t\tconst { action } = await inquirer.prompt([\n\t\t\t\t{\n\t\t\t\t\ttype: 'list',\n\t\t\t\t\tname: 'action',\n\t\t\t\t\tmessage: 'What would you like to do next?',\n\t\t\t\t\tchoices: [\n\t\t\t\t\t\t{ name: 'Ask a follow-up question', value: 'followup' },\n\t\t\t\t\t\t{ name: 'Save to file', value: 'savefile' },\n\t\t\t\t\t\t{ name: 'Save to task/subtask', value: 'save' },\n\t\t\t\t\t\t{ name: 'Quit', value: 'quit' }\n\t\t\t\t\t],\n\t\t\t\t\tpageSize: 4\n\t\t\t\t}\n\t\t\t]);\n\n\t\t\tif (action === 'quit') {\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tif (action === 'savefile') {\n\t\t\t\t// Handle save to file functionality\n\t\t\t\tawait handleSaveToFile(\n\t\t\t\t\tconversationHistory,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tcontext,\n\t\t\t\t\tlogFn\n\t\t\t\t);\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tif (action === 'save') {\n\t\t\t\t// Handle save functionality\n\t\t\t\tconst saveResult = await handleSaveToTask(\n\t\t\t\t\tconversationHistory,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tcontext,\n\t\t\t\t\tlogFn\n\t\t\t\t);\n\t\t\t\tif (saveResult) {\n\t\t\t\t\tinteractiveSaveOccurred = true;\n\t\t\t\t}\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tif (action === 'followup') {\n\t\t\t\t// Get the follow-up question\n\t\t\t\tconst { followUpQuery } = await inquirer.prompt([\n\t\t\t\t\t{\n\t\t\t\t\t\ttype: 'input',\n\t\t\t\t\t\tname: 'followUpQuery',\n\t\t\t\t\t\tmessage: 'Enter your follow-up question:',\n\t\t\t\t\t\tvalidate: (input) => {\n\t\t\t\t\t\t\tif (!input || input.trim().length === 0) 
{\n\t\t\t\t\t\t\t\treturn 'Please enter a valid question.';\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t]);\n\n\t\t\t\tif (!followUpQuery || followUpQuery.trim().length === 0) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tconsole.log('\\n' + chalk.gray('─'.repeat(60)) + '\\n');\n\n\t\t\t\t// Build cumulative conversation context from all previous exchanges\n\t\t\t\tconst conversationContext =\n\t\t\t\t\tbuildConversationContext(conversationHistory);\n\n\t\t\t\t// Create enhanced options for follow-up with full conversation context\n\t\t\t\tconst followUpOptions = {\n\t\t\t\t\t...originalOptions,\n\t\t\t\t\ttaskIds: [], // Clear task IDs to allow fresh fuzzy search\n\t\t\t\t\tcustomContext:\n\t\t\t\t\t\tconversationContext +\n\t\t\t\t\t\t(originalOptions.customContext\n\t\t\t\t\t\t\t? `\\n\\n--- Original Context ---\\n${originalOptions.customContext}`\n\t\t\t\t\t\t\t: '')\n\t\t\t\t};\n\n\t\t\t\t// Perform follow-up research\n\t\t\t\tconst followUpResult = await performResearch(\n\t\t\t\t\tfollowUpQuery.trim(),\n\t\t\t\t\tfollowUpOptions,\n\t\t\t\t\tcontext,\n\t\t\t\t\toutputFormat,\n\t\t\t\t\tfalse // allowFollowUp = false for nested calls\n\t\t\t\t);\n\n\t\t\t\t// Add this exchange to the conversation history\n\t\t\t\tconversationHistory.push({\n\t\t\t\t\tquestion: followUpQuery.trim(),\n\t\t\t\t\tanswer: followUpResult.result,\n\t\t\t\t\ttype: 'followup',\n\t\t\t\t\ttimestamp: new Date().toISOString()\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\t// If there's an error with inquirer (e.g., non-interactive terminal),\n\t\t// silently continue without follow-up functionality\n\t\tlogFn.debug(`Follow-up questions not available: ${error.message}`);\n\t}\n\n\treturn { interactiveSaveOccurred };\n}\n\n/**\n * Handle saving conversation to a task or subtask\n * @param {Array} conversationHistory - Array of conversation exchanges\n * @param {string} projectRoot - Project root directory\n * @param {Object} context - Execution 
context\n * @param {Object} logFn - Logger function\n */\nasync function handleSaveToTask(\n\tconversationHistory,\n\tprojectRoot,\n\tcontext,\n\tlogFn\n) {\n\ttry {\n\t\t// Import required modules\n\t\tconst { readJSON } = await import('../utils.js');\n\t\tconst updateTaskById = (await import('./update-task-by-id.js')).default;\n\t\tconst { updateSubtaskById } = await import('./update-subtask-by-id.js');\n\n\t\t// Get task ID from user\n\t\tconst { taskId } = await inquirer.prompt([\n\t\t\t{\n\t\t\t\ttype: 'input',\n\t\t\t\tname: 'taskId',\n\t\t\t\tmessage: 'Enter task ID (e.g., \"15\" for task or \"15.2\" for subtask):',\n\t\t\t\tvalidate: (input) => {\n\t\t\t\t\tif (!input || input.trim().length === 0) {\n\t\t\t\t\t\treturn 'Please enter a task ID.';\n\t\t\t\t\t}\n\n\t\t\t\t\tconst trimmedInput = input.trim();\n\t\t\t\t\t// Validate format: number or number.number\n\t\t\t\t\tif (!/^\\d+(\\.\\d+)?$/.test(trimmedInput)) {\n\t\t\t\t\t\treturn 'Invalid format. Use \"15\" for task or \"15.2\" for subtask.';\n\t\t\t\t\t}\n\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t}\n\t\t]);\n\n\t\tconst trimmedTaskId = taskId.trim();\n\n\t\t// Format conversation thread for saving\n\t\tconst conversationThread = formatConversationForSaving(conversationHistory);\n\n\t\t// Determine if it's a task or subtask\n\t\tconst isSubtask = trimmedTaskId.includes('.');\n\n\t\t// Try to save - first validate the ID exists\n\t\tconst tasksPath = path.join(\n\t\t\tprojectRoot,\n\t\t\t'.taskmaster',\n\t\t\t'tasks',\n\t\t\t'tasks.json'\n\t\t);\n\n\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\tconsole.log(\n\t\t\t\tchalk.red('❌ Tasks file not found. 
Please run task-master init first.')\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\tconst data = readJSON(tasksPath, projectRoot, context.tag);\n\t\tif (!data || !data.tasks) {\n\t\t\tconsole.log(chalk.red('❌ No valid tasks found.'));\n\t\t\treturn;\n\t\t}\n\n\t\tif (isSubtask) {\n\t\t\t// Validate subtask exists\n\t\t\tconst [parentId, subtaskId] = trimmedTaskId\n\t\t\t\t.split('.')\n\t\t\t\t.map((id) => parseInt(id, 10));\n\t\t\tconst parentTask = data.tasks.find((t) => t.id === parentId);\n\n\t\t\tif (!parentTask) {\n\t\t\t\tconsole.log(chalk.red(`❌ Parent task ${parentId} not found.`));\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tif (\n\t\t\t\t!parentTask.subtasks ||\n\t\t\t\t!parentTask.subtasks.find((st) => st.id === subtaskId)\n\t\t\t) {\n\t\t\t\tconsole.log(chalk.red(`❌ Subtask ${trimmedTaskId} not found.`));\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Save to subtask using updateSubtaskById\n\t\t\tconsole.log(chalk.blue('💾 Saving research conversation to subtask...'));\n\n\t\t\tawait updateSubtaskById(\n\t\t\t\ttasksPath,\n\t\t\t\ttrimmedTaskId,\n\t\t\t\tconversationThread,\n\t\t\t\tfalse, // useResearch = false for simple append\n\t\t\t\tcontext,\n\t\t\t\t'text'\n\t\t\t);\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.green(\n\t\t\t\t\t`✅ Research conversation saved to subtask ${trimmedTaskId}`\n\t\t\t\t)\n\t\t\t);\n\t\t} else {\n\t\t\t// Validate task exists\n\t\t\tconst taskIdNum = parseInt(trimmedTaskId, 10);\n\t\t\tconst task = data.tasks.find((t) => t.id === taskIdNum);\n\n\t\t\tif (!task) {\n\t\t\t\tconsole.log(chalk.red(`❌ Task ${trimmedTaskId} not found.`));\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Save to task using updateTaskById with append mode\n\t\t\tconsole.log(chalk.blue('💾 Saving research conversation to task...'));\n\n\t\t\tawait updateTaskById(\n\t\t\t\ttasksPath,\n\t\t\t\ttaskIdNum,\n\t\t\t\tconversationThread,\n\t\t\t\tfalse, // useResearch = false for simple append\n\t\t\t\tcontext,\n\t\t\t\t'text',\n\t\t\t\ttrue // appendMode = 
true\n\t\t\t);\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.green(`✅ Research conversation saved to task ${trimmedTaskId}`)\n\t\t\t);\n\t\t}\n\n\t\treturn true; // Indicate successful save\n\t} catch (error) {\n\t\tconsole.log(chalk.red(`❌ Error saving conversation: ${error.message}`));\n\t\tlogFn.error(`Error saving conversation: ${error.message}`);\n\t\treturn false; // Indicate failed save\n\t}\n}\n\n/**\n * Handle saving conversation to a file in .taskmaster/docs/research/\n * @param {Array} conversationHistory - Array of conversation exchanges\n * @param {string} projectRoot - Project root directory\n * @param {Object} context - Execution context\n * @param {Object} logFn - Logger function\n * @returns {Promise<string>} Path to saved file\n */\nasync function handleSaveToFile(\n\tconversationHistory,\n\tprojectRoot,\n\tcontext,\n\tlogFn\n) {\n\ttry {\n\t\t// Create research directory if it doesn't exist\n\t\tconst researchDir = path.join(\n\t\t\tprojectRoot,\n\t\t\t'.taskmaster',\n\t\t\t'docs',\n\t\t\t'research'\n\t\t);\n\t\tif (!fs.existsSync(researchDir)) {\n\t\t\tfs.mkdirSync(researchDir, { recursive: true });\n\t\t}\n\n\t\t// Generate filename from first query and timestamp\n\t\tconst firstQuery = conversationHistory[0]?.question || 'research-query';\n\t\tconst timestamp = new Date().toISOString().split('T')[0]; // YYYY-MM-DD format\n\n\t\t// Create a slug from the query (remove special chars, limit length)\n\t\tconst querySlug = firstQuery\n\t\t\t.toLowerCase()\n\t\t\t.replace(/[^a-z0-9\\s-]/g, '') // Remove special characters\n\t\t\t.replace(/\\s+/g, '-') // Replace spaces with hyphens\n\t\t\t.replace(/-+/g, '-') // Replace multiple hyphens with single\n\t\t\t.substring(0, 50) // Limit length\n\t\t\t.replace(/^-+|-+$/g, ''); // Remove leading/trailing hyphens\n\n\t\tconst filename = `${timestamp}_${querySlug}.md`;\n\t\tconst filePath = path.join(researchDir, filename);\n\n\t\t// Format conversation for file\n\t\tconst fileContent = 
formatConversationForFile(\n\t\t\tconversationHistory,\n\t\t\tfirstQuery\n\t\t);\n\n\t\t// Write file\n\t\tfs.writeFileSync(filePath, fileContent, 'utf8');\n\n\t\tconst relativePath = path.relative(projectRoot, filePath);\n\t\tconsole.log(\n\t\t\tchalk.green(`✅ Research saved to: ${chalk.cyan(relativePath)}`)\n\t\t);\n\n\t\tlogFn.success(`Research conversation saved to ${relativePath}`);\n\n\t\treturn filePath;\n\t} catch (error) {\n\t\tconsole.log(chalk.red(`❌ Error saving research file: ${error.message}`));\n\t\tlogFn.error(`Error saving research file: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Format conversation history for saving to a file\n * @param {Array} conversationHistory - Array of conversation exchanges\n * @param {string} initialQuery - The initial query for metadata\n * @returns {string} Formatted file content\n */\nfunction formatConversationForFile(conversationHistory, initialQuery) {\n\tconst timestamp = new Date().toISOString();\n\tconst date = new Date().toLocaleDateString();\n\tconst time = new Date().toLocaleTimeString();\n\n\t// Create metadata header\n\tlet content = `---\ntitle: Research Session\nquery: \"${initialQuery}\"\ndate: ${date}\ntime: ${time}\ntimestamp: ${timestamp}\nexchanges: ${conversationHistory.length}\n---\n\n# Research Session\n\n`;\n\n\t// Add each conversation exchange\n\tconversationHistory.forEach((exchange, index) => {\n\t\tif (exchange.type === 'initial') {\n\t\t\tcontent += `## Initial Query\\n\\n**Question:** ${exchange.question}\\n\\n**Response:**\\n\\n${exchange.answer}\\n\\n`;\n\t\t} else {\n\t\t\tcontent += `## Follow-up ${index}\\n\\n**Question:** ${exchange.question}\\n\\n**Response:**\\n\\n${exchange.answer}\\n\\n`;\n\t\t}\n\n\t\tif (index < conversationHistory.length - 1) {\n\t\t\tcontent += '---\\n\\n';\n\t\t}\n\t});\n\n\t// Add footer\n\tcontent += `\\n---\\n\\n*Generated by Task Master Research Command* \\n*Timestamp: ${timestamp}*\\n`;\n\n\treturn content;\n}\n\n/**\n * Format conversation 
history for saving to a task/subtask\n * @param {Array} conversationHistory - Array of conversation exchanges\n * @returns {string} Formatted conversation thread\n */\nfunction formatConversationForSaving(conversationHistory) {\n\tconst timestamp = new Date().toISOString();\n\tlet formatted = `## Research Session - ${new Date().toLocaleDateString()} ${new Date().toLocaleTimeString()}\\n\\n`;\n\n\tconversationHistory.forEach((exchange, index) => {\n\t\tif (exchange.type === 'initial') {\n\t\t\tformatted += `**Initial Query:** ${exchange.question}\\n\\n`;\n\t\t\tformatted += `**Response:** ${exchange.answer}\\n\\n`;\n\t\t} else {\n\t\t\tformatted += `**Follow-up ${index}:** ${exchange.question}\\n\\n`;\n\t\t\tformatted += `**Response:** ${exchange.answer}\\n\\n`;\n\t\t}\n\n\t\tif (index < conversationHistory.length - 1) {\n\t\t\tformatted += '---\\n\\n';\n\t\t}\n\t});\n\n\treturn formatted;\n}\n\n/**\n * Build conversation context string from conversation history\n * @param {Array} conversationHistory - Array of conversation exchanges\n * @returns {string} Formatted conversation context\n */\nfunction buildConversationContext(conversationHistory) {\n\tif (conversationHistory.length === 0) {\n\t\treturn '';\n\t}\n\n\tconst contextParts = ['--- Conversation History ---'];\n\n\tconversationHistory.forEach((exchange, index) => {\n\t\tconst questionLabel =\n\t\t\texchange.type === 'initial' ? 'Initial Question' : `Follow-up ${index}`;\n\t\tconst answerLabel =\n\t\t\texchange.type === 'initial' ? 
'Initial Answer' : `Answer ${index}`;\n\n\t\tcontextParts.push(`\\n${questionLabel}: ${exchange.question}`);\n\t\tcontextParts.push(`${answerLabel}: ${exchange.answer}`);\n\t});\n\n\treturn contextParts.join('\\n');\n}\n\nexport { performResearch };\n"], ["/claude-task-master/scripts/modules/dependency-manager.js", "/**\n * dependency-manager.js\n * Manages task dependencies and relationships\n */\n\nimport path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\n\nimport {\n\tlog,\n\treadJSON,\n\twriteJSON,\n\ttaskExists,\n\tformatTaskId,\n\tfindCycles,\n\tisSilentMode\n} from './utils.js';\n\nimport { displayBanner } from './ui.js';\n\nimport { generateTaskFiles } from './task-manager.js';\n\n/**\n * Add a dependency to a task\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {number|string} taskId - ID of the task to add dependency to\n * @param {number|string} dependencyId - ID of the task to add as dependency\n * @param {Object} context - Context object containing projectRoot and tag information\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n */\nasync function addDependency(tasksPath, taskId, dependencyId, context = {}) {\n\tlog('info', `Adding dependency ${dependencyId} to task ${taskId}...`);\n\n\tconst data = readJSON(tasksPath, context.projectRoot, context.tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found in tasks.json');\n\t\tprocess.exit(1);\n\t}\n\n\t// Format the task and dependency IDs correctly\n\tconst formattedTaskId =\n\t\ttypeof taskId === 'string' && taskId.includes('.')\n\t\t\t? 
taskId\n\t\t\t: parseInt(taskId, 10);\n\n\tconst formattedDependencyId = formatTaskId(dependencyId);\n\n\t// Check if the dependency task or subtask actually exists\n\tif (!taskExists(data.tasks, formattedDependencyId)) {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`Dependency target ${formattedDependencyId} does not exist in tasks.json`\n\t\t);\n\t\tprocess.exit(1);\n\t}\n\n\t// Find the task to update\n\tlet targetTask = null;\n\tlet isSubtask = false;\n\n\tif (typeof formattedTaskId === 'string' && formattedTaskId.includes('.')) {\n\t\t// Handle dot notation for subtasks (e.g., \"1.2\")\n\t\tconst [parentId, subtaskId] = formattedTaskId\n\t\t\t.split('.')\n\t\t\t.map((id) => parseInt(id, 10));\n\t\tconst parentTask = data.tasks.find((t) => t.id === parentId);\n\n\t\tif (!parentTask) {\n\t\t\tlog('error', `Parent task ${parentId} not found.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\n\t\tif (!parentTask.subtasks) {\n\t\t\tlog('error', `Parent task ${parentId} has no subtasks.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\n\t\ttargetTask = parentTask.subtasks.find((s) => s.id === subtaskId);\n\t\tisSubtask = true;\n\n\t\tif (!targetTask) {\n\t\t\tlog('error', `Subtask ${formattedTaskId} not found.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\t} else {\n\t\t// Regular task (not a subtask)\n\t\ttargetTask = data.tasks.find((t) => t.id === formattedTaskId);\n\n\t\tif (!targetTask) {\n\t\t\tlog('error', `Task ${formattedTaskId} not found.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\t}\n\n\t// Initialize dependencies array if it doesn't exist\n\tif (!targetTask.dependencies) {\n\t\ttargetTask.dependencies = [];\n\t}\n\n\t// Check if dependency already exists\n\tif (\n\t\ttargetTask.dependencies.some((d) => {\n\t\t\t// Convert both to strings for comparison to handle both numeric and string IDs\n\t\t\treturn String(d) === String(formattedDependencyId);\n\t\t})\n\t) {\n\t\tlog(\n\t\t\t'warn',\n\t\t\t`Dependency ${formattedDependencyId} already exists in task ${formattedTaskId}.`\n\t\t);\n\t\treturn;\n\t}\n\n\t// 
Check if the task is trying to depend on itself - compare full IDs (including subtask parts)\n\tif (String(formattedTaskId) === String(formattedDependencyId)) {\n\t\tlog('error', `Task ${formattedTaskId} cannot depend on itself.`);\n\t\tprocess.exit(1);\n\t}\n\n\t// For subtasks of the same parent, we need to make sure we're not treating it as a self-dependency\n\t// Check if we're dealing with subtasks with the same parent task\n\tlet isSelfDependency = false;\n\n\tif (\n\t\ttypeof formattedTaskId === 'string' &&\n\t\ttypeof formattedDependencyId === 'string' &&\n\t\tformattedTaskId.includes('.') &&\n\t\tformattedDependencyId.includes('.')\n\t) {\n\t\tconst [taskParentId] = formattedTaskId.split('.');\n\t\tconst [depParentId] = formattedDependencyId.split('.');\n\n\t\t// Only treat it as a self-dependency if both the parent ID and subtask ID are identical\n\t\tisSelfDependency = formattedTaskId === formattedDependencyId;\n\n\t\t// Log for debugging\n\t\tlog(\n\t\t\t'debug',\n\t\t\t`Adding dependency between subtasks: ${formattedTaskId} depends on ${formattedDependencyId}`\n\t\t);\n\t\tlog(\n\t\t\t'debug',\n\t\t\t`Parent IDs: ${taskParentId} and ${depParentId}, Self-dependency check: ${isSelfDependency}`\n\t\t);\n\t}\n\n\tif (isSelfDependency) {\n\t\tlog('error', `Subtask ${formattedTaskId} cannot depend on itself.`);\n\t\tprocess.exit(1);\n\t}\n\n\t// Check for circular dependencies\n\tconst dependencyChain = [formattedTaskId];\n\tif (\n\t\t!isCircularDependency(data.tasks, formattedDependencyId, dependencyChain)\n\t) {\n\t\t// Add the dependency\n\t\ttargetTask.dependencies.push(formattedDependencyId);\n\n\t\t// Sort dependencies numerically or by parent task ID first, then subtask ID\n\t\ttargetTask.dependencies.sort((a, b) => {\n\t\t\tif (typeof a === 'number' && typeof b === 'number') {\n\t\t\t\treturn a - b;\n\t\t\t} else if (typeof a === 'string' && typeof b === 'string') {\n\t\t\t\tconst [aParent, aChild] = a.split('.').map(Number);\n\t\t\t\tconst [bParent, 
bChild] = b.split('.').map(Number);\n\t\t\t\treturn aParent !== bParent ? aParent - bParent : aChild - bChild;\n\t\t\t} else if (typeof a === 'number') {\n\t\t\t\treturn -1; // Numbers come before strings\n\t\t\t} else {\n\t\t\t\treturn 1; // Strings come after numbers\n\t\t\t}\n\t\t});\n\n\t\t// Save changes\n\t\twriteJSON(tasksPath, data, context.projectRoot, context.tag);\n\t\tlog(\n\t\t\t'success',\n\t\t\t`Added dependency ${formattedDependencyId} to task ${formattedTaskId}`\n\t\t);\n\n\t\t// Display a more visually appealing success message\n\t\tif (!isSilentMode()) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.green(`Successfully added dependency:\\n\\n`) +\n\t\t\t\t\t\t`Task ${chalk.bold(formattedTaskId)} now depends on ${chalk.bold(formattedDependencyId)}`,\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\t// Generate updated task files\n\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath));\n\n\t\tlog('info', 'Task files regenerated with updated dependencies.');\n\t} else {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`Cannot add dependency ${formattedDependencyId} to task ${formattedTaskId} as it would create a circular dependency.`\n\t\t);\n\t\tprocess.exit(1);\n\t}\n}\n\n/**\n * Remove a dependency from a task\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {number|string} taskId - ID of the task to remove dependency from\n * @param {number|string} dependencyId - ID of the task to remove as dependency\n * @param {Object} context - Context object containing projectRoot and tag information\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n */\nasync function removeDependency(tasksPath, taskId, dependencyId, context = {}) {\n\tlog('info', `Removing dependency ${dependencyId} from task ${taskId}...`);\n\n\t// Read tasks 
file\n\tconst data = readJSON(tasksPath, context.projectRoot, context.tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found.');\n\t\tprocess.exit(1);\n\t}\n\n\t// Format the task and dependency IDs correctly\n\tconst formattedTaskId =\n\t\ttypeof taskId === 'string' && taskId.includes('.')\n\t\t\t? taskId\n\t\t\t: parseInt(taskId, 10);\n\n\tconst formattedDependencyId = formatTaskId(dependencyId);\n\n\t// Find the task to update\n\tlet targetTask = null;\n\tlet isSubtask = false;\n\n\tif (typeof formattedTaskId === 'string' && formattedTaskId.includes('.')) {\n\t\t// Handle dot notation for subtasks (e.g., \"1.2\")\n\t\tconst [parentId, subtaskId] = formattedTaskId\n\t\t\t.split('.')\n\t\t\t.map((id) => parseInt(id, 10));\n\t\tconst parentTask = data.tasks.find((t) => t.id === parentId);\n\n\t\tif (!parentTask) {\n\t\t\tlog('error', `Parent task ${parentId} not found.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\n\t\tif (!parentTask.subtasks) {\n\t\t\tlog('error', `Parent task ${parentId} has no subtasks.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\n\t\ttargetTask = parentTask.subtasks.find((s) => s.id === subtaskId);\n\t\tisSubtask = true;\n\n\t\tif (!targetTask) {\n\t\t\tlog('error', `Subtask ${formattedTaskId} not found.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\t} else {\n\t\t// Regular task (not a subtask)\n\t\ttargetTask = data.tasks.find((t) => t.id === formattedTaskId);\n\n\t\tif (!targetTask) {\n\t\t\tlog('error', `Task ${formattedTaskId} not found.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\t}\n\n\t// Check if the task has any dependencies\n\tif (!targetTask.dependencies || targetTask.dependencies.length === 0) {\n\t\tlog(\n\t\t\t'info',\n\t\t\t`Task ${formattedTaskId} has no dependencies, nothing to remove.`\n\t\t);\n\t\treturn;\n\t}\n\n\t// Normalize the dependency ID for comparison to handle different formats\n\tconst normalizedDependencyId = String(formattedDependencyId);\n\n\t// Check if the dependency exists by comparing string representations\n\tconst 
dependencyIndex = targetTask.dependencies.findIndex((dep) => {\n\t\t// Convert both to strings for comparison\n\t\tlet depStr = String(dep);\n\n\t\t// Special handling for numeric IDs that might be subtask references\n\t\tif (typeof dep === 'number' && dep < 100 && isSubtask) {\n\t\t\t// It's likely a reference to another subtask in the same parent task\n\t\t\t// Convert to full format for comparison (e.g., 2 -> \"1.2\" for a subtask in task 1)\n\t\t\tconst [parentId] = formattedTaskId.split('.');\n\t\t\tdepStr = `${parentId}.${dep}`;\n\t\t}\n\n\t\treturn depStr === normalizedDependencyId;\n\t});\n\n\tif (dependencyIndex === -1) {\n\t\tlog(\n\t\t\t'info',\n\t\t\t`Task ${formattedTaskId} does not depend on ${formattedDependencyId}, no changes made.`\n\t\t);\n\t\treturn;\n\t}\n\n\t// Remove the dependency\n\ttargetTask.dependencies.splice(dependencyIndex, 1);\n\n\t// Save the updated tasks\n\twriteJSON(tasksPath, data, context.projectRoot, context.tag);\n\n\t// Success message\n\tlog(\n\t\t'success',\n\t\t`Removed dependency: Task ${formattedTaskId} no longer depends on ${formattedDependencyId}`\n\t);\n\n\tif (!isSilentMode()) {\n\t\t// Display a more visually appealing success message\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.green(`Successfully removed dependency:\\n\\n`) +\n\t\t\t\t\t`Task ${chalk.bold(formattedTaskId)} no longer depends on ${chalk.bold(formattedDependencyId)}`,\n\t\t\t\t{\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n\n\t// Regenerate task files\n\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath));\n}\n\n/**\n * Check if adding a dependency would create a circular dependency\n * @param {Array} tasks - Array of all tasks\n * @param {number|string} taskId - ID of task to check\n * @param {Array} chain - Chain of dependencies to check\n * @returns {boolean} True if circular dependency would be created\n */\nfunction 
isCircularDependency(tasks, taskId, chain = []) {\n\t// Convert taskId to string for comparison\n\tconst taskIdStr = String(taskId);\n\n\t// If we've seen this task before in the chain, we have a circular dependency\n\tif (chain.some((id) => String(id) === taskIdStr)) {\n\t\treturn true;\n\t}\n\n\t// Find the task or subtask\n\tlet task = null;\n\tlet parentIdForSubtask = null;\n\n\t// Check if this is a subtask reference (e.g., \"1.2\")\n\tif (taskIdStr.includes('.')) {\n\t\tconst [parentId, subtaskId] = taskIdStr.split('.').map(Number);\n\t\tconst parentTask = tasks.find((t) => t.id === parentId);\n\t\tparentIdForSubtask = parentId; // Store parent ID if it's a subtask\n\n\t\tif (parentTask && parentTask.subtasks) {\n\t\t\ttask = parentTask.subtasks.find((st) => st.id === subtaskId);\n\t\t}\n\t} else {\n\t\t// Regular task\n\t\ttask = tasks.find((t) => String(t.id) === taskIdStr);\n\t}\n\n\tif (!task) {\n\t\treturn false; // Task doesn't exist, can't create circular dependency\n\t}\n\n\t// No dependencies, can't create circular dependency\n\tif (!task.dependencies || task.dependencies.length === 0) {\n\t\treturn false;\n\t}\n\n\t// Check each dependency recursively\n\tconst newChain = [...chain, taskIdStr]; // Use taskIdStr for consistency\n\treturn task.dependencies.some((depId) => {\n\t\tlet normalizedDepId = String(depId);\n\t\t// Normalize relative subtask dependencies\n\t\tif (typeof depId === 'number' && parentIdForSubtask !== null) {\n\t\t\t// If the current task is a subtask AND the dependency is a number,\n\t\t\t// assume it refers to a sibling subtask.\n\t\t\tnormalizedDepId = `${parentIdForSubtask}.${depId}`;\n\t\t}\n\t\t// Pass the normalized ID to the recursive call\n\t\treturn isCircularDependency(tasks, normalizedDepId, newChain);\n\t});\n}\n\n/**\n * Validate task dependencies\n * @param {Array} tasks - Array of all tasks\n * @returns {Object} Validation result with valid flag and issues array\n */\nfunction validateTaskDependencies(tasks) 
{\n\tconst issues = [];\n\n\t// Check each task's dependencies\n\ttasks.forEach((task) => {\n\t\tif (!task.dependencies) {\n\t\t\treturn; // No dependencies to validate\n\t\t}\n\n\t\ttask.dependencies.forEach((depId) => {\n\t\t\t// Check for self-dependencies\n\t\t\tif (String(depId) === String(task.id)) {\n\t\t\t\tissues.push({\n\t\t\t\t\ttype: 'self',\n\t\t\t\t\ttaskId: task.id,\n\t\t\t\t\tmessage: `Task ${task.id} depends on itself`\n\t\t\t\t});\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Check if dependency exists\n\t\t\tif (!taskExists(tasks, depId)) {\n\t\t\t\tissues.push({\n\t\t\t\t\ttype: 'missing',\n\t\t\t\t\ttaskId: task.id,\n\t\t\t\t\tdependencyId: depId,\n\t\t\t\t\tmessage: `Task ${task.id} depends on non-existent task ${depId}`\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Check for circular dependencies\n\t\tif (isCircularDependency(tasks, task.id)) {\n\t\t\tissues.push({\n\t\t\t\ttype: 'circular',\n\t\t\t\ttaskId: task.id,\n\t\t\t\tmessage: `Task ${task.id} is part of a circular dependency chain`\n\t\t\t});\n\t\t}\n\n\t\t// Check subtask dependencies if they exist\n\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\tif (!subtask.dependencies) {\n\t\t\t\t\treturn; // No dependencies to validate\n\t\t\t\t}\n\n\t\t\t\t// Create a full subtask ID for reference\n\t\t\t\tconst fullSubtaskId = `${task.id}.${subtask.id}`;\n\n\t\t\t\tsubtask.dependencies.forEach((depId) => {\n\t\t\t\t\t// Check for self-dependencies in subtasks\n\t\t\t\t\tif (\n\t\t\t\t\t\tString(depId) === String(fullSubtaskId) ||\n\t\t\t\t\t\t(typeof depId === 'number' && depId === subtask.id)\n\t\t\t\t\t) {\n\t\t\t\t\t\tissues.push({\n\t\t\t\t\t\t\ttype: 'self',\n\t\t\t\t\t\t\ttaskId: fullSubtaskId,\n\t\t\t\t\t\t\tmessage: `Subtask ${fullSubtaskId} depends on itself`\n\t\t\t\t\t\t});\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\n\t\t\t\t\t// Check if dependency exists\n\t\t\t\t\tif (!taskExists(tasks, depId)) 
{\n\t\t\t\t\t\tissues.push({\n\t\t\t\t\t\t\ttype: 'missing',\n\t\t\t\t\t\t\ttaskId: fullSubtaskId,\n\t\t\t\t\t\t\tdependencyId: depId,\n\t\t\t\t\t\t\tmessage: `Subtask ${fullSubtaskId} depends on non-existent task/subtask ${depId}`\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t});\n\n\t\t\t\t// Check for circular dependencies in subtasks\n\t\t\t\tif (isCircularDependency(tasks, fullSubtaskId)) {\n\t\t\t\t\tissues.push({\n\t\t\t\t\t\ttype: 'circular',\n\t\t\t\t\t\ttaskId: fullSubtaskId,\n\t\t\t\t\t\tmessage: `Subtask ${fullSubtaskId} is part of a circular dependency chain`\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\n\treturn {\n\t\tvalid: issues.length === 0,\n\t\tissues\n\t};\n}\n\n/**\n * Remove duplicate dependencies from tasks\n * @param {Object} tasksData - Tasks data object with tasks array\n * @returns {Object} Updated tasks data with duplicates removed\n */\nfunction removeDuplicateDependencies(tasksData) {\n\tconst tasks = tasksData.tasks.map((task) => {\n\t\tif (!task.dependencies) {\n\t\t\treturn task;\n\t\t}\n\n\t\t// Convert to Set and back to array to remove duplicates\n\t\tconst uniqueDeps = [...new Set(task.dependencies)];\n\t\treturn {\n\t\t\t...task,\n\t\t\tdependencies: uniqueDeps\n\t\t};\n\t});\n\n\treturn {\n\t\t...tasksData,\n\t\ttasks\n\t};\n}\n\n/**\n * Clean up invalid subtask dependencies\n * @param {Object} tasksData - Tasks data object with tasks array\n * @returns {Object} Updated tasks data with invalid subtask dependencies removed\n */\nfunction cleanupSubtaskDependencies(tasksData) {\n\tconst tasks = tasksData.tasks.map((task) => {\n\t\t// Handle task's own dependencies\n\t\tif (task.dependencies) {\n\t\t\ttask.dependencies = task.dependencies.filter((depId) => {\n\t\t\t\t// Keep only dependencies that exist\n\t\t\t\treturn taskExists(tasksData.tasks, depId);\n\t\t\t});\n\t\t}\n\n\t\t// Handle subtask dependencies\n\t\tif (task.subtasks) {\n\t\t\ttask.subtasks = task.subtasks.map((subtask) => {\n\t\t\t\tif (!subtask.dependencies) 
{\n\t\t\t\t\treturn subtask;\n\t\t\t\t}\n\n\t\t\t\t// Filter out dependencies to non-existent subtasks\n\t\t\t\tsubtask.dependencies = subtask.dependencies.filter((depId) => {\n\t\t\t\t\treturn taskExists(tasksData.tasks, depId);\n\t\t\t\t});\n\n\t\t\t\treturn subtask;\n\t\t\t});\n\t\t}\n\n\t\treturn task;\n\t});\n\n\treturn {\n\t\t...tasksData,\n\t\ttasks\n\t};\n}\n\n/**\n * Validate dependencies in task files\n * @param {string} tasksPath - Path to tasks.json\n * @param {Object} options - Options object, including context\n */\nasync function validateDependenciesCommand(tasksPath, options = {}) {\n\tconst { context = {} } = options;\n\tlog('info', 'Checking for invalid dependencies in task files...');\n\n\t// Read tasks data\n\tconst data = readJSON(tasksPath, context.projectRoot, context.tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found in tasks.json');\n\t\tprocess.exit(1);\n\t}\n\n\t// Count of tasks and subtasks for reporting\n\tconst taskCount = data.tasks.length;\n\tlet subtaskCount = 0;\n\tdata.tasks.forEach((task) => {\n\t\tif (task.subtasks && Array.isArray(task.subtasks)) {\n\t\t\tsubtaskCount += task.subtasks.length;\n\t\t}\n\t});\n\n\tlog(\n\t\t'info',\n\t\t`Analyzing dependencies for ${taskCount} tasks and ${subtaskCount} subtasks...`\n\t);\n\n\ttry {\n\t\t// Directly call the validation function\n\t\tconst validationResult = validateTaskDependencies(data.tasks);\n\n\t\tif (!validationResult.valid) {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`Dependency validation failed. 
Found ${validationResult.issues.length} issue(s):`\n\t\t\t);\n\t\t\tvalidationResult.issues.forEach((issue) => {\n\t\t\t\tlet errorMsg = ` [${issue.type.toUpperCase()}] Task ${issue.taskId}: ${issue.message}`;\n\t\t\t\tif (issue.dependencyId) {\n\t\t\t\t\terrorMsg += ` (Dependency: ${issue.dependencyId})`;\n\t\t\t\t}\n\t\t\t\tlog('error', errorMsg); // Log each issue as an error\n\t\t\t});\n\n\t\t\t// Optionally exit if validation fails, depending on desired behavior\n\t\t\t// process.exit(1); // Uncomment if validation failure should stop the process\n\n\t\t\t// Display summary box even on failure, showing issues found\n\t\t\tif (!isSilentMode()) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.red(`Dependency Validation FAILED\\n\\n`) +\n\t\t\t\t\t\t\t`${chalk.cyan('Tasks checked:')} ${taskCount}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Subtasks checked:')} ${subtaskCount}\\n` +\n\t\t\t\t\t\t\t`${chalk.red('Issues found:')} ${validationResult.issues.length}`, // Display count from result\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\tborderColor: 'red',\n\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\tlog(\n\t\t\t\t'success',\n\t\t\t\t'No invalid dependencies found - all dependencies are valid'\n\t\t\t);\n\n\t\t\t// Show validation summary - only if not in silent mode\n\t\t\tif (!isSilentMode()) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.green(`All Dependencies Are Valid\\n\\n`) +\n\t\t\t\t\t\t\t`${chalk.cyan('Tasks checked:')} ${taskCount}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Subtasks checked:')} ${subtaskCount}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Total dependencies verified:')} ${countAllDependencies(data.tasks)}`,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\tmargin: { top: 1, bottom: 1 
}\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tlog('error', 'Error validating dependencies:', error);\n\t\tprocess.exit(1);\n\t}\n}\n\n/**\n * Helper function to count all dependencies across tasks and subtasks\n * @param {Array} tasks - All tasks\n * @returns {number} - Total number of dependencies\n */\nfunction countAllDependencies(tasks) {\n\tlet count = 0;\n\n\ttasks.forEach((task) => {\n\t\t// Count main task dependencies\n\t\tif (task.dependencies && Array.isArray(task.dependencies)) {\n\t\t\tcount += task.dependencies.length;\n\t\t}\n\n\t\t// Count subtask dependencies\n\t\tif (task.subtasks && Array.isArray(task.subtasks)) {\n\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\tif (subtask.dependencies && Array.isArray(subtask.dependencies)) {\n\t\t\t\t\tcount += subtask.dependencies.length;\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\n\treturn count;\n}\n\n/**\n * Fixes invalid dependencies in tasks.json\n * @param {string} tasksPath - Path to tasks.json\n * @param {Object} options - Options object, including context\n */\nasync function fixDependenciesCommand(tasksPath, options = {}) {\n\tconst { context = {} } = options;\n\tlog('info', 'Checking for and fixing invalid dependencies in tasks.json...');\n\n\ttry {\n\t\t// Read tasks data\n\t\tconst data = readJSON(tasksPath, context.projectRoot, context.tag);\n\t\tif (!data || !data.tasks) {\n\t\t\tlog('error', 'No valid tasks found in tasks.json');\n\t\t\tprocess.exit(1);\n\t\t}\n\n\t\t// Create a deep copy of the original data for comparison\n\t\tconst originalData = JSON.parse(JSON.stringify(data));\n\n\t\t// Track fixes for reporting\n\t\tconst stats = {\n\t\t\tnonExistentDependenciesRemoved: 0,\n\t\t\tselfDependenciesRemoved: 0,\n\t\t\tduplicateDependenciesRemoved: 0,\n\t\t\tcircularDependenciesFixed: 0,\n\t\t\ttasksFixed: 0,\n\t\t\tsubtasksFixed: 0\n\t\t};\n\n\t\t// First phase: Remove duplicate dependencies in tasks\n\t\tdata.tasks.forEach((task) => {\n\t\t\tif 
(task.dependencies && Array.isArray(task.dependencies)) {\n\t\t\t\tconst uniqueDeps = new Set();\n\t\t\t\tconst originalLength = task.dependencies.length;\n\t\t\t\ttask.dependencies = task.dependencies.filter((depId) => {\n\t\t\t\t\tconst depIdStr = String(depId);\n\t\t\t\t\tif (uniqueDeps.has(depIdStr)) {\n\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t`Removing duplicate dependency from task ${task.id}: ${depId}`\n\t\t\t\t\t\t);\n\t\t\t\t\t\tstats.duplicateDependenciesRemoved++;\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t\tuniqueDeps.add(depIdStr);\n\t\t\t\t\treturn true;\n\t\t\t\t});\n\t\t\t\tif (task.dependencies.length < originalLength) {\n\t\t\t\t\tstats.tasksFixed++;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Check for duplicates in subtasks\n\t\t\tif (task.subtasks && Array.isArray(task.subtasks)) {\n\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\tif (subtask.dependencies && Array.isArray(subtask.dependencies)) {\n\t\t\t\t\t\tconst uniqueDeps = new Set();\n\t\t\t\t\t\tconst originalLength = subtask.dependencies.length;\n\t\t\t\t\t\tsubtask.dependencies = subtask.dependencies.filter((depId) => {\n\t\t\t\t\t\t\tlet depIdStr = String(depId);\n\t\t\t\t\t\t\tif (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\t\t\tdepIdStr = `${task.id}.${depId}`;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif (uniqueDeps.has(depIdStr)) {\n\t\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t\t`Removing duplicate dependency from subtask ${task.id}.${subtask.id}: ${depId}`\n\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\tstats.duplicateDependenciesRemoved++;\n\t\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tuniqueDeps.add(depIdStr);\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t});\n\t\t\t\t\t\tif (subtask.dependencies.length < originalLength) {\n\t\t\t\t\t\t\tstats.subtasksFixed++;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Create validity maps for tasks and subtasks\n\t\tconst validTaskIds = new Set(data.tasks.map((t) => 
t.id));\n\t\tconst validSubtaskIds = new Set();\n\t\tdata.tasks.forEach((task) => {\n\t\t\tif (task.subtasks && Array.isArray(task.subtasks)) {\n\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\tvalidSubtaskIds.add(`${task.id}.${subtask.id}`);\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Second phase: Remove invalid task dependencies (non-existent tasks)\n\t\tdata.tasks.forEach((task) => {\n\t\t\tif (task.dependencies && Array.isArray(task.dependencies)) {\n\t\t\t\tconst originalLength = task.dependencies.length;\n\t\t\t\ttask.dependencies = task.dependencies.filter((depId) => {\n\t\t\t\t\tconst isSubtask = typeof depId === 'string' && depId.includes('.');\n\n\t\t\t\t\tif (isSubtask) {\n\t\t\t\t\t\t// Check if the subtask exists\n\t\t\t\t\t\tif (!validSubtaskIds.has(depId)) {\n\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t`Removing invalid subtask dependency from task ${task.id}: ${depId} (subtask does not exist)`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tstats.nonExistentDependenciesRemoved++;\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn true;\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Check if the task exists\n\t\t\t\t\t\tconst numericId =\n\t\t\t\t\t\t\ttypeof depId === 'string' ? 
parseInt(depId, 10) : depId;\n\t\t\t\t\t\tif (!validTaskIds.has(numericId)) {\n\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t`Removing invalid task dependency from task ${task.id}: ${depId} (task does not exist)`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tstats.nonExistentDependenciesRemoved++;\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn true;\n\t\t\t\t\t}\n\t\t\t\t});\n\n\t\t\t\tif (task.dependencies.length < originalLength) {\n\t\t\t\t\tstats.tasksFixed++;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Check subtask dependencies for invalid references\n\t\t\tif (task.subtasks && Array.isArray(task.subtasks)) {\n\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\tif (subtask.dependencies && Array.isArray(subtask.dependencies)) {\n\t\t\t\t\t\tconst originalLength = subtask.dependencies.length;\n\t\t\t\t\t\tconst subtaskId = `${task.id}.${subtask.id}`;\n\n\t\t\t\t\t\t// First check for self-dependencies\n\t\t\t\t\t\tconst hasSelfDependency = subtask.dependencies.some((depId) => {\n\t\t\t\t\t\t\tif (typeof depId === 'string' && depId.includes('.')) {\n\t\t\t\t\t\t\t\treturn depId === subtaskId;\n\t\t\t\t\t\t\t} else if (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\t\t\treturn depId === subtask.id;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\tif (hasSelfDependency) {\n\t\t\t\t\t\t\tsubtask.dependencies = subtask.dependencies.filter((depId) => {\n\t\t\t\t\t\t\t\tconst normalizedDepId =\n\t\t\t\t\t\t\t\t\ttypeof depId === 'number' && depId < 100\n\t\t\t\t\t\t\t\t\t\t? 
`${task.id}.${depId}`\n\t\t\t\t\t\t\t\t\t\t: String(depId);\n\n\t\t\t\t\t\t\t\tif (normalizedDepId === subtaskId) {\n\t\t\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t\t\t`Removing self-dependency from subtask ${subtaskId}`\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\tstats.selfDependenciesRemoved++;\n\t\t\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Then check for non-existent dependencies\n\t\t\t\t\t\tsubtask.dependencies = subtask.dependencies.filter((depId) => {\n\t\t\t\t\t\t\tif (typeof depId === 'string' && depId.includes('.')) {\n\t\t\t\t\t\t\t\tif (!validSubtaskIds.has(depId)) {\n\t\t\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t\t\t`Removing invalid subtask dependency from subtask ${subtaskId}: ${depId} (subtask does not exist)`\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\tstats.nonExistentDependenciesRemoved++;\n\t\t\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// Handle numeric dependencies\n\t\t\t\t\t\t\tconst numericId =\n\t\t\t\t\t\t\t\ttypeof depId === 'number' ? 
depId : parseInt(depId, 10);\n\n\t\t\t\t\t\t\t// Small numbers likely refer to subtasks in the same task\n\t\t\t\t\t\t\tif (numericId < 100) {\n\t\t\t\t\t\t\t\tconst fullSubtaskId = `${task.id}.${numericId}`;\n\n\t\t\t\t\t\t\t\tif (!validSubtaskIds.has(fullSubtaskId)) {\n\t\t\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t\t\t`Removing invalid subtask dependency from subtask ${subtaskId}: ${numericId}`\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\tstats.nonExistentDependenciesRemoved++;\n\t\t\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// Otherwise it's a task reference\n\t\t\t\t\t\t\tif (!validTaskIds.has(numericId)) {\n\t\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t\t`Removing invalid task dependency from subtask ${subtaskId}: ${numericId}`\n\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\tstats.nonExistentDependenciesRemoved++;\n\t\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\tif (subtask.dependencies.length < originalLength) {\n\t\t\t\t\t\t\tstats.subtasksFixed++;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Third phase: Check for circular dependencies\n\t\tlog('info', 'Checking for circular dependencies...');\n\n\t\t// Build the dependency map for subtasks\n\t\tconst subtaskDependencyMap = new Map();\n\t\tdata.tasks.forEach((task) => {\n\t\t\tif (task.subtasks && Array.isArray(task.subtasks)) {\n\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\tconst subtaskId = `${task.id}.${subtask.id}`;\n\n\t\t\t\t\tif (subtask.dependencies && Array.isArray(subtask.dependencies)) {\n\t\t\t\t\t\tconst normalizedDeps = subtask.dependencies.map((depId) => {\n\t\t\t\t\t\t\tif (typeof depId === 'string' && depId.includes('.')) {\n\t\t\t\t\t\t\t\treturn depId;\n\t\t\t\t\t\t\t} else if (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\t\t\treturn 
`${task.id}.${depId}`;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn String(depId);\n\t\t\t\t\t\t});\n\t\t\t\t\t\tsubtaskDependencyMap.set(subtaskId, normalizedDeps);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tsubtaskDependencyMap.set(subtaskId, []);\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Check for and fix circular dependencies\n\t\tfor (const [subtaskId, dependencies] of subtaskDependencyMap.entries()) {\n\t\t\tconst visited = new Set();\n\t\t\tconst recursionStack = new Set();\n\n\t\t\t// Detect cycles\n\t\t\tconst cycleEdges = findCycles(\n\t\t\t\tsubtaskId,\n\t\t\t\tsubtaskDependencyMap,\n\t\t\t\tvisited,\n\t\t\t\trecursionStack\n\t\t\t);\n\n\t\t\tif (cycleEdges.length > 0) {\n\t\t\t\tconst [taskId, subtaskNum] = subtaskId\n\t\t\t\t\t.split('.')\n\t\t\t\t\t.map((part) => Number(part));\n\t\t\t\tconst task = data.tasks.find((t) => t.id === taskId);\n\n\t\t\t\tif (task && task.subtasks) {\n\t\t\t\t\tconst subtask = task.subtasks.find((st) => st.id === subtaskNum);\n\n\t\t\t\t\tif (subtask && subtask.dependencies) {\n\t\t\t\t\t\tconst originalLength = subtask.dependencies.length;\n\n\t\t\t\t\t\tconst edgesToRemove = cycleEdges.map((edge) => {\n\t\t\t\t\t\t\tif (edge.includes('.')) {\n\t\t\t\t\t\t\t\tconst [depTaskId, depSubtaskId] = edge\n\t\t\t\t\t\t\t\t\t.split('.')\n\t\t\t\t\t\t\t\t\t.map((part) => Number(part));\n\n\t\t\t\t\t\t\t\tif (depTaskId === taskId) {\n\t\t\t\t\t\t\t\t\treturn depSubtaskId;\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\treturn edge;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\treturn Number(edge);\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\tsubtask.dependencies = subtask.dependencies.filter((depId) => {\n\t\t\t\t\t\t\tconst normalizedDepId =\n\t\t\t\t\t\t\t\ttypeof depId === 'number' && depId < 100\n\t\t\t\t\t\t\t\t\t? 
`${taskId}.${depId}`\n\t\t\t\t\t\t\t\t\t: String(depId);\n\n\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\tedgesToRemove.includes(depId) ||\n\t\t\t\t\t\t\t\tedgesToRemove.includes(normalizedDepId)\n\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t\t`Breaking circular dependency: Removing ${normalizedDepId} from subtask ${subtaskId}`\n\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\tstats.circularDependenciesFixed++;\n\t\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\tif (subtask.dependencies.length < originalLength) {\n\t\t\t\t\t\t\tstats.subtasksFixed++;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Check if any changes were made by comparing with original data\n\t\tconst dataChanged = JSON.stringify(data) !== JSON.stringify(originalData);\n\n\t\tif (dataChanged) {\n\t\t\t// Save the changes\n\t\t\twriteJSON(tasksPath, data, context.projectRoot, context.tag);\n\t\t\tlog('success', 'Fixed dependency issues in tasks.json');\n\n\t\t\t// Regenerate task files\n\t\t\tlog('info', 'Regenerating task files to reflect dependency changes...');\n\t\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath));\n\t\t} else {\n\t\t\tlog('info', 'No changes needed to fix dependencies');\n\t\t}\n\n\t\t// Show detailed statistics report\n\t\tconst totalFixedAll =\n\t\t\tstats.nonExistentDependenciesRemoved +\n\t\t\tstats.selfDependenciesRemoved +\n\t\t\tstats.duplicateDependenciesRemoved +\n\t\t\tstats.circularDependenciesFixed;\n\n\t\tif (!isSilentMode()) {\n\t\t\tif (totalFixedAll > 0) {\n\t\t\t\tlog('success', `Fixed ${totalFixedAll} dependency issues in total!`);\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.green(`Dependency Fixes Summary:\\n\\n`) +\n\t\t\t\t\t\t\t`${chalk.cyan('Invalid dependencies removed:')} ${stats.nonExistentDependenciesRemoved}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Self-dependencies removed:')} ${stats.selfDependenciesRemoved}\\n` 
+\n\t\t\t\t\t\t\t`${chalk.cyan('Duplicate dependencies removed:')} ${stats.duplicateDependenciesRemoved}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Circular dependencies fixed:')} ${stats.circularDependenciesFixed}\\n\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Tasks fixed:')} ${stats.tasksFixed}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Subtasks fixed:')} ${stats.subtasksFixed}\\n`,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'success',\n\t\t\t\t\t'No dependency issues found - all dependencies are valid'\n\t\t\t\t);\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.green(`All Dependencies Are Valid\\n\\n`) +\n\t\t\t\t\t\t\t`${chalk.cyan('Tasks checked:')} ${data.tasks.length}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Total dependencies verified:')} ${countAllDependencies(data.tasks)}`,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tlog('error', 'Error in fix-dependencies command:', error);\n\t\tprocess.exit(1);\n\t}\n}\n\n/**\n * Ensure at least one subtask in each task has no dependencies\n * @param {Object} tasksData - The tasks data object with tasks array\n * @returns {boolean} - True if any changes were made\n */\nfunction ensureAtLeastOneIndependentSubtask(tasksData) {\n\tif (!tasksData || !tasksData.tasks || !Array.isArray(tasksData.tasks)) {\n\t\treturn false;\n\t}\n\n\tlet changesDetected = false;\n\n\ttasksData.tasks.forEach((task) => {\n\t\tif (\n\t\t\t!task.subtasks ||\n\t\t\t!Array.isArray(task.subtasks) ||\n\t\t\ttask.subtasks.length === 0\n\t\t) {\n\t\t\treturn;\n\t\t}\n\n\t\t// Check if any subtask has no dependencies\n\t\tconst hasIndependentSubtask = 
task.subtasks.some(\n\t\t\t(st) =>\n\t\t\t\t!st.dependencies ||\n\t\t\t\t!Array.isArray(st.dependencies) ||\n\t\t\t\tst.dependencies.length === 0\n\t\t);\n\n\t\tif (!hasIndependentSubtask) {\n\t\t\t// Find the first subtask and clear its dependencies\n\t\t\tif (task.subtasks.length > 0) {\n\t\t\t\tconst firstSubtask = task.subtasks[0];\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`Ensuring at least one independent subtask: Clearing dependencies for subtask ${task.id}.${firstSubtask.id}`\n\t\t\t\t);\n\t\t\t\tfirstSubtask.dependencies = [];\n\t\t\t\tchangesDetected = true;\n\t\t\t}\n\t\t}\n\t});\n\n\treturn changesDetected;\n}\n\n/**\n * Validate and fix dependencies across all tasks and subtasks\n * This function is designed to be called after any task modification\n * @param {Object} tasksData - The tasks data object with tasks array\n * @param {string} tasksPath - Optional path to save the changes\n * @param {string} projectRoot - Optional project root for tag context\n * @param {string} tag - Optional tag for tag context\n * @returns {boolean} - True if any changes were made\n */\nfunction validateAndFixDependencies(\n\ttasksData,\n\ttasksPath = null,\n\tprojectRoot = null,\n\ttag = null\n) {\n\tif (!tasksData || !tasksData.tasks || !Array.isArray(tasksData.tasks)) {\n\t\tlog('error', 'Invalid tasks data');\n\t\treturn false;\n\t}\n\n\tlog('debug', 'Validating and fixing dependencies...');\n\n\t// Create a deep copy for comparison\n\tconst originalData = JSON.parse(JSON.stringify(tasksData));\n\n\t// 1. 
Remove duplicate dependencies from tasks and subtasks\n\ttasksData.tasks = tasksData.tasks.map((task) => {\n\t\t// Handle task dependencies\n\t\tif (task.dependencies) {\n\t\t\tconst uniqueDeps = [...new Set(task.dependencies)];\n\t\t\ttask.dependencies = uniqueDeps;\n\t\t}\n\n\t\t// Handle subtask dependencies\n\t\tif (task.subtasks) {\n\t\t\ttask.subtasks = task.subtasks.map((subtask) => {\n\t\t\t\tif (subtask.dependencies) {\n\t\t\t\t\tconst uniqueDeps = [...new Set(subtask.dependencies)];\n\t\t\t\t\tsubtask.dependencies = uniqueDeps;\n\t\t\t\t}\n\t\t\t\treturn subtask;\n\t\t\t});\n\t\t}\n\t\treturn task;\n\t});\n\n\t// 2. Remove invalid task dependencies (non-existent tasks)\n\ttasksData.tasks.forEach((task) => {\n\t\t// Clean up task dependencies\n\t\tif (task.dependencies) {\n\t\t\ttask.dependencies = task.dependencies.filter((depId) => {\n\t\t\t\t// Remove self-dependencies\n\t\t\t\tif (String(depId) === String(task.id)) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t\t// Remove non-existent dependencies\n\t\t\t\treturn taskExists(tasksData.tasks, depId);\n\t\t\t});\n\t\t}\n\n\t\t// Clean up subtask dependencies\n\t\tif (task.subtasks) {\n\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\tif (subtask.dependencies) {\n\t\t\t\t\tsubtask.dependencies = subtask.dependencies.filter((depId) => {\n\t\t\t\t\t\t// Handle numeric subtask references\n\t\t\t\t\t\tif (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\t\tconst fullSubtaskId = `${task.id}.${depId}`;\n\t\t\t\t\t\t\treturn taskExists(tasksData.tasks, fullSubtaskId);\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// Handle full task/subtask references\n\t\t\t\t\t\treturn taskExists(tasksData.tasks, depId);\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\n\t// 3. 
Ensure at least one subtask has no dependencies in each task\n\ttasksData.tasks.forEach((task) => {\n\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\tconst hasIndependentSubtask = task.subtasks.some(\n\t\t\t\t(st) =>\n\t\t\t\t\t!st.dependencies ||\n\t\t\t\t\t!Array.isArray(st.dependencies) ||\n\t\t\t\t\tst.dependencies.length === 0\n\t\t\t);\n\n\t\t\tif (!hasIndependentSubtask) {\n\t\t\t\ttask.subtasks[0].dependencies = [];\n\t\t\t}\n\t\t}\n\t});\n\n\t// Check if any changes were made by comparing with original data\n\tconst changesDetected =\n\t\tJSON.stringify(tasksData) !== JSON.stringify(originalData);\n\n\t// Save changes if needed\n\tif (tasksPath && changesDetected) {\n\t\ttry {\n\t\t\twriteJSON(tasksPath, tasksData, projectRoot, tag);\n\t\t\tlog('debug', 'Saved dependency fixes to tasks.json');\n\t\t} catch (error) {\n\t\t\tlog('error', 'Failed to save dependency fixes to tasks.json', error);\n\t\t}\n\t}\n\n\treturn changesDetected;\n}\n\nexport {\n\taddDependency,\n\tremoveDependency,\n\tisCircularDependency,\n\tvalidateTaskDependencies,\n\tvalidateDependenciesCommand,\n\tfixDependenciesCommand,\n\tremoveDuplicateDependencies,\n\tcleanupSubtaskDependencies,\n\tensureAtLeastOneIndependentSubtask,\n\tvalidateAndFixDependencies\n};\n"], ["/claude-task-master/scripts/modules/task-manager/update-single-task-status.js", "import chalk from 'chalk';\n\nimport { log } from '../utils.js';\nimport { isValidTaskStatus } from '../../../src/constants/task-status.js';\n\n/**\n * Update the status of a single task\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} taskIdInput - Task ID to update\n * @param {string} newStatus - New status\n * @param {Object} data - Tasks data\n * @param {boolean} showUi - Whether to show UI elements\n */\nasync function updateSingleTaskStatus(\n\ttasksPath,\n\ttaskIdInput,\n\tnewStatus,\n\tdata,\n\tshowUi = true\n) {\n\tif (!isValidTaskStatus(newStatus)) {\n\t\tthrow new Error(\n\t\t\t`Error: Invalid 
status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`\n\t\t);\n\t}\n\n\t// Check if it's a subtask (e.g., \"1.2\")\n\tif (taskIdInput.includes('.')) {\n\t\tconst [parentId, subtaskId] = taskIdInput\n\t\t\t.split('.')\n\t\t\t.map((id) => parseInt(id, 10));\n\n\t\t// Find the parent task\n\t\tconst parentTask = data.tasks.find((t) => t.id === parentId);\n\t\tif (!parentTask) {\n\t\t\tthrow new Error(`Parent task ${parentId} not found`);\n\t\t}\n\n\t\t// Find the subtask\n\t\tif (!parentTask.subtasks) {\n\t\t\tthrow new Error(`Parent task ${parentId} has no subtasks`);\n\t\t}\n\n\t\tconst subtask = parentTask.subtasks.find((st) => st.id === subtaskId);\n\t\tif (!subtask) {\n\t\t\tthrow new Error(\n\t\t\t\t`Subtask ${subtaskId} not found in parent task ${parentId}`\n\t\t\t);\n\t\t}\n\n\t\t// Update the subtask status\n\t\tconst oldStatus = subtask.status || 'pending';\n\t\tsubtask.status = newStatus;\n\n\t\tlog(\n\t\t\t'info',\n\t\t\t`Updated subtask ${parentId}.${subtaskId} status from '${oldStatus}' to '${newStatus}'`\n\t\t);\n\n\t\t// Check if all subtasks are done (if setting to 'done')\n\t\tif (\n\t\t\tnewStatus.toLowerCase() === 'done' ||\n\t\t\tnewStatus.toLowerCase() === 'completed'\n\t\t) {\n\t\t\tconst allSubtasksDone = parentTask.subtasks.every(\n\t\t\t\t(st) => st.status === 'done' || st.status === 'completed'\n\t\t\t);\n\n\t\t\t// Suggest updating parent task if all subtasks are done\n\t\t\tif (\n\t\t\t\tallSubtasksDone &&\n\t\t\t\tparentTask.status !== 'done' &&\n\t\t\t\tparentTask.status !== 'completed'\n\t\t\t) {\n\t\t\t\t// Only show suggestion in CLI mode\n\t\t\t\tif (showUi) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`All subtasks of parent task ${parentId} are now marked as done.`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`Consider updating the parent task status with: task-master set-status --id=${parentId} 
--status=done`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// Handle regular task\n\t\tconst taskId = parseInt(taskIdInput, 10);\n\t\tconst task = data.tasks.find((t) => t.id === taskId);\n\n\t\tif (!task) {\n\t\t\tthrow new Error(`Task ${taskId} not found`);\n\t\t}\n\n\t\t// Update the task status\n\t\tconst oldStatus = task.status || 'pending';\n\t\ttask.status = newStatus;\n\n\t\tlog(\n\t\t\t'info',\n\t\t\t`Updated task ${taskId} status from '${oldStatus}' to '${newStatus}'`\n\t\t);\n\n\t\t// If marking as done, also mark all subtasks as done\n\t\tif (\n\t\t\t(newStatus.toLowerCase() === 'done' ||\n\t\t\t\tnewStatus.toLowerCase() === 'completed') &&\n\t\t\ttask.subtasks &&\n\t\t\ttask.subtasks.length > 0\n\t\t) {\n\t\t\tconst pendingSubtasks = task.subtasks.filter(\n\t\t\t\t(st) => st.status !== 'done' && st.status !== 'completed'\n\t\t\t);\n\n\t\t\tif (pendingSubtasks.length > 0) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Also marking ${pendingSubtasks.length} subtasks as '${newStatus}'`\n\t\t\t\t);\n\n\t\t\t\tpendingSubtasks.forEach((subtask) => {\n\t\t\t\t\tsubtask.status = newStatus;\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n}\n\nexport default updateSingleTaskStatus;\n"], ["/claude-task-master/scripts/modules/task-manager/list-tasks.js", "import chalk from 'chalk';\nimport boxen from 'boxen';\nimport Table from 'cli-table3';\n\nimport {\n\tlog,\n\treadJSON,\n\ttruncate,\n\treadComplexityReport,\n\taddComplexityToTask\n} from '../utils.js';\nimport findNextTask from './find-next-task.js';\n\nimport {\n\tdisplayBanner,\n\tgetStatusWithColor,\n\tformatDependenciesWithStatus,\n\tgetComplexityWithColor,\n\tcreateProgressBar\n} from '../ui.js';\n\n/**\n * List all tasks\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} statusFilter - Filter by status (single status or comma-separated list, e.g., 'pending' or 'blocked,deferred')\n * @param {string} reportPath - Path to the complexity report\n * @param 
{boolean} withSubtasks - Whether to show subtasks\n * @param {string} outputFormat - Output format (text or json)\n * @param {Object} context - Context object (required)\n * @param {string} context.projectRoot - Project root path\n * @param {string} context.tag - Tag for the task\n * @returns {Object} - Task list result for json format\n */\nfunction listTasks(\n\ttasksPath,\n\tstatusFilter,\n\treportPath = null,\n\twithSubtasks = false,\n\toutputFormat = 'text',\n\tcontext = {}\n) {\n\tconst { projectRoot, tag } = context;\n\ttry {\n\t\t// Extract projectRoot from context if provided\n\t\tconst data = readJSON(tasksPath, projectRoot, tag); // Pass projectRoot to readJSON\n\t\tif (!data || !data.tasks) {\n\t\t\tthrow new Error(`No valid tasks found in ${tasksPath}`);\n\t\t}\n\n\t\t// Add complexity scores to tasks if report exists\n\t\t// `reportPath` is already tag-aware (resolved at the CLI boundary).\n\t\tconst complexityReport = readComplexityReport(reportPath);\n\t\t// Apply complexity scores to tasks\n\t\tif (complexityReport && complexityReport.complexityAnalysis) {\n\t\t\tdata.tasks.forEach((task) => addComplexityToTask(task, complexityReport));\n\t\t}\n\n\t\t// Filter tasks by status if specified - now supports comma-separated statuses\n\t\tlet filteredTasks;\n\t\tif (statusFilter && statusFilter.toLowerCase() !== 'all') {\n\t\t\t// Handle comma-separated statuses\n\t\t\tconst allowedStatuses = statusFilter\n\t\t\t\t.split(',')\n\t\t\t\t.map((s) => s.trim().toLowerCase())\n\t\t\t\t.filter((s) => s.length > 0); // Remove empty strings\n\n\t\t\tfilteredTasks = data.tasks.filter(\n\t\t\t\t(task) =>\n\t\t\t\t\ttask.status && allowedStatuses.includes(task.status.toLowerCase())\n\t\t\t);\n\t\t} else {\n\t\t\t// Default to all tasks if no filter or filter is 'all'\n\t\t\tfilteredTasks = data.tasks;\n\t\t}\n\n\t\t// Calculate completion statistics\n\t\tconst totalTasks = data.tasks.length;\n\t\tconst completedTasks = data.tasks.filter(\n\t\t\t(task) => task.status 
=== 'done' || task.status === 'completed'\n\t\t).length;\n\t\tconst completionPercentage =\n\t\t\ttotalTasks > 0 ? (completedTasks / totalTasks) * 100 : 0;\n\n\t\t// Count statuses for tasks\n\t\tconst doneCount = completedTasks;\n\t\tconst inProgressCount = data.tasks.filter(\n\t\t\t(task) => task.status === 'in-progress'\n\t\t).length;\n\t\tconst pendingCount = data.tasks.filter(\n\t\t\t(task) => task.status === 'pending'\n\t\t).length;\n\t\tconst blockedCount = data.tasks.filter(\n\t\t\t(task) => task.status === 'blocked'\n\t\t).length;\n\t\tconst deferredCount = data.tasks.filter(\n\t\t\t(task) => task.status === 'deferred'\n\t\t).length;\n\t\tconst cancelledCount = data.tasks.filter(\n\t\t\t(task) => task.status === 'cancelled'\n\t\t).length;\n\t\tconst reviewCount = data.tasks.filter(\n\t\t\t(task) => task.status === 'review'\n\t\t).length;\n\n\t\t// Count subtasks and their statuses\n\t\tlet totalSubtasks = 0;\n\t\tlet completedSubtasks = 0;\n\t\tlet inProgressSubtasks = 0;\n\t\tlet pendingSubtasks = 0;\n\t\tlet blockedSubtasks = 0;\n\t\tlet deferredSubtasks = 0;\n\t\tlet cancelledSubtasks = 0;\n\t\tlet reviewSubtasks = 0;\n\n\t\tdata.tasks.forEach((task) => {\n\t\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\t\ttotalSubtasks += task.subtasks.length;\n\t\t\t\tcompletedSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 'done' || st.status === 'completed'\n\t\t\t\t).length;\n\t\t\t\tinProgressSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 'in-progress'\n\t\t\t\t).length;\n\t\t\t\tpendingSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 'pending'\n\t\t\t\t).length;\n\t\t\t\tblockedSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 'blocked'\n\t\t\t\t).length;\n\t\t\t\tdeferredSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 'deferred'\n\t\t\t\t).length;\n\t\t\t\tcancelledSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 
'cancelled'\n\t\t\t\t).length;\n\t\t\t\treviewSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 'review'\n\t\t\t\t).length;\n\t\t\t}\n\t\t});\n\n\t\tconst subtaskCompletionPercentage =\n\t\t\ttotalSubtasks > 0 ? (completedSubtasks / totalSubtasks) * 100 : 0;\n\n\t\t// Calculate dependency statistics (moved up to be available for all output formats)\n\t\tconst completedTaskIds = new Set(\n\t\t\tdata.tasks\n\t\t\t\t.filter((t) => t.status === 'done' || t.status === 'completed')\n\t\t\t\t.map((t) => t.id)\n\t\t);\n\n\t\tconst tasksWithNoDeps = data.tasks.filter(\n\t\t\t(t) =>\n\t\t\t\tt.status !== 'done' &&\n\t\t\t\tt.status !== 'completed' &&\n\t\t\t\t(!t.dependencies || t.dependencies.length === 0)\n\t\t).length;\n\n\t\tconst tasksWithAllDepsSatisfied = data.tasks.filter(\n\t\t\t(t) =>\n\t\t\t\tt.status !== 'done' &&\n\t\t\t\tt.status !== 'completed' &&\n\t\t\t\tt.dependencies &&\n\t\t\t\tt.dependencies.length > 0 &&\n\t\t\t\tt.dependencies.every((depId) => completedTaskIds.has(depId))\n\t\t).length;\n\n\t\tconst tasksWithUnsatisfiedDeps = data.tasks.filter(\n\t\t\t(t) =>\n\t\t\t\tt.status !== 'done' &&\n\t\t\t\tt.status !== 'completed' &&\n\t\t\t\tt.dependencies &&\n\t\t\t\tt.dependencies.length > 0 &&\n\t\t\t\t!t.dependencies.every((depId) => completedTaskIds.has(depId))\n\t\t).length;\n\n\t\t// Calculate total tasks ready to work on (no deps + satisfied deps)\n\t\tconst tasksReadyToWork = tasksWithNoDeps + tasksWithAllDepsSatisfied;\n\n\t\t// Calculate most depended-on tasks\n\t\tconst dependencyCount = {};\n\t\tdata.tasks.forEach((task) => {\n\t\t\tif (task.dependencies && task.dependencies.length > 0) {\n\t\t\t\ttask.dependencies.forEach((depId) => {\n\t\t\t\t\tdependencyCount[depId] = (dependencyCount[depId] || 0) + 1;\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Find the most depended-on task\n\t\tlet mostDependedOnTaskId = null;\n\t\tlet maxDependents = 0;\n\n\t\tfor (const [taskId, count] of Object.entries(dependencyCount)) {\n\t\t\tif (count > 
maxDependents) {\n\t\t\t\tmaxDependents = count;\n\t\t\t\tmostDependedOnTaskId = parseInt(taskId);\n\t\t\t}\n\t\t}\n\n\t\t// Get the most depended-on task\n\t\tconst mostDependedOnTask =\n\t\t\tmostDependedOnTaskId !== null\n\t\t\t\t? data.tasks.find((t) => t.id === mostDependedOnTaskId)\n\t\t\t\t: null;\n\n\t\t// Calculate average dependencies per task\n\t\tconst totalDependencies = data.tasks.reduce(\n\t\t\t(sum, task) => sum + (task.dependencies ? task.dependencies.length : 0),\n\t\t\t0\n\t\t);\n\t\tconst avgDependenciesPerTask = totalDependencies / data.tasks.length;\n\n\t\t// Find next task to work on, passing the complexity report\n\t\tconst nextItem = findNextTask(data.tasks, complexityReport);\n\n\t\t// For JSON output, return structured data\n\t\tif (outputFormat === 'json') {\n\t\t\t// *** Modification: Remove 'details' field for JSON output ***\n\t\t\tconst tasksWithoutDetails = filteredTasks.map((task) => {\n\t\t\t\t// <-- USES filteredTasks!\n\t\t\t\t// Omit 'details' from the parent task\n\t\t\t\tconst { details, ...taskRest } = task;\n\n\t\t\t\t// If subtasks exist, omit 'details' from them too\n\t\t\t\tif (taskRest.subtasks && Array.isArray(taskRest.subtasks)) {\n\t\t\t\t\ttaskRest.subtasks = taskRest.subtasks.map((subtask) => {\n\t\t\t\t\t\tconst { details: subtaskDetails, ...subtaskRest } = subtask;\n\t\t\t\t\t\treturn subtaskRest;\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\treturn taskRest;\n\t\t\t});\n\t\t\t// *** End of Modification ***\n\n\t\t\treturn {\n\t\t\t\ttasks: tasksWithoutDetails, // <--- THIS IS THE ARRAY BEING RETURNED\n\t\t\t\tfilter: statusFilter || 'all', // Return the actual filter used\n\t\t\t\tstats: {\n\t\t\t\t\ttotal: totalTasks,\n\t\t\t\t\tcompleted: doneCount,\n\t\t\t\t\tinProgress: inProgressCount,\n\t\t\t\t\tpending: pendingCount,\n\t\t\t\t\tblocked: blockedCount,\n\t\t\t\t\tdeferred: deferredCount,\n\t\t\t\t\tcancelled: cancelledCount,\n\t\t\t\t\treview: reviewCount,\n\t\t\t\t\tcompletionPercentage,\n\t\t\t\t\tsubtasks: 
{\n\t\t\t\t\t\ttotal: totalSubtasks,\n\t\t\t\t\t\tcompleted: completedSubtasks,\n\t\t\t\t\t\tinProgress: inProgressSubtasks,\n\t\t\t\t\t\tpending: pendingSubtasks,\n\t\t\t\t\t\tblocked: blockedSubtasks,\n\t\t\t\t\t\tdeferred: deferredSubtasks,\n\t\t\t\t\t\tcancelled: cancelledSubtasks,\n\t\t\t\t\t\tcompletionPercentage: subtaskCompletionPercentage\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// For markdown-readme output, return formatted markdown\n\t\tif (outputFormat === 'markdown-readme') {\n\t\t\treturn generateMarkdownOutput(data, filteredTasks, {\n\t\t\t\ttotalTasks,\n\t\t\t\tcompletedTasks,\n\t\t\t\tcompletionPercentage,\n\t\t\t\tdoneCount,\n\t\t\t\tinProgressCount,\n\t\t\t\tpendingCount,\n\t\t\t\tblockedCount,\n\t\t\t\tdeferredCount,\n\t\t\t\tcancelledCount,\n\t\t\t\ttotalSubtasks,\n\t\t\t\tcompletedSubtasks,\n\t\t\t\tsubtaskCompletionPercentage,\n\t\t\t\tinProgressSubtasks,\n\t\t\t\tpendingSubtasks,\n\t\t\t\tblockedSubtasks,\n\t\t\t\tdeferredSubtasks,\n\t\t\t\tcancelledSubtasks,\n\t\t\t\treviewSubtasks,\n\t\t\t\ttasksWithNoDeps,\n\t\t\t\ttasksReadyToWork,\n\t\t\t\ttasksWithUnsatisfiedDeps,\n\t\t\t\tmostDependedOnTask,\n\t\t\t\tmostDependedOnTaskId,\n\t\t\t\tmaxDependents,\n\t\t\t\tavgDependenciesPerTask,\n\t\t\t\tcomplexityReport,\n\t\t\t\twithSubtasks,\n\t\t\t\tnextItem\n\t\t\t});\n\t\t}\n\n\t\t// ... existing code for text output ...\n\n\t\t// Calculate status breakdowns as percentages of total\n\t\tconst taskStatusBreakdown = {\n\t\t\t'in-progress': totalTasks > 0 ? (inProgressCount / totalTasks) * 100 : 0,\n\t\t\tpending: totalTasks > 0 ? (pendingCount / totalTasks) * 100 : 0,\n\t\t\tblocked: totalTasks > 0 ? (blockedCount / totalTasks) * 100 : 0,\n\t\t\tdeferred: totalTasks > 0 ? (deferredCount / totalTasks) * 100 : 0,\n\t\t\tcancelled: totalTasks > 0 ? (cancelledCount / totalTasks) * 100 : 0,\n\t\t\treview: totalTasks > 0 ? 
(reviewCount / totalTasks) * 100 : 0\n\t\t};\n\n\t\tconst subtaskStatusBreakdown = {\n\t\t\t'in-progress':\n\t\t\t\ttotalSubtasks > 0 ? (inProgressSubtasks / totalSubtasks) * 100 : 0,\n\t\t\tpending: totalSubtasks > 0 ? (pendingSubtasks / totalSubtasks) * 100 : 0,\n\t\t\tblocked: totalSubtasks > 0 ? (blockedSubtasks / totalSubtasks) * 100 : 0,\n\t\t\tdeferred:\n\t\t\t\ttotalSubtasks > 0 ? (deferredSubtasks / totalSubtasks) * 100 : 0,\n\t\t\tcancelled:\n\t\t\t\ttotalSubtasks > 0 ? (cancelledSubtasks / totalSubtasks) * 100 : 0,\n\t\t\treview: totalSubtasks > 0 ? (reviewSubtasks / totalSubtasks) * 100 : 0\n\t\t};\n\n\t\t// Create progress bars with status breakdowns\n\t\tconst taskProgressBar = createProgressBar(\n\t\t\tcompletionPercentage,\n\t\t\t30,\n\t\t\ttaskStatusBreakdown\n\t\t);\n\t\tconst subtaskProgressBar = createProgressBar(\n\t\t\tsubtaskCompletionPercentage,\n\t\t\t30,\n\t\t\tsubtaskStatusBreakdown\n\t\t);\n\n\t\t// Get terminal width - more reliable method\n\t\tlet terminalWidth;\n\t\ttry {\n\t\t\t// Try to get the actual terminal columns\n\t\t\tterminalWidth = process.stdout.columns;\n\t\t} catch (e) {\n\t\t\t// Fallback if columns cannot be determined\n\t\t\tlog('debug', 'Could not determine terminal width, using default');\n\t\t}\n\t\t// Ensure we have a reasonable default if detection fails\n\t\tterminalWidth = terminalWidth || 80;\n\n\t\t// Ensure terminal width is at least a minimum value to prevent layout issues\n\t\tterminalWidth = Math.max(terminalWidth, 80);\n\n\t\t// Create dashboard content\n\t\tconst projectDashboardContent =\n\t\t\tchalk.white.bold('Project Dashboard') +\n\t\t\t'\\n' +\n\t\t\t`Tasks Progress: ${chalk.greenBright(taskProgressBar)} ${completionPercentage.toFixed(0)}%\\n` +\n\t\t\t`Done: ${chalk.green(doneCount)} In Progress: ${chalk.blue(inProgressCount)} Pending: ${chalk.yellow(pendingCount)} Blocked: ${chalk.red(blockedCount)} Deferred: ${chalk.gray(deferredCount)} Cancelled: ${chalk.gray(cancelledCount)}\\n\\n` 
+\n\t\t\t`Subtasks Progress: ${chalk.cyan(subtaskProgressBar)} ${subtaskCompletionPercentage.toFixed(0)}%\\n` +\n\t\t\t`Completed: ${chalk.green(completedSubtasks)}/${totalSubtasks} In Progress: ${chalk.blue(inProgressSubtasks)} Pending: ${chalk.yellow(pendingSubtasks)} Blocked: ${chalk.red(blockedSubtasks)} Deferred: ${chalk.gray(deferredSubtasks)} Cancelled: ${chalk.gray(cancelledSubtasks)}\\n\\n` +\n\t\t\tchalk.cyan.bold('Priority Breakdown:') +\n\t\t\t'\\n' +\n\t\t\t`${chalk.red('•')} ${chalk.white('High priority:')} ${data.tasks.filter((t) => t.priority === 'high').length}\\n` +\n\t\t\t`${chalk.yellow('•')} ${chalk.white('Medium priority:')} ${data.tasks.filter((t) => t.priority === 'medium').length}\\n` +\n\t\t\t`${chalk.green('•')} ${chalk.white('Low priority:')} ${data.tasks.filter((t) => t.priority === 'low').length}`;\n\n\t\tconst dependencyDashboardContent =\n\t\t\tchalk.white.bold('Dependency Status & Next Task') +\n\t\t\t'\\n' +\n\t\t\tchalk.cyan.bold('Dependency Metrics:') +\n\t\t\t'\\n' +\n\t\t\t`${chalk.green('•')} ${chalk.white('Tasks with no dependencies:')} ${tasksWithNoDeps}\\n` +\n\t\t\t`${chalk.green('•')} ${chalk.white('Tasks ready to work on:')} ${tasksReadyToWork}\\n` +\n\t\t\t`${chalk.yellow('•')} ${chalk.white('Tasks blocked by dependencies:')} ${tasksWithUnsatisfiedDeps}\\n` +\n\t\t\t`${chalk.magenta('•')} ${chalk.white('Most depended-on task:')} ${mostDependedOnTask ? chalk.cyan(`#${mostDependedOnTaskId} (${maxDependents} dependents)`) : chalk.gray('None')}\\n` +\n\t\t\t`${chalk.blue('•')} ${chalk.white('Avg dependencies per task:')} ${avgDependenciesPerTask.toFixed(1)}\\n\\n` +\n\t\t\tchalk.cyan.bold('Next Task to Work On:') +\n\t\t\t'\\n' +\n\t\t\t`ID: ${chalk.cyan(nextItem ? nextItem.id : 'N/A')} - ${nextItem ? chalk.white.bold(truncate(nextItem.title, 40)) : chalk.yellow('No task available')}\n` +\n\t\t\t`Priority: ${nextItem ? chalk.white(nextItem.priority || 'medium') : ''} Dependencies: ${nextItem ? 
formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : ''}\n` +\n\t\t\t`Complexity: ${nextItem && nextItem.complexityScore ? getComplexityWithColor(nextItem.complexityScore) : chalk.gray('N/A')}`;\n\n\t\t// Calculate width for side-by-side display\n\t\t// Box borders, padding take approximately 4 chars on each side\n\t\tconst minDashboardWidth = 50; // Minimum width for dashboard\n\t\tconst minDependencyWidth = 50; // Minimum width for dependency dashboard\n\t\tconst totalMinWidth = minDashboardWidth + minDependencyWidth + 4; // Extra 4 chars for spacing\n\n\t\t// If terminal is wide enough, show boxes side by side with responsive widths\n\t\tif (terminalWidth >= totalMinWidth) {\n\t\t\t// Calculate widths proportionally for each box - use exact 50% width each\n\t\t\tconst availableWidth = terminalWidth;\n\t\t\tconst halfWidth = Math.floor(availableWidth / 2);\n\n\t\t\t// Account for border characters (2 chars on each side)\n\t\t\tconst boxContentWidth = halfWidth - 4;\n\n\t\t\t// Create boxen options with precise widths\n\t\t\tconst dashboardBox = boxen(projectDashboardContent, {\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'blue',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\twidth: boxContentWidth,\n\t\t\t\tdimBorder: false\n\t\t\t});\n\n\t\t\tconst dependencyBox = boxen(dependencyDashboardContent, {\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'magenta',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\twidth: boxContentWidth,\n\t\t\t\tdimBorder: false\n\t\t\t});\n\n\t\t\t// Create a better side-by-side layout with exact spacing\n\t\t\tconst dashboardLines = dashboardBox.split('\\n');\n\t\t\tconst dependencyLines = dependencyBox.split('\\n');\n\n\t\t\t// Make sure both boxes have the same height\n\t\t\tconst maxHeight = Math.max(dashboardLines.length, dependencyLines.length);\n\n\t\t\t// For each line of output, pad the dashboard line to exactly halfWidth chars\n\t\t\t// This ensures the dependency box starts at exactly the right 
position\n\t\t\tconst combinedLines = [];\n\t\t\tfor (let i = 0; i < maxHeight; i++) {\n\t\t\t\t// Get the dashboard line (or empty string if we've run out of lines)\n\t\t\t\tconst dashLine = i < dashboardLines.length ? dashboardLines[i] : '';\n\t\t\t\t// Get the dependency line (or empty string if we've run out of lines)\n\t\t\t\tconst depLine = i < dependencyLines.length ? dependencyLines[i] : '';\n\n\t\t\t\t// Remove any trailing spaces from dashLine before padding to exact width\n\t\t\t\tconst trimmedDashLine = dashLine.trimEnd();\n\t\t\t\t// Pad the dashboard line to exactly halfWidth chars with no extra spaces\n\t\t\t\tconst paddedDashLine = trimmedDashLine.padEnd(halfWidth, ' ');\n\n\t\t\t\t// Join the lines with no space in between\n\t\t\t\tcombinedLines.push(paddedDashLine + depLine);\n\t\t\t}\n\n\t\t\t// Join all lines and output\n\t\t\tconsole.log(combinedLines.join('\\n'));\n\t\t} else {\n\t\t\t// Terminal too narrow, show boxes stacked vertically\n\t\t\tconst dashboardBox = boxen(projectDashboardContent, {\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'blue',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 0, bottom: 1 }\n\t\t\t});\n\n\t\t\tconst dependencyBox = boxen(dependencyDashboardContent, {\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'magenta',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 0, bottom: 1 }\n\t\t\t});\n\n\t\t\t// Display stacked vertically\n\t\t\tconsole.log(dashboardBox);\n\t\t\tconsole.log(dependencyBox);\n\t\t}\n\n\t\tif (filteredTasks.length === 0) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tstatusFilter\n\t\t\t\t\t\t? 
chalk.yellow(`No tasks with status '${statusFilter}' found`)\n\t\t\t\t\t\t: chalk.yellow('No tasks found'),\n\t\t\t\t\t{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }\n\t\t\t\t)\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\t// COMPLETELY REVISED TABLE APPROACH\n\t\t// Define percentage-based column widths and calculate actual widths\n\t\t// Adjust percentages based on content type and user requirements\n\n\t\t// Adjust ID width if showing subtasks (subtask IDs are longer: e.g., \"1.2\")\n\t\tconst idWidthPct = withSubtasks ? 10 : 7;\n\n\t\t// Calculate max status length to accommodate \"in-progress\"\n\t\tconst statusWidthPct = 15;\n\n\t\t// Increase priority column width as requested\n\t\tconst priorityWidthPct = 12;\n\n\t\t// Make dependencies column smaller as requested (-20%)\n\t\tconst depsWidthPct = 20;\n\n\t\tconst complexityWidthPct = 10;\n\n\t\t// Calculate title/description width as remaining space (+20% from dependencies reduction)\n\t\tconst titleWidthPct =\n\t\t\t100 -\n\t\t\tidWidthPct -\n\t\t\tstatusWidthPct -\n\t\t\tpriorityWidthPct -\n\t\t\tdepsWidthPct -\n\t\t\tcomplexityWidthPct;\n\n\t\t// Allow 10 characters for borders and padding\n\t\tconst availableWidth = terminalWidth - 10;\n\n\t\t// Calculate actual column widths based on percentages\n\t\tconst idWidth = Math.floor(availableWidth * (idWidthPct / 100));\n\t\tconst statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));\n\t\tconst priorityWidth = Math.floor(availableWidth * (priorityWidthPct / 100));\n\t\tconst depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));\n\t\tconst complexityWidth = Math.floor(\n\t\t\tavailableWidth * (complexityWidthPct / 100)\n\t\t);\n\t\tconst titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));\n\n\t\t// Create a table with correct borders and spacing\n\t\tconst table = new Table({\n\t\t\thead: 
[\n\t\t\t\tchalk.cyan.bold('ID'),\n\t\t\t\tchalk.cyan.bold('Title'),\n\t\t\t\tchalk.cyan.bold('Status'),\n\t\t\t\tchalk.cyan.bold('Priority'),\n\t\t\t\tchalk.cyan.bold('Dependencies'),\n\t\t\t\tchalk.cyan.bold('Complexity')\n\t\t\t],\n\t\t\tcolWidths: [\n\t\t\t\tidWidth,\n\t\t\t\ttitleWidth,\n\t\t\t\tstatusWidth,\n\t\t\t\tpriorityWidth,\n\t\t\t\tdepsWidth,\n\t\t\t\tcomplexityWidth // Added complexity column width\n\t\t\t],\n\t\t\tstyle: {\n\t\t\t\thead: [], // No special styling for header\n\t\t\t\tborder: [], // No special styling for border\n\t\t\t\tcompact: false // Use default spacing\n\t\t\t},\n\t\t\twordWrap: true,\n\t\t\twrapOnWordBoundary: true\n\t\t});\n\n\t\t// Process tasks for the table\n\t\tfilteredTasks.forEach((task) => {\n\t\t\t// Format dependencies with status indicators (colored)\n\t\t\tlet depText = 'None';\n\t\t\tif (task.dependencies && task.dependencies.length > 0) {\n\t\t\t\t// Use the proper formatDependenciesWithStatus function for colored status\n\t\t\t\tdepText = formatDependenciesWithStatus(\n\t\t\t\t\ttask.dependencies,\n\t\t\t\t\tdata.tasks,\n\t\t\t\t\ttrue,\n\t\t\t\t\tcomplexityReport\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\tdepText = chalk.gray('None');\n\t\t\t}\n\n\t\t\t// Clean up any ANSI codes or confusing characters\n\t\t\tconst cleanTitle = task.title.replace(/\\n/g, ' ');\n\n\t\t\t// Get priority color\n\t\t\tconst priorityColor =\n\t\t\t\t{\n\t\t\t\t\thigh: chalk.red,\n\t\t\t\t\tmedium: chalk.yellow,\n\t\t\t\t\tlow: chalk.gray\n\t\t\t\t}[task.priority || 'medium'] || chalk.white;\n\n\t\t\t// Format status\n\t\t\tconst status = getStatusWithColor(task.status, true);\n\n\t\t\t// Add the row without truncating dependencies\n\t\t\ttable.push([\n\t\t\t\ttask.id.toString(),\n\t\t\t\ttruncate(cleanTitle, titleWidth - 3),\n\t\t\t\tstatus,\n\t\t\t\tpriorityColor(truncate(task.priority || 'medium', priorityWidth - 2)),\n\t\t\t\tdepText,\n\t\t\t\ttask.complexityScore\n\t\t\t\t\t? 
getComplexityWithColor(task.complexityScore)\n\t\t\t\t\t: chalk.gray('N/A')\n\t\t\t]);\n\n\t\t\t// Add subtasks if requested\n\t\t\tif (withSubtasks && task.subtasks && task.subtasks.length > 0) {\n\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\t// Format subtask dependencies with status indicators\n\t\t\t\t\tlet subtaskDepText = 'None';\n\t\t\t\t\tif (subtask.dependencies && subtask.dependencies.length > 0) {\n\t\t\t\t\t\t// Handle both subtask-to-subtask and subtask-to-task dependencies\n\t\t\t\t\t\tconst formattedDeps = subtask.dependencies\n\t\t\t\t\t\t\t.map((depId) => {\n\t\t\t\t\t\t\t\t// Check if it's a dependency on another subtask\n\t\t\t\t\t\t\t\tif (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\t\t\t\tconst foundSubtask = task.subtasks.find(\n\t\t\t\t\t\t\t\t\t\t(st) => st.id === depId\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\tif (foundSubtask) {\n\t\t\t\t\t\t\t\t\t\tconst isDone =\n\t\t\t\t\t\t\t\t\t\t\tfoundSubtask.status === 'done' ||\n\t\t\t\t\t\t\t\t\t\t\tfoundSubtask.status === 'completed';\n\t\t\t\t\t\t\t\t\t\tconst isInProgress = foundSubtask.status === 'in-progress';\n\n\t\t\t\t\t\t\t\t\t\t// Use consistent color formatting instead of emojis\n\t\t\t\t\t\t\t\t\t\tif (isDone) {\n\t\t\t\t\t\t\t\t\t\t\treturn chalk.green.bold(`${task.id}.${depId}`);\n\t\t\t\t\t\t\t\t\t\t} else if (isInProgress) {\n\t\t\t\t\t\t\t\t\t\t\treturn chalk.hex('#FFA500').bold(`${task.id}.${depId}`);\n\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\treturn chalk.red.bold(`${task.id}.${depId}`);\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t// Default to regular task dependency\n\t\t\t\t\t\t\t\tconst depTask = data.tasks.find((t) => t.id === depId);\n\t\t\t\t\t\t\t\tif (depTask) {\n\t\t\t\t\t\t\t\t\t// Add complexity to depTask before checking status\n\t\t\t\t\t\t\t\t\taddComplexityToTask(depTask, complexityReport);\n\t\t\t\t\t\t\t\t\tconst isDone =\n\t\t\t\t\t\t\t\t\t\tdepTask.status === 'done' || depTask.status === 
'completed';\n\t\t\t\t\t\t\t\t\tconst isInProgress = depTask.status === 'in-progress';\n\t\t\t\t\t\t\t\t\t// Use the same color scheme as in formatDependenciesWithStatus\n\t\t\t\t\t\t\t\t\tif (isDone) {\n\t\t\t\t\t\t\t\t\t\treturn chalk.green.bold(`${depId}`);\n\t\t\t\t\t\t\t\t\t} else if (isInProgress) {\n\t\t\t\t\t\t\t\t\t\treturn chalk.hex('#FFA500').bold(`${depId}`);\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\treturn chalk.red.bold(`${depId}`);\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn chalk.cyan(depId.toString());\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t.join(', ');\n\n\t\t\t\t\t\tsubtaskDepText = formattedDeps || chalk.gray('None');\n\t\t\t\t\t}\n\n\t\t\t\t\t// Add the subtask row without truncating dependencies\n\t\t\t\t\ttable.push([\n\t\t\t\t\t\t`${task.id}.${subtask.id}`,\n\t\t\t\t\t\tchalk.dim(`└─ ${truncate(subtask.title, titleWidth - 5)}`),\n\t\t\t\t\t\tgetStatusWithColor(subtask.status, true),\n\t\t\t\t\t\tchalk.dim('-'),\n\t\t\t\t\t\tsubtaskDepText,\n\t\t\t\t\t\tsubtask.complexityScore\n\t\t\t\t\t\t\t? 
chalk.gray(`${subtask.complexityScore}`)\n\t\t\t\t\t\t\t: chalk.gray('N/A')\n\t\t\t\t\t]);\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Ensure we output the table even if it had to wrap\n\t\ttry {\n\t\t\tconsole.log(table.toString());\n\t\t} catch (err) {\n\t\t\tlog('error', `Error rendering table: ${err.message}`);\n\n\t\t\t// Fall back to simpler output\n\t\t\tconsole.log(\n\t\t\t\tchalk.yellow(\n\t\t\t\t\t'\\nFalling back to simple task list due to terminal width constraints:'\n\t\t\t\t)\n\t\t\t);\n\t\t\tfilteredTasks.forEach((task) => {\n\t\t\t\tconsole.log(\n\t\t\t\t\t`${chalk.cyan(task.id)}: ${chalk.white(task.title)} - ${getStatusWithColor(task.status)}`\n\t\t\t\t);\n\t\t\t});\n\t\t}\n\n\t\t// Show filter info if applied\n\t\tif (statusFilter) {\n\t\t\tconsole.log(chalk.yellow(`\\nFiltered by status: ${statusFilter}`));\n\t\t\tconsole.log(\n\t\t\t\tchalk.yellow(`Showing ${filteredTasks.length} of ${totalTasks} tasks`)\n\t\t\t);\n\t\t}\n\n\t\t// Define priority colors\n\t\tconst priorityColors = {\n\t\t\thigh: chalk.red.bold,\n\t\t\tmedium: chalk.yellow,\n\t\t\tlow: chalk.gray\n\t\t};\n\n\t\t// Show next task box in a prominent color\n\t\tif (nextItem) {\n\t\t\t// Prepare subtasks section if they exist (Only tasks have .subtasks property)\n\t\t\tlet subtasksSection = '';\n\t\t\t// Check if the nextItem is a top-level task before looking for subtasks\n\t\t\tconst parentTaskForSubtasks = data.tasks.find(\n\t\t\t\t(t) => String(t.id) === String(nextItem.id)\n\t\t\t); // Find the original task object\n\t\t\tif (\n\t\t\t\tparentTaskForSubtasks &&\n\t\t\t\tparentTaskForSubtasks.subtasks &&\n\t\t\t\tparentTaskForSubtasks.subtasks.length > 0\n\t\t\t) {\n\t\t\t\tsubtasksSection = `\\n\\n${chalk.white.bold('Subtasks:')}\\n`;\n\t\t\t\tsubtasksSection += parentTaskForSubtasks.subtasks\n\t\t\t\t\t.map((subtask) => {\n\t\t\t\t\t\t// Add complexity to subtask before display\n\t\t\t\t\t\taddComplexityToTask(subtask, complexityReport);\n\t\t\t\t\t\t// Using a more simplified 
format for subtask status display\n\t\t\t\t\t\tconst status = subtask.status || 'pending';\n\t\t\t\t\t\tconst statusColors = {\n\t\t\t\t\t\t\tdone: chalk.green,\n\t\t\t\t\t\t\tcompleted: chalk.green,\n\t\t\t\t\t\t\tpending: chalk.yellow,\n\t\t\t\t\t\t\t'in-progress': chalk.blue,\n\t\t\t\t\t\t\tdeferred: chalk.gray,\n\t\t\t\t\t\t\tblocked: chalk.red,\n\t\t\t\t\t\t\tcancelled: chalk.gray\n\t\t\t\t\t\t};\n\t\t\t\t\t\tconst statusColor =\n\t\t\t\t\t\t\tstatusColors[status.toLowerCase()] || chalk.white;\n\t\t\t\t\t\t// Ensure subtask ID is displayed correctly using parent ID from the original task object\n\t\t\t\t\t\treturn `${chalk.cyan(`${parentTaskForSubtasks.id}.${subtask.id}`)} [${statusColor(status)}] ${subtask.title}`;\n\t\t\t\t\t})\n\t\t\t\t\t.join('\\n');\n\t\t\t}\n\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.hex('#FF8800').bold(\n\t\t\t\t\t\t// Use nextItem.id and nextItem.title\n\t\t\t\t\t\t`🔥 Next Task to Work On: #${nextItem.id} - ${nextItem.title}`\n\t\t\t\t\t) +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t// Use nextItem.priority, nextItem.status, nextItem.dependencies\n\t\t\t\t\t\t`${chalk.white('Priority:')} ${priorityColors[nextItem.priority || 'medium'](nextItem.priority || 'medium')} ${chalk.white('Status:')} ${getStatusWithColor(nextItem.status, true)}\\n` +\n\t\t\t\t\t\t`${chalk.white('Dependencies:')} ${nextItem.dependencies && nextItem.dependencies.length > 0 ? 
formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : chalk.gray('None')}\\n\\n` +\n\t\t\t\t\t\t// Use nextTask.description (Note: findNextTask doesn't return description, need to fetch original task/subtask for this)\n\t\t\t\t\t\t// *** Fetching original item for description and details ***\n\t\t\t\t\t\t`${chalk.white('Description:')} ${getWorkItemDescription(nextItem, data.tasks)}` +\n\t\t\t\t\t\tsubtasksSection + // <-- Subtasks are handled above now\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t// Use nextItem.id\n\t\t\t\t\t\t`${chalk.cyan('Start working:')} ${chalk.yellow(`task-master set-status --id=${nextItem.id} --status=in-progress`)}\\n` +\n\t\t\t\t\t\t// Use nextItem.id\n\t\t\t\t\t\t`${chalk.cyan('View details:')} ${chalk.yellow(`task-master show ${nextItem.id}`)}`,\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: { left: 2, right: 2, top: 1, bottom: 1 },\n\t\t\t\t\t\tborderColor: '#FF8800',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 },\n\t\t\t\t\t\ttitle: '⚡ RECOMMENDED NEXT TASK ⚡',\n\t\t\t\t\t\ttitleAlignment: 'center',\n\t\t\t\t\t\twidth: terminalWidth - 4,\n\t\t\t\t\t\tfullscreen: false\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t} else {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.hex('#FF8800').bold('No eligible next task found') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t'All pending tasks have dependencies that are not yet completed, or all tasks are done.',\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: '#FF8800',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 },\n\t\t\t\t\t\ttitle: '⚡ NEXT TASK ⚡',\n\t\t\t\t\t\ttitleAlignment: 'center',\n\t\t\t\t\t\twidth: terminalWidth - 4 // Use full terminal width minus a small margin\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\t// Show next steps\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Suggested Next Steps:') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t`${chalk.cyan('1.')} Run 
${chalk.yellow('task-master next')} to see what to work on next\\n` +\n\t\t\t\t\t`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks\\n` +\n\t\t\t\t\t`${chalk.cyan('3.')} Run ${chalk.yellow('task-master set-status --id=<id> --status=done')} to mark a task as complete`,\n\t\t\t\t{\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'gray',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t} catch (error) {\n\t\tlog('error', `Error listing tasks: ${error.message}`);\n\n\t\tif (outputFormat === 'json') {\n\t\t\t// Return structured error for JSON output\n\t\t\tthrow {\n\t\t\t\tcode: 'TASK_LIST_ERROR',\n\t\t\t\tmessage: error.message,\n\t\t\t\tdetails: error.stack\n\t\t\t};\n\t\t}\n\n\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\tprocess.exit(1);\n\t}\n}\n\n// *** Helper function to get description for task or subtask ***\nfunction getWorkItemDescription(item, allTasks) {\n\tif (!item) return 'N/A';\n\tif (item.parentId) {\n\t\t// It's a subtask\n\t\tconst parent = allTasks.find((t) => t.id === item.parentId);\n\t\tconst subtask = parent?.subtasks?.find(\n\t\t\t(st) => `${parent.id}.${st.id}` === item.id\n\t\t);\n\t\treturn subtask?.description || 'No description available.';\n\t} else {\n\t\t// It's a top-level task\n\t\tconst task = allTasks.find((t) => String(t.id) === String(item.id));\n\t\treturn task?.description || 'No description available.';\n\t}\n}\n\n/**\n * Generate markdown-formatted output for README files\n * @param {Object} data - Full tasks data\n * @param {Array} filteredTasks - Filtered tasks array\n * @param {Object} stats - Statistics object\n * @returns {string} - Formatted markdown string\n */\nfunction generateMarkdownOutput(data, filteredTasks, stats) {\n\tconst 
{\n\t\ttotalTasks,\n\t\tcompletedTasks,\n\t\tcompletionPercentage,\n\t\tdoneCount,\n\t\tinProgressCount,\n\t\tpendingCount,\n\t\tblockedCount,\n\t\tdeferredCount,\n\t\tcancelledCount,\n\t\ttotalSubtasks,\n\t\tcompletedSubtasks,\n\t\tsubtaskCompletionPercentage,\n\t\tinProgressSubtasks,\n\t\tpendingSubtasks,\n\t\tblockedSubtasks,\n\t\tdeferredSubtasks,\n\t\tcancelledSubtasks,\n\t\ttasksWithNoDeps,\n\t\ttasksReadyToWork,\n\t\ttasksWithUnsatisfiedDeps,\n\t\tmostDependedOnTask,\n\t\tmostDependedOnTaskId,\n\t\tmaxDependents,\n\t\tavgDependenciesPerTask,\n\t\tcomplexityReport,\n\t\twithSubtasks,\n\t\tnextItem\n\t} = stats;\n\n\tlet markdown = '';\n\n\t// Create progress bars for markdown (using Unicode block characters)\n\tconst createMarkdownProgressBar = (percentage, width = 20) => {\n\t\tconst filled = Math.round((percentage / 100) * width);\n\t\tconst empty = width - filled;\n\t\treturn '█'.repeat(filled) + '░'.repeat(empty);\n\t};\n\n\tconst taskProgressBar = createMarkdownProgressBar(completionPercentage, 20);\n\tconst subtaskProgressBar = createMarkdownProgressBar(\n\t\tsubtaskCompletionPercentage,\n\t\t20\n\t);\n\n\t// Dashboard section\n\t// markdown += '```\\n';\n\tmarkdown += '| Project Dashboard | |\\n';\n\tmarkdown += '| :- |:-|\\n';\n\tmarkdown += `| Task Progress | ${taskProgressBar} ${Math.round(completionPercentage)}% |\\n`;\n\tmarkdown += `| Done | ${doneCount} |\\n`;\n\tmarkdown += `| In Progress | ${inProgressCount} |\\n`;\n\tmarkdown += `| Pending | ${pendingCount} |\\n`;\n\tmarkdown += `| Deferred | ${deferredCount} |\\n`;\n\tmarkdown += `| Cancelled | ${cancelledCount} |\\n`;\n\tmarkdown += `|-|-|\\n`;\n\tmarkdown += `| Subtask Progress | ${subtaskProgressBar} ${Math.round(subtaskCompletionPercentage)}% |\\n`;\n\tmarkdown += `| Completed | ${completedSubtasks} |\\n`;\n\tmarkdown += `| In Progress | ${inProgressSubtasks} |\\n`;\n\tmarkdown += `| Pending | ${pendingSubtasks} |\\n`;\n\n\tmarkdown += '\\n\\n';\n\n\t// Tasks table\n\tmarkdown +=\n\t\t'| 
ID | Title | Status | Priority | Dependencies | Complexity |\\n';\n\tmarkdown +=\n\t\t'| :- | :- | :- | :- | :- | :- |\\n';\n\n\t// Helper function to format status with symbols\n\tconst getStatusSymbol = (status) => {\n\t\tswitch (status) {\n\t\t\tcase 'done':\n\t\t\tcase 'completed':\n\t\t\t\treturn '✓ done';\n\t\t\tcase 'in-progress':\n\t\t\t\treturn '► in-progress';\n\t\t\tcase 'pending':\n\t\t\t\treturn '○ pending';\n\t\t\tcase 'blocked':\n\t\t\t\treturn '⭕ blocked';\n\t\t\tcase 'deferred':\n\t\t\t\treturn 'x deferred';\n\t\t\tcase 'cancelled':\n\t\t\t\treturn 'x cancelled';\n\t\t\tcase 'review':\n\t\t\t\treturn '? review';\n\t\t\tdefault:\n\t\t\t\treturn status || 'pending';\n\t\t}\n\t};\n\n\t// Helper function to format dependencies without color codes\n\tconst formatDependenciesForMarkdown = (deps, allTasks) => {\n\t\tif (!deps || deps.length === 0) return 'None';\n\t\treturn deps\n\t\t\t.map((depId) => {\n\t\t\t\tconst depTask = allTasks.find((t) => t.id === depId);\n\t\t\t\treturn depTask ? depId.toString() : depId.toString();\n\t\t\t})\n\t\t\t.join(', ');\n\t};\n\n\t// Process all tasks\n\tfilteredTasks.forEach((task) => {\n\t\tconst taskTitle = task.title; // No truncation for README\n\t\tconst statusSymbol = getStatusSymbol(task.status);\n\t\tconst priority = task.priority || 'medium';\n\t\tconst deps = formatDependenciesForMarkdown(task.dependencies, data.tasks);\n\t\tconst complexity = task.complexityScore\n\t\t\t? 
`● ${task.complexityScore}`\n\t\t\t: 'N/A';\n\n\t\tmarkdown += `| ${task.id} | ${taskTitle} | ${statusSymbol} | ${priority} | ${deps} | ${complexity} |\\n`;\n\n\t\t// Add subtasks if requested\n\t\tif (withSubtasks && task.subtasks && task.subtasks.length > 0) {\n\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\tconst subtaskTitle = `${subtask.title}`; // No truncation\n\t\t\t\tconst subtaskStatus = getStatusSymbol(subtask.status);\n\t\t\t\tconst subtaskDeps = formatDependenciesForMarkdown(\n\t\t\t\t\tsubtask.dependencies,\n\t\t\t\t\tdata.tasks\n\t\t\t\t);\n\t\t\t\tconst subtaskComplexity = subtask.complexityScore\n\t\t\t\t\t? subtask.complexityScore.toString()\n\t\t\t\t\t: 'N/A';\n\n\t\t\t\tmarkdown += `| ${task.id}.${subtask.id} | ${subtaskTitle} | ${subtaskStatus} | - | ${subtaskDeps} | ${subtaskComplexity} |\\n`;\n\t\t\t});\n\t\t}\n\t});\n\n\treturn markdown;\n}\n\nexport default listTasks;\n"], ["/claude-task-master/scripts/modules/task-manager/expand-task.js", "import fs from 'fs';\nimport path from 'path';\nimport { z } from 'zod';\n\nimport {\n\tlog,\n\treadJSON,\n\twriteJSON,\n\tisSilentMode,\n\tgetTagAwareFilePath\n} from '../utils.js';\n\nimport {\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayAiUsageSummary\n} from '../ui.js';\n\nimport { generateTextService } from '../ai-services-unified.js';\n\nimport { getDefaultSubtasks, getDebugFlag } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport generateTaskFiles from './generate-task-files.js';\nimport { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';\nimport { ContextGatherer } from '../utils/contextGatherer.js';\nimport { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';\nimport { flattenTasksWithSubtasks, findProjectRoot } from '../utils.js';\n\n// --- Zod Schemas (Keep from previous step) ---\nconst subtaskSchema = z\n\t.object({\n\t\tid: z\n\t\t\t.number()\n\t\t\t.int()\n\t\t\t.positive()\n\t\t\t.describe('Sequential 
/**
 * Parse subtasks from AI's text response. Includes basic cleanup.
 * @param {string} text - Response text from AI.
 * @param {number} startId - Starting subtask ID expected.
 * @param {number} expectedCount - Expected number of subtasks.
 * @param {number} parentTaskId - Parent task ID for context.
 * @param {Object} logger - Logging object (mcpLog or console log wrapper).
 * @returns {Array} Parsed and potentially corrected subtasks array.
 * @throws {Error} If parsing fails or JSON is invalid/malformed.
 */
function parseSubtasksFromText(
	text,
	startId,
	expectedCount,
	parentTaskId,
	logger
) {
	if (typeof text !== 'string') {
		logger.error(
			`AI response text is not a string. Received type: ${typeof text}, Value: ${text}`
		);
		throw new Error('AI response text is not a string.');
	}

	if (!text || text.trim() === '') {
		throw new Error('AI response text is empty after trimming.');
	}

	const originalTrimmedResponse = text.trim(); // Store the original trimmed response
	let jsonToParse = originalTrimmedResponse; // Initialize jsonToParse with it

	logger.debug(
		`Original AI Response for parsing (full length: ${jsonToParse.length}): ${jsonToParse.substring(0, 1000)}...`
	);

	// --- Pre-emptive cleanup for known AI JSON issues ---
	// Fix for "dependencies": , or "dependencies":,
	if (jsonToParse.includes('"dependencies":')) {
		const malformedPattern = /"dependencies":\s*,/g;
		if (malformedPattern.test(jsonToParse)) {
			logger.warn('Attempting to fix malformed "dependencies": , issue.');
			jsonToParse = jsonToParse.replace(
				malformedPattern,
				'"dependencies": [],'
			);
			logger.debug(
				`JSON after fixing "dependencies": ${jsonToParse.substring(0, 500)}...`
			);
		}
	}
	// --- End pre-emptive cleanup ---

	// Extract a balanced `{...}` block starting at `startIndex`, counting
	// braces while skipping over string literals and escape sequences so
	// braces inside JSON string values do not break the count.
	// Returns null when no balanced block terminates within `source`.
	// (This replaces the previous stub: `openBraces`/`firstBraceFound`/
	// `extractedJsonBlock` were declared `const` and never used, so the
	// advanced path effectively re-parsed the full, unextracted response.)
	const extractBalancedObject = (source, startIndex) => {
		let depth = 0;
		let inString = false;
		let escaped = false;
		for (let i = startIndex; i < source.length; i++) {
			const ch = source[i];
			if (escaped) {
				escaped = false;
			} else if (ch === '\\') {
				escaped = true;
			} else if (ch === '"') {
				inString = !inString;
			} else if (!inString) {
				if (ch === '{') {
					depth++;
				} else if (ch === '}') {
					depth--;
					if (depth === 0) {
						return source.substring(startIndex, i + 1);
					}
				}
			}
		}
		return null; // Unbalanced — ran off the end of the source.
	};

	let parsedObject;
	let primaryParseAttemptFailed = false;

	// --- Attempt 1: Simple Parse (with optional Markdown cleanup) ---
	logger.debug('Attempting simple parse...');
	try {
		// Check for markdown code block
		const codeBlockMatch = jsonToParse.match(/```(?:json)?\s*([\s\S]*?)\s*```/);
		let contentToParseDirectly = jsonToParse;
		if (codeBlockMatch && codeBlockMatch[1]) {
			contentToParseDirectly = codeBlockMatch[1].trim();
			logger.debug('Simple parse: Extracted content from markdown code block.');
		} else {
			logger.debug(
				'Simple parse: No markdown code block found, using trimmed original.'
			);
		}

		parsedObject = JSON.parse(contentToParseDirectly);
		logger.debug('Simple parse successful!');

		// Quick check if it looks like our target object
		if (
			!parsedObject ||
			typeof parsedObject !== 'object' ||
			!Array.isArray(parsedObject.subtasks)
		) {
			logger.warn(
				'Simple parse succeeded, but result is not the expected {"subtasks": []} structure. Will proceed to advanced extraction.'
			);
			primaryParseAttemptFailed = true;
			parsedObject = null; // Reset parsedObject so we enter the advanced logic
		}
		// If it IS the correct structure, we'll skip advanced extraction.
	} catch (e) {
		logger.warn(
			`Simple parse failed: ${e.message}. Proceeding to advanced extraction logic.`
		);
		primaryParseAttemptFailed = true;
	}

	// --- Attempt 2: Advanced Extraction (if simple parse failed or produced wrong structure) ---
	if (primaryParseAttemptFailed || !parsedObject) {
		logger.debug('Attempting advanced extraction logic...');
		// Reset jsonToParse to the original full trimmed response for advanced logic
		jsonToParse = originalTrimmedResponse;

		// Preferred strategy: find the literal '{"subtasks":' marker and
		// carve out the balanced object that starts there.
		const targetPattern = '{"subtasks":';
		const patternStartIndex = jsonToParse.indexOf(targetPattern);

		let candidate = null;
		if (patternStartIndex !== -1) {
			candidate = extractBalancedObject(jsonToParse, patternStartIndex);
			if (candidate !== null) {
				logger.debug(
					'Advanced extraction: Extracted balanced object at "{"subtasks":" marker.'
				);
			}
		}

		// Fallback: grab the widest first-'{' .. last-'}' span, which covers
		// responses where the marker is absent or the object is truncated.
		if (candidate === null) {
			const firstBrace = jsonToParse.indexOf('{');
			const lastBrace = jsonToParse.lastIndexOf('}');
			if (firstBrace !== -1 && lastBrace > firstBrace) {
				candidate = jsonToParse.substring(firstBrace, lastBrace + 1);
				logger.debug(
					'Advanced extraction: Fell back to outermost brace span.'
				);
			}
		}

		if (candidate !== null) {
			jsonToParse = candidate;
		}

		logger.debug(
			`Advanced extraction: JSON string that will be parsed: ${jsonToParse.substring(0, 500)}...`
		);
		try {
			parsedObject = JSON.parse(jsonToParse);
			logger.debug('Advanced extraction parse successful!');
		} catch (parseError) {
			logger.error(
				`Advanced extraction: Failed to parse JSON object: ${parseError.message}`
			);
			logger.error(
				`Advanced extraction: Problematic JSON string for parse (first 500 chars): ${jsonToParse.substring(0, 500)}`
			);
			throw new Error(
				// Re-throw a more specific error if advanced also fails
				`Failed to parse JSON response object after both simple and advanced attempts: ${parseError.message}`
			);
		}
	}

	// --- Validation (applies to successfully parsedObject from either attempt) ---
	if (
		!parsedObject ||
		typeof parsedObject !== 'object' ||
		!Array.isArray(parsedObject.subtasks)
	) {
		logger.error(
			`Final parsed content is not an object or missing 'subtasks' array. Content: ${JSON.stringify(parsedObject).substring(0, 200)}`
		);
		throw new Error(
			'Parsed AI response is not a valid object containing a "subtasks" array after all attempts.'
		);
	}
	const parsedSubtasks = parsedObject.subtasks;

	if (expectedCount && parsedSubtasks.length !== expectedCount) {
		logger.warn(
			`Expected ${expectedCount} subtasks, but parsed ${parsedSubtasks.length}.`
		);
	}

	let currentId = startId;
	const validatedSubtasks = [];
	const validationErrors = [];

	for (const rawSubtask of parsedSubtasks) {
		// Force sequential IDs, pending status, and sibling-only dependencies
		// regardless of what the AI produced.
		const correctedSubtask = {
			...rawSubtask,
			id: currentId,
			dependencies: Array.isArray(rawSubtask.dependencies)
				? rawSubtask.dependencies.filter(
						(dep) =>
							typeof dep === 'string' && dep.startsWith(`${parentTaskId}.`)
					)
				: [],
			status: 'pending'
		};

		const result = subtaskSchema.safeParse(correctedSubtask);

		if (result.success) {
			validatedSubtasks.push(result.data);
		} else {
			logger.warn(
				`Subtask validation failed for raw data: ${JSON.stringify(rawSubtask).substring(0, 100)}...`
			);
			result.error.errors.forEach((err) => {
				const errorMessage = ` - Field '${err.path.join('.')}': ${err.message}`;
				logger.warn(errorMessage);
				validationErrors.push(`Subtask ${currentId}: ${errorMessage}`);
			});
		}
		currentId++;
	}

	if (validationErrors.length > 0) {
		logger.error(
			`Found ${validationErrors.length} validation errors in the generated subtasks.`
		);
		logger.warn('Proceeding with only the successfully validated subtasks.');
	}

	if (validatedSubtasks.length === 0 && parsedSubtasks.length > 0) {
		throw new Error(
			'AI response contained potential subtasks, but none passed validation.'
		);
	}
	return validatedSubtasks.slice(0, expectedCount || validatedSubtasks.length);
}
/**
 * Expand a task into subtasks using the unified AI service (generateTextService).
 * Appends new subtasks by default. Replaces existing subtasks if force=true.
 * Integrates complexity report to determine subtask count and prompt if available,
 * unless numSubtasks is explicitly provided.
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {number} taskId - Task ID to expand
 * @param {number | null | undefined} [numSubtasks] - Optional: Explicit target number of subtasks. If null/undefined, check complexity report or config default.
 * @param {boolean} [useResearch=false] - Whether to use the research AI role.
 * @param {string} [additionalContext=''] - Optional additional context.
 * @param {Object} context - Context object containing session and mcpLog.
 * @param {Object} [context.session] - Session object from MCP.
 * @param {Object} [context.mcpLog] - MCP logger object.
 * @param {string} [context.projectRoot] - Project root path
 * @param {string} [context.tag] - Tag for the task
 * @param {string} [context.complexityReportPath] - Path to the complexity report file
 * @param {boolean} [force=false] - If true, replace existing subtasks; otherwise, append.
 * @returns {Promise<Object>} The updated parent task object with new subtasks, plus telemetry/tag info.
 * @throws {Error} If task not found, AI service fails, or parsing fails.
 */
async function expandTask(
	tasksPath,
	taskId,
	numSubtasks,
	useResearch = false,
	additionalContext = '',
	context = {},
	force = false
) {
	const {
		session,
		mcpLog,
		projectRoot: contextProjectRoot,
		tag,
		complexityReportPath
	} = context;
	// Presence of an MCP logger is the signal for JSON (machine) output mode.
	const outputFormat = mcpLog ? 'json' : 'text';

	// Determine projectRoot: Use from context if available, otherwise derive from tasksPath
	const projectRoot = contextProjectRoot || findProjectRoot(tasksPath);

	// Use mcpLog if available, otherwise use the default console log wrapper
	const logger = mcpLog || {
		info: (msg) => !isSilentMode() && log('info', msg),
		warn: (msg) => !isSilentMode() && log('warn', msg),
		error: (msg) => !isSilentMode() && log('error', msg),
		debug: (msg) =>
			!isSilentMode() && getDebugFlag(session) && log('debug', msg) // Use getDebugFlag
	};

	if (mcpLog) {
		logger.info(`expandTask called with context: session=${!!session}`);
	}

	try {
		// --- Task Loading/Filtering (Unchanged) ---
		logger.info(`Reading tasks from ${tasksPath}`);
		const data = readJSON(tasksPath, projectRoot, tag);
		if (!data || !data.tasks)
			throw new Error(`Invalid tasks data in ${tasksPath}`);
		const taskIndex = data.tasks.findIndex(
			(t) => t.id === parseInt(taskId, 10)
		);
		if (taskIndex === -1) throw new Error(`Task ${taskId} not found`);
		const task = data.tasks[taskIndex];
		logger.info(
			`Expanding task ${taskId}: ${task.title}${useResearch ? ' with research' : ''}`
		);
		// --- End Task Loading/Filtering ---

		// --- Handle Force Flag: Clear existing subtasks if force=true ---
		if (force && Array.isArray(task.subtasks) && task.subtasks.length > 0) {
			logger.info(
				`Force flag set. Clearing existing ${task.subtasks.length} subtasks for task ${taskId}.`
			);
			task.subtasks = []; // Clear existing subtasks
		}
		// --- End Force Flag Handling ---

		// --- Context Gathering ---
		// Best-effort: fuzzy-find related tasks and gather their content as
		// extra prompt context. Failures here are logged and swallowed so
		// expansion still proceeds without the extra context.
		let gatheredContext = '';
		try {
			const contextGatherer = new ContextGatherer(projectRoot, tag);
			const allTasksFlat = flattenTasksWithSubtasks(data.tasks);
			const fuzzySearch = new FuzzyTaskSearch(allTasksFlat, 'expand-task');
			const searchQuery = `${task.title} ${task.description}`;
			const searchResults = fuzzySearch.findRelevantTasks(searchQuery, {
				maxResults: 5,
				includeSelf: true
			});
			const relevantTaskIds = fuzzySearch.getTaskIds(searchResults);

			// De-dupe while keeping the expanded task itself first.
			const finalTaskIds = [
				...new Set([taskId.toString(), ...relevantTaskIds])
			];

			if (finalTaskIds.length > 0) {
				const contextResult = await contextGatherer.gather({
					tasks: finalTaskIds,
					format: 'research'
				});
				gatheredContext = contextResult.context || '';
			}
		} catch (contextError) {
			logger.warn(`Could not gather context: ${contextError.message}`);
		}
		// --- End Context Gathering ---

		// --- Complexity Report Integration ---
		let finalSubtaskCount;
		let complexityReasoningContext = '';
		let taskAnalysis = null;

		logger.info(
			`Looking for complexity report at: ${complexityReportPath}${tag !== 'master' ? ` (tag-specific for '${tag}')` : ''}`
		);

		try {
			if (fs.existsSync(complexityReportPath)) {
				const complexityReport = readJSON(complexityReportPath);
				taskAnalysis = complexityReport?.complexityAnalysis?.find(
					(a) => a.taskId === task.id
				);
				if (taskAnalysis) {
					logger.info(
						`Found complexity analysis for task ${task.id}: Score ${taskAnalysis.complexityScore}`
					);
					if (taskAnalysis.reasoning) {
						complexityReasoningContext = `\nComplexity Analysis Reasoning: ${taskAnalysis.reasoning}`;
					}
				} else {
					logger.info(
						`No complexity analysis found for task ${task.id} in report.`
					);
				}
			} else {
				logger.info(
					`Complexity report not found at ${complexityReportPath}. Skipping complexity check.`
				);
			}
		} catch (reportError) {
			logger.warn(
				`Could not read or parse complexity report: ${reportError.message}. Proceeding without it.`
			);
		}

		// Determine final subtask count.
		// Precedence: explicit argument > complexity-report recommendation >
		// configured default; anything invalid falls back to 3.
		const explicitNumSubtasks = parseInt(numSubtasks, 10);
		if (!Number.isNaN(explicitNumSubtasks) && explicitNumSubtasks >= 0) {
			finalSubtaskCount = explicitNumSubtasks;
			logger.info(
				`Using explicitly provided subtask count: ${finalSubtaskCount}`
			);
		} else if (taskAnalysis?.recommendedSubtasks) {
			finalSubtaskCount = parseInt(taskAnalysis.recommendedSubtasks, 10);
			logger.info(
				`Using subtask count from complexity report: ${finalSubtaskCount}`
			);
		} else {
			finalSubtaskCount = getDefaultSubtasks(session);
			logger.info(`Using default number of subtasks: ${finalSubtaskCount}`);
		}
		if (Number.isNaN(finalSubtaskCount) || finalSubtaskCount < 0) {
			logger.warn(
				`Invalid subtask count determined (${finalSubtaskCount}), defaulting to 3.`
			);
			finalSubtaskCount = 3;
		}

		// Determine prompt content AND system prompt
		// New subtask IDs continue after any existing (non-cleared) subtasks.
		const nextSubtaskId = (task.subtasks?.length || 0) + 1;

		// Load prompts using PromptManager
		const promptManager = getPromptManager();

		// Combine all context sources into a single additionalContext parameter
		let combinedAdditionalContext = '';
		if (additionalContext || complexityReasoningContext) {
			combinedAdditionalContext =
				`\n\n${additionalContext}${complexityReasoningContext}`.trim();
		}
		if (gatheredContext) {
			combinedAdditionalContext =
				`${combinedAdditionalContext}\n\n# Project Context\n\n${gatheredContext}`.trim();
		}

		// Ensure expansionPrompt is a string (handle both string and object formats)
		let expansionPromptText = undefined;
		if (taskAnalysis?.expansionPrompt) {
			if (typeof taskAnalysis.expansionPrompt === 'string') {
				expansionPromptText = taskAnalysis.expansionPrompt;
			} else if (
				typeof taskAnalysis.expansionPrompt === 'object' &&
				taskAnalysis.expansionPrompt.text
			) {
				expansionPromptText = taskAnalysis.expansionPrompt.text;
			}
		}

		// Ensure gatheredContext is a string (handle both string and object formats)
		let gatheredContextText = gatheredContext;
		if (typeof gatheredContext === 'object' && gatheredContext !== null) {
			if (gatheredContext.data) {
				gatheredContextText = gatheredContext.data;
			} else if (gatheredContext.text) {
				gatheredContextText = gatheredContext.text;
			} else {
				gatheredContextText = JSON.stringify(gatheredContext);
			}
		}

		const promptParams = {
			task: task,
			subtaskCount: finalSubtaskCount,
			nextSubtaskId: nextSubtaskId,
			additionalContext: additionalContext,
			complexityReasoningContext: complexityReasoningContext,
			gatheredContext: gatheredContextText || '',
			useResearch: useResearch,
			expansionPrompt: expansionPromptText || undefined
		};

		// Pick the prompt variant: complexity-report wins over research,
		// which wins over the default template.
		let variantKey = 'default';
		if (expansionPromptText) {
			variantKey = 'complexity-report';
			logger.info(
				`Using expansion prompt from complexity report for task ${task.id}.`
			);
		} else if (useResearch) {
			variantKey = 'research';
			logger.info(`Using research variant for task ${task.id}.`);
		} else {
			logger.info(`Using standard prompt generation for task ${task.id}.`);
		}

		const { systemPrompt, userPrompt: promptContent } =
			await promptManager.loadPrompt('expand-task', promptParams, variantKey);
		// --- End Complexity Report / Prompt Logic ---

		// --- AI Subtask Generation using generateTextService ---
		let generatedSubtasks = [];
		let loadingIndicator = null;
		if (outputFormat === 'text') {
			loadingIndicator = startLoadingIndicator(
				`Generating ${finalSubtaskCount || 'appropriate number of'} subtasks...\n`
			);
		}

		let responseText = '';
		let aiServiceResponse = null;

		try {
			const role = useResearch ? 'research' : 'main';

			// Call generateTextService with the determined prompts and telemetry params
			aiServiceResponse = await generateTextService({
				prompt: promptContent,
				systemPrompt: systemPrompt,
				role,
				session,
				projectRoot,
				commandName: 'expand-task',
				outputType: outputFormat
			});
			responseText = aiServiceResponse.mainResult;

			// Parse Subtasks
			generatedSubtasks = parseSubtasksFromText(
				responseText,
				nextSubtaskId,
				finalSubtaskCount,
				task.id,
				logger
			);
			logger.info(
				`Successfully parsed ${generatedSubtasks.length} subtasks from AI response.`
			);
		} catch (error) {
			// NOTE(review): the finally block below also stops the indicator,
			// so it is stopped twice on this path — presumably
			// stopLoadingIndicator tolerates that; confirm.
			if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
			logger.error(
				`Error during AI call or parsing for task ${taskId}: ${error.message}`, // Added task ID context
				'error'
			);
			// Log raw response in debug mode if parsing failed
			if (
				error.message.includes('Failed to parse valid subtasks') &&
				getDebugFlag(session)
			) {
				logger.error(`Raw AI Response that failed parsing:\n${responseText}`);
			}
			throw error;
		} finally {
			if (loadingIndicator) stopLoadingIndicator(loadingIndicator);
		}

		// --- Task Update & File Writing ---
		// Ensure task.subtasks is an array before appending
		if (!Array.isArray(task.subtasks)) {
			task.subtasks = [];
		}
		// Append the newly generated and validated subtasks
		task.subtasks.push(...generatedSubtasks);
		// --- End Change: Append instead of replace ---

		data.tasks[taskIndex] = task; // Assign the modified task back
		writeJSON(tasksPath, data, projectRoot, tag);
		// await generateTaskFiles(tasksPath, path.dirname(tasksPath));

		// Display AI Usage Summary for CLI
		if (
			outputFormat === 'text' &&
			aiServiceResponse &&
			aiServiceResponse.telemetryData
		) {
			displayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');
		}

		// Return the updated task object AND telemetry data
		return {
			task,
			telemetryData: aiServiceResponse?.telemetryData,
			tagInfo: aiServiceResponse?.tagInfo
		};
	} catch (error) {
		// Catches errors from file reading, parsing, AI call etc.
		logger.error(`Error expanding task ${taskId}: ${error.message}`, 'error');
		if (outputFormat === 'text' && getDebugFlag(session)) {
			console.error(error); // Log full stack in debug CLI mode
		}
		throw error; // Re-throw for the caller
	}
}

export default expandTask;
/**
 * Expand all eligible pending or in-progress tasks using the expandTask function.
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {number} [numSubtasks] - Optional: Target number of subtasks per task.
 * @param {boolean} [useResearch=false] - Whether to use the research AI role.
 * @param {string} [additionalContext=''] - Optional additional context.
 * @param {boolean} [force=false] - Force expansion even if tasks already have subtasks.
 * @param {Object} context - Context object containing session and mcpLog.
 * @param {Object} [context.session] - Session object from MCP.
 * @param {Object} [context.mcpLog] - MCP logger object.
 * @param {string} [context.projectRoot] - Project root path
 * @param {string} [context.tag] - Tag for the task
 * @param {string} [context.complexityReportPath] - Path to the complexity report file
 * @param {string} [outputFormat='text'] - Output format ('text' or 'json'). MCP calls should use 'json'.
 * @returns {Promise<{success: boolean, expandedCount: number, failedCount: number, skippedCount: number, tasksToExpand: number, telemetryData: Array<Object>}>} - Result summary.
 */
async function expandAllTasks(
	tasksPath,
	numSubtasks, // Keep this signature, expandTask handles defaults
	useResearch = false,
	additionalContext = '',
	force = false, // Keep force here for the filter logic
	context = {},
	outputFormat = 'text' // Assume text default for CLI
) {
	const {
		session,
		mcpLog,
		projectRoot: providedProjectRoot,
		tag,
		complexityReportPath
	} = context;
	const isMCPCall = !!mcpLog; // Determine if called from MCP

	const projectRoot = providedProjectRoot || findProjectRoot();
	if (!projectRoot) {
		throw new Error('Could not determine project root directory');
	}

	// Use mcpLog if available, otherwise use the default console log wrapper respecting silent mode
	const logger =
		mcpLog ||
		(outputFormat === 'json'
			? {
					// Basic logger for JSON output mode
					info: (msg) => {},
					warn: (msg) => {},
					error: (msg) => console.error(`ERROR: ${msg}`), // Still log errors
					debug: (msg) => {}
				}
			: {
					// CLI logger respecting silent mode
					info: (msg) => !isSilentMode() && log('info', msg),
					warn: (msg) => !isSilentMode() && log('warn', msg),
					error: (msg) => !isSilentMode() && log('error', msg),
					debug: (msg) =>
						!isSilentMode() && getDebugFlag(session) && log('debug', msg)
				});

	let loadingIndicator = null;
	let expandedCount = 0;
	let failedCount = 0;
	let tasksToExpandCount = 0;
	const allTelemetryData = []; // Still collect individual data first

	if (!isMCPCall && outputFormat === 'text') {
		loadingIndicator = startLoadingIndicator(
			'Analyzing tasks for expansion...'
		);
	}

	try {
		logger.info(`Reading tasks from ${tasksPath}`);
		const data = readJSON(tasksPath, projectRoot, tag);
		if (!data || !data.tasks) {
			throw new Error(`Invalid tasks data in ${tasksPath}`);
		}

		// --- Restore Original Filtering Logic ---
		// Eligible = pending/in-progress AND (no subtasks yet OR force).
		const tasksToExpand = data.tasks.filter(
			(task) =>
				(task.status === 'pending' || task.status === 'in-progress') && // Include 'in-progress'
				(!task.subtasks || task.subtasks.length === 0 || force) // Check subtasks/force here
		);
		tasksToExpandCount = tasksToExpand.length; // Get the count from the filtered array
		logger.info(`Found ${tasksToExpandCount} tasks eligible for expansion.`);
		// --- End Restored Filtering Logic ---

		if (loadingIndicator) {
			stopLoadingIndicator(loadingIndicator, 'Analysis complete.');
		}

		if (tasksToExpandCount === 0) {
			logger.info('No tasks eligible for expansion.');
			// --- Fix: Restore success: true and add message ---
			return {
				success: true, // Indicate overall success despite no action
				expandedCount: 0,
				failedCount: 0,
				skippedCount: 0,
				tasksToExpand: 0,
				telemetryData: allTelemetryData,
				message: 'No tasks eligible for expansion.'
			};
			// --- End Fix ---
		}

		// Iterate over the already filtered tasks.
		// Expansion is intentionally sequential; a failure on one task is
		// logged and counted, then the loop moves on.
		for (const task of tasksToExpand) {
			// Start indicator for individual task expansion in CLI mode
			let taskIndicator = null;
			if (!isMCPCall && outputFormat === 'text') {
				taskIndicator = startLoadingIndicator(`Expanding task ${task.id}...`);
			}

			try {
				// Call the refactored expandTask function AND capture result
				const result = await expandTask(
					tasksPath,
					task.id,
					numSubtasks,
					useResearch,
					additionalContext,
					{
						...context,
						projectRoot,
						tag: data.tag || tag,
						complexityReportPath
					}, // Pass the whole context object with projectRoot and resolved tag
					force
				);
				expandedCount++;

				// Collect individual telemetry data
				if (result && result.telemetryData) {
					allTelemetryData.push(result.telemetryData);
				}

				if (taskIndicator) {
					stopLoadingIndicator(taskIndicator, `Task ${task.id} expanded.`);
				}
				logger.info(`Successfully expanded task ${task.id}.`);
			} catch (error) {
				failedCount++;
				if (taskIndicator) {
					stopLoadingIndicator(
						taskIndicator,
						`Failed to expand task ${task.id}.`,
						false
					);
				}
				logger.error(`Failed to expand task ${task.id}: ${error.message}`);
				// Continue to the next task
			}
		}

		// --- AGGREGATION AND DISPLAY ---
		logger.info(
			`Expansion complete: ${expandedCount} expanded, ${failedCount} failed.`
		);

		// Aggregate the collected telemetry data
		const aggregatedTelemetryData = aggregateTelemetry(
			allTelemetryData,
			'expand-all-tasks'
		);

		if (outputFormat === 'text') {
			const summaryContent =
				`${chalk.white.bold('Expansion Summary:')}\n\n` +
				`${chalk.cyan('-')} Attempted: ${chalk.bold(tasksToExpandCount)}\n` +
				`${chalk.green('-')} Expanded: ${chalk.bold(expandedCount)}\n` +
				// Skipped count is always 0 now due to pre-filtering
				`${chalk.gray('-')} Skipped: ${chalk.bold(0)}\n` +
				`${chalk.red('-')} Failed: ${chalk.bold(failedCount)}`;

			console.log(
				boxen(summaryContent, {
					padding: 1,
					margin: { top: 1 },
					borderColor: failedCount > 0 ? 'red' : 'green', // Red if failures, green otherwise
					borderStyle: 'round'
				})
			);
		}

		if (outputFormat === 'text' && aggregatedTelemetryData) {
			displayAiUsageSummary(aggregatedTelemetryData, 'cli');
		}

		// Return summary including the AGGREGATED telemetry data
		return {
			success: true,
			expandedCount,
			failedCount,
			skippedCount: 0,
			tasksToExpand: tasksToExpandCount,
			telemetryData: aggregatedTelemetryData
		};
	} catch (error) {
		if (loadingIndicator)
			stopLoadingIndicator(loadingIndicator, 'Error.', false);
		logger.error(`Error during expand all operation: ${error.message}`);
		if (!isMCPCall && getDebugFlag(session)) {
			console.error(error); // Log full stack in debug CLI mode
		}
		// Re-throw error for the caller to handle, the direct function will format it
		throw error; // Let direct function wrapper handle formatting
		/* Original re-throw:
		throw new Error(`Failed to expand all tasks: ${error.message}`);
		*/
	}
}

export default expandAllTasks;
/**
 * Clear subtasks from specified tasks
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} taskIds - Task IDs to clear subtasks from (comma-separated)
 * @param {Object} context - Context object containing projectRoot and tag
 * @param {string} [context.projectRoot] - Project root path
 * @param {string} [context.tag] - Tag for the task
 */
function clearSubtasks(tasksPath, taskIds, context = {}) {
	const { projectRoot, tag } = context;
	log('info', `Reading tasks from ${tasksPath}...`);
	const data = readJSON(tasksPath, projectRoot, tag);
	if (!data || !data.tasks) {
		log('error', 'No valid tasks found.');
		process.exit(1);
	}

	if (!isSilentMode()) {
		console.log(
			boxen(chalk.white.bold('Clearing Subtasks'), {
				padding: 1,
				borderColor: 'blue',
				borderStyle: 'round',
				margin: { top: 1, bottom: 1 }
			})
		);
	}

	// Accept a comma-separated list of task IDs.
	const requestedIds = taskIds.split(',').map((raw) => raw.trim());
	let totalCleared = 0;

	// Accumulates one row per processed task for the final report.
	const reportTable = new Table({
		head: [
			chalk.cyan.bold('Task ID'),
			chalk.cyan.bold('Task Title'),
			chalk.cyan.bold('Subtasks Cleared')
		],
		colWidths: [10, 50, 20],
		style: { head: [], border: [] }
	});

	for (const requestedId of requestedIds) {
		const numericId = parseInt(requestedId, 10);

		// Guard: skip anything that is not a parseable integer ID.
		if (Number.isNaN(numericId)) {
			log('error', `Invalid task ID: ${requestedId}`);
			continue;
		}

		const matchedTask = data.tasks.find((candidate) => candidate.id === numericId);
		if (!matchedTask) {
			log('error', `Task ${numericId} not found`);
			continue;
		}

		// Nothing to clear — report it but leave the task untouched.
		if (!matchedTask.subtasks || matchedTask.subtasks.length === 0) {
			log('info', `Task ${numericId} has no subtasks to clear`);
			reportTable.push([
				numericId.toString(),
				truncate(matchedTask.title, 47),
				chalk.yellow('No subtasks')
			]);
			continue;
		}

		const removedCount = matchedTask.subtasks.length;
		matchedTask.subtasks = [];
		totalCleared++;
		log('info', `Cleared ${removedCount} subtasks from task ${numericId}`);

		reportTable.push([
			numericId.toString(),
			truncate(matchedTask.title, 47),
			chalk.green(`${removedCount} subtasks cleared`)
		]);
	}

	if (totalCleared > 0) {
		// Persist only when at least one task actually changed.
		writeJSON(tasksPath, data, projectRoot, tag);

		// Show summary table
		if (!isSilentMode()) {
			console.log(
				boxen(chalk.white.bold('Subtask Clearing Summary:'), {
					padding: { left: 2, right: 2, top: 0, bottom: 0 },
					margin: { top: 1, bottom: 0 },
					borderColor: 'blue',
					borderStyle: 'round'
				})
			);
			console.log(reportTable.toString());
		}

		// Success message
		if (!isSilentMode()) {
			console.log(
				boxen(
					chalk.green(
						`Successfully cleared subtasks from ${chalk.bold(totalCleared)} task(s)`
					),
					{
						padding: 1,
						borderColor: 'green',
						borderStyle: 'round',
						margin: { top: 1 }
					}
				)
			);

			// Next steps suggestion
			console.log(
				boxen(
					chalk.white.bold('Next Steps:') +
						'\n\n' +
						`${chalk.cyan('1.')} Run ${chalk.yellow('task-master expand --id=<id>')} to generate new subtasks\n` +
						`${chalk.cyan('2.')} Run ${chalk.yellow('task-master list --with-subtasks')} to verify changes`,
					{
						padding: 1,
						borderColor: 'cyan',
						borderStyle: 'round',
						margin: { top: 1 }
					}
				)
			);
		}
	} else if (!isSilentMode()) {
		console.log(
			boxen(chalk.yellow('No subtasks were cleared'), {
				padding: 1,
				borderColor: 'yellow',
				borderStyle: 'round',
				margin: { top: 1 }
			})
		);
	}
}

export default clearSubtasks;
/**
 * Get version information from package.json
 * Result is memoized in module-level `cachedVersionInfo` so the file is read once.
 * @returns {Object} Version information ({ version, name })
 */
function getVersionInfo() {
	// Serve the memoized result when package.json was already read
	if (cachedVersionInfo) {
		return cachedVersionInfo;
	}

	const fallback = { version: 'unknown', name: 'task-master-ai' };

	try {
		// package.json lives at the repository root, three levels above tools/
		const packageJsonPath = path.join(
			path.dirname(__filename),
			'../../../package.json'
		);
		if (fs.existsSync(packageJsonPath)) {
			const { version, name } = JSON.parse(
				fs.readFileSync(packageJsonPath, 'utf-8')
			);
			cachedVersionInfo = { version, name };
		} else {
			cachedVersionInfo = fallback;
		}
	} catch (error) {
		// Unreadable/corrupt package.json: fall back to placeholder info
		cachedVersionInfo = fallback;
	}

	return cachedVersionInfo;
}
/**
 * Get current tag information for MCP responses
 * @param {string} projectRoot - The project root directory
 * @param {Object} log - Logger object
 * @returns {Object} Tag information object ({ currentTag, availableTags })
 */
function getTagInfo(projectRoot, log) {
	// Fresh fallback object each call so callers can't share mutations
	const fallbackTagInfo = () => ({
		currentTag: 'master',
		availableTags: ['master']
	});

	try {
		if (!projectRoot) {
			log.warn('No project root provided for tag information');
			return fallbackTagInfo();
		}

		const currentTag = getCurrentTag(projectRoot);

		let availableTags = ['master'];
		try {
			const tasksJsonPath = path.join(
				projectRoot,
				'.taskmaster',
				'tasks',
				'tasks.json'
			);
			if (fs.existsSync(tasksJsonPath)) {
				const tasksData = JSON.parse(fs.readFileSync(tasksJsonPath, 'utf-8'));

				// Tagged format: top-level keys are tags, each holding a tasks array
				const looksTagged =
					tasksData &&
					typeof tasksData === 'object' &&
					!Array.isArray(tasksData.tasks);
				if (looksTagged) {
					const tagKeys = Object.keys(tasksData).filter(
						(key) =>
							tasksData[key] &&
							typeof tasksData[key] === 'object' &&
							Array.isArray(tasksData[key].tasks)
					);
					if (tagKeys.length > 0) {
						availableTags = tagKeys;
					}
				}
			}
		} catch (tagError) {
			// Missing/unparseable tasks.json is non-fatal; keep the default list
			log.debug(`Could not read available tags: ${tagError.message}`);
		}

		return {
			currentTag: currentTag || 'master',
			availableTags: availableTags
		};
	} catch (error) {
		log.warn(`Error getting tag information: ${error.message}`);
		return fallbackTagInfo();
	}
}
/**
 * Get normalized project root path
 * Resolution precedence:
 *   1. TASK_MASTER_PROJECT_ROOT environment variable
 *   2. Explicitly provided projectRoot argument
 *   3. Cached root from a previous tasks.json discovery
 *   4. Current directory when it contains recognizable project markers
 *   5. Current directory as a last resort (with warnings)
 * @param {string|undefined} projectRootRaw - Raw project root from arguments
 * @param {Object} log - Logger object
 * @returns {string} - Normalized absolute path to project root
 */
function getProjectRoot(projectRootRaw, log) {
	const toAbsolute = (candidate) =>
		path.isAbsolute(candidate)
			? candidate
			: path.resolve(process.cwd(), candidate);

	// 1. Environment variable override
	const envRoot = process.env.TASK_MASTER_PROJECT_ROOT;
	if (envRoot) {
		const absolutePath = toAbsolute(envRoot);
		log.info(
			`Using project root from TASK_MASTER_PROJECT_ROOT environment variable: ${absolutePath}`
		);
		return absolutePath;
	}

	// 2. Explicit argument
	if (projectRootRaw) {
		const absolutePath = toAbsolute(projectRootRaw);
		log.info(`Using explicitly provided project root: ${absolutePath}`);
		return absolutePath;
	}

	// 3. Root cached from an earlier tasks.json search (keeps runs consistent)
	if (lastFoundProjectRoot) {
		log.info(
			`Using last known project root where tasks.json was found: ${lastFoundProjectRoot}`
		);
		return lastFoundProjectRoot;
	}

	// 4. Current directory, if it looks like a task-master project
	const currentDir = process.cwd();
	const hasMarker = PROJECT_MARKERS.some((marker) =>
		fs.existsSync(path.join(currentDir, marker))
	);
	if (hasMarker) {
		log.info(
			`Using current directory as project root (found project markers): ${currentDir}`
		);
		return currentDir;
	}

	// 5. Last resort: CWD, with a warning about how to fix it
	log.warn(
		`No task-master project detected in current directory. Using ${currentDir} as project root.`
	);
	log.warn(
		'Consider using --project-root to specify the correct project location or set TASK_MASTER_PROJECT_ROOT environment variable.'
	);
	return currentDir;
}
/**
 * Extracts and normalizes the project root path from the MCP session object.
 * Checks both known URI locations, URI-decodes the value, strips a file://
 * scheme and any bogus Windows "/C:/..." leading slash, then resolves to an
 * absolute path. Falls back to server-path deduction and finally the CWD.
 *
 * Fix: the previous decode logic was redundant — the non-file:// branch of the
 * ternary assigned the raw string and was then unconditionally overwritten by a
 * second decodeURIComponent call; collapsed into one explicit decode.
 *
 * @param {Object} session - The MCP session object.
 * @param {Object} log - The MCP logger object.
 * @returns {string|null} - The normalized absolute project root path or null if not found/invalid.
 */
function getProjectRootFromSession(session, log) {
	try {
		// Dump the session shape up front: root URIs have appeared in two
		// different locations across MCP clients, so this aids debugging.
		log.info(
			`Session object: ${JSON.stringify({
				hasSession: !!session,
				hasRoots: !!session?.roots,
				rootsType: typeof session?.roots,
				isRootsArray: Array.isArray(session?.roots),
				rootsLength: session?.roots?.length,
				firstRoot: session?.roots?.[0],
				hasRootsRoots: !!session?.roots?.roots,
				rootsRootsType: typeof session?.roots?.roots,
				isRootsRootsArray: Array.isArray(session?.roots?.roots),
				rootsRootsLength: session?.roots?.roots?.length,
				firstRootsRoot: session?.roots?.roots?.[0]
			})}`
		);

		let rawRootPath = null;

		// Primary location
		if (session?.roots?.[0]?.uri) {
			rawRootPath = session.roots[0].uri;
			log.info(`Found raw root URI in session.roots[0].uri: ${rawRootPath}`);
		}
		// Alternate location
		else if (session?.roots?.roots?.[0]?.uri) {
			rawRootPath = session.roots.roots[0].uri;
			log.info(
				`Found raw root URI in session.roots.roots[0].uri: ${rawRootPath}`
			);
		}

		if (rawRootPath) {
			// Strip an optional file:// scheme, then URI-decode in every case
			let decodedPath = rawRootPath.startsWith('file://')
				? decodeURIComponent(rawRootPath.slice(7))
				: decodeURIComponent(rawRootPath);

			// "/C:/..." → "C:/..." (Windows drive paths keep a bogus leading slash)
			if (
				decodedPath.startsWith('/') &&
				/[A-Za-z]:/.test(decodedPath.substring(1, 3))
			) {
				decodedPath = decodedPath.substring(1);
			}

			log.info(`Decoded path: ${decodedPath}`);

			// Normalize slashes and resolve to an absolute path for the host OS
			const finalPath = path.resolve(decodedPath.replace(/\\/g, '/'));
			log.info(`Normalized and resolved session path: ${finalPath}`);
			return finalPath;
		}

		log.warn('No project root URI found in session. Attempting fallbacks...');
		const cwd = process.cwd();

		// Fallback 1: derive the repo root from the running server's path (Cursor IDE)
		const serverPath = process.argv[1];
		if (serverPath && serverPath.includes('mcp-server')) {
			const mcpServerIndex = serverPath.indexOf('mcp-server');
			if (mcpServerIndex !== -1) {
				// Go up one level from the mcp-server directory
				const projectRoot = path.dirname(
					serverPath.substring(0, mcpServerIndex)
				);

				if (
					fs.existsSync(path.join(projectRoot, '.cursor')) ||
					fs.existsSync(path.join(projectRoot, 'mcp-server')) ||
					fs.existsSync(path.join(projectRoot, 'package.json'))
				) {
					log.info(
						`Using project root derived from server path: ${projectRoot}`
					);
					return projectRoot; // Already absolute
				}
			}
		}

		// Fallback 2: current working directory
		log.info(`Using current working directory as ultimate fallback: ${cwd}`);
		return cwd; // Already absolute
	} catch (e) {
		log.error(`Error in getProjectRootFromSession: ${e.message}`);
		// Final fallback to CWD on any error
		const cwd = process.cwd();
		log.warn(
			`Returning CWD (${cwd}) due to error during session root processing.`
		);
		return cwd;
	}
}
/**
 * Handle API result with standardized error handling and response formatting
 * @param {Object} result - Result object from API call with success, data, and error properties
 * @param {Object} log - Logger object
 * @param {string} errorPrefix - Prefix for error messages
 * @param {Function} processFunction - Optional function to process successful result data
 * @param {string} [projectRoot] - Optional project root for tag information
 * @returns {Object} - Standardized MCP response object
 */
async function handleApiResult(
	result,
	log,
	errorPrefix = 'API error',
	processFunction = processMCPResponseData,
	projectRoot = null
) {
	// Version info is attached to every response; tag info only when we know the root
	const versionInfo = getVersionInfo();
	const tagInfo = projectRoot ? getTagInfo(projectRoot, log) : null;

	// Failure path: log and wrap in a standard error response
	if (!result.success) {
		const errorMsg = result.error?.message || `Unknown ${errorPrefix}`;
		log.error(`${errorPrefix}: ${errorMsg}`);
		return createErrorResponse(errorMsg, versionInfo, tagInfo);
	}

	// Optionally post-process the payload (defaults to stripping verbose fields)
	const processedData = processFunction
		? processFunction(result.data)
		: result.data;

	log.info('Successfully completed operation');

	const responsePayload = {
		data: processedData,
		version: versionInfo
	};
	if (tagInfo) {
		responsePayload.tag = tagInfo;
	}

	return createContentResponse(responsePayload);
}
/**
 * Executes a task-master CLI command synchronously.
 * Tries the globally installed CLI first; on ENOENT falls back to the local script.
 * @param {string} command - The command to execute (e.g., 'add-task')
 * @param {Object} log - Logger instance
 * @param {Array} args - Arguments for the command
 * @param {string|undefined} projectRootRaw - Optional raw project root path (normalized internally)
 * @param {Object|null} customEnv - Optional environment variables for the child process
 * @returns {Object} - { success, stdout, stderr } or { success: false, error }
 */
function executeTaskMasterCommand(
	command,
	log,
	args = [],
	projectRootRaw = null,
	customEnv = null
) {
	try {
		const cwd = getProjectRoot(projectRootRaw, log);

		log.info(
			`Executing task-master ${command} with args: ${JSON.stringify(
				args
			)} in directory: ${cwd}`
		);

		const fullArgs = [command, ...args];

		// Child env = process env overlaid with any caller-supplied variables
		const spawnOptions = {
			encoding: 'utf8',
			cwd: cwd,
			env: { ...process.env, ...(customEnv || {}) }
		};

		// Prefer the global CLI
		let result = spawnSync('task-master', fullArgs, spawnOptions);

		// ENOENT means the global binary isn't installed; use the local script
		if (result.error && result.error.code === 'ENOENT') {
			log.info('Global task-master not found, falling back to local script');
			result = spawnSync('node', ['scripts/dev.js', ...fullArgs], spawnOptions);
		}

		if (result.error) {
			throw new Error(`Command execution error: ${result.error.message}`);
		}

		if (result.status !== 0) {
			// Prefer stderr; fall back to stdout, then a generic message
			let errorOutput = 'Unknown error';
			if (result.stderr) {
				errorOutput = result.stderr.trim();
			} else if (result.stdout) {
				errorOutput = result.stdout.trim();
			}
			throw new Error(
				`Command failed with exit code ${result.status}: ${errorOutput}`
			);
		}

		return {
			success: true,
			stdout: result.stdout,
			stderr: result.stderr
		};
	} catch (error) {
		log.error(`Error executing task-master command: ${error.message}`);
		return {
			success: false,
			error: error.message
		};
	}
}
/**
 * Checks cache for a result using the provided key. If not found, executes the
 * action function, caches the result upon success, and returns the result.
 * @param {Object} options - Configuration options.
 * @param {string} options.cacheKey - The unique key for caching this operation's result.
 * @param {Function} options.actionFn - Async function run on cache miss; returns
 *   { success: boolean, data?: any, error?: { code, message } }.
 * @param {Object} options.log - The logger instance.
 * @returns {Promise<Object>} - { success, data?, error? }
 */
async function getCachedOrExecute({ cacheKey, actionFn, log }) {
	const cached = contextManager.getCachedData(cacheKey);
	if (cached !== undefined) {
		log.info(`Cache hit for key: ${cacheKey}`);
		return cached;
	}

	log.info(`Cache miss for key: ${cacheKey}. Executing action function.`);
	const result = await actionFn();

	// Only successful results that actually carry data are cached
	if (!result.success) {
		log.warn(
			`Action failed for cache key ${cacheKey}. Result not cached. Error: ${result.error?.message}`
		);
	} else if (result.data === undefined) {
		log.warn(
			`Action for cache key ${cacheKey} succeeded but returned no data. Result not cached.`
		);
	} else {
		log.info(`Action successful. Caching result for key: ${cacheKey}`);
		contextManager.setCachedData(cacheKey, result);
	}

	return result;
}
/**
 * Recursively removes specified fields from task objects, whether single or in an array.
 * Handles common data structures returned by task commands.
 * @param {Object|Array} taskOrData - A single task object or a data object containing a 'tasks' array.
 * @param {string[]} fieldsToRemove - An array of field names to remove.
 * @returns {Object|Array} - The processed data with specified fields removed.
 */
function processMCPResponseData(
	taskOrData,
	fieldsToRemove = ['details', 'testStrategy']
) {
	if (!taskOrData) {
		return taskOrData;
	}

	// Shallow-copy a task, drop unwanted fields, and recurse into subtasks
	const stripTask = (task) => {
		if (typeof task !== 'object' || task === null) {
			return task;
		}
		const cleaned = { ...task };
		for (const field of fieldsToRemove) {
			delete cleaned[field];
		}
		if (cleaned.subtasks && Array.isArray(cleaned.subtasks)) {
			cleaned.subtasks = cleaned.subtasks.map(stripTask);
		}
		return cleaned;
	};

	// Container shape (e.g. from listTasks): keep sibling fields like 'stats'
	const isTaskContainer =
		typeof taskOrData === 'object' &&
		taskOrData !== null &&
		Array.isArray(taskOrData.tasks);
	if (isTaskContainer) {
		return {
			...taskOrData,
			tasks: taskOrData.tasks.map(stripTask)
		};
	}

	// Single task object (heuristic: has id and title)
	const looksLikeTask =
		typeof taskOrData === 'object' &&
		taskOrData !== null &&
		'id' in taskOrData &&
		'title' in taskOrData;
	if (looksLikeTask) {
		return stripTask(taskOrData);
	}

	// Bare array of tasks
	if (Array.isArray(taskOrData)) {
		return taskOrData.map(stripTask);
	}

	// Unknown shape: pass through untouched
	return taskOrData;
}
/**
 * Creates error response for tools
 * @param {string} errorMessage - Error message to include in response
 * @param {Object} [versionInfo] - Optional version information object
 * @param {Object} [tagInfo] - Optional tag information object
 * @returns {Object} - Error content response object in FastMCP format
 */
function createErrorResponse(errorMessage, versionInfo, tagInfo) {
	// Fall back to package metadata when the caller didn't supply version info
	const resolvedVersion = versionInfo || getVersionInfo();

	const lines = [
		`Error: ${errorMessage}`,
		`Version: ${resolvedVersion.version}`,
		`Name: ${resolvedVersion.name}`
	];
	if (tagInfo) {
		lines.push(`Current Tag: ${tagInfo.currentTag}`);
	}

	return {
		content: [
			{
				type: 'text',
				text: lines.join('\n')
			}
		],
		isError: true
	};
}
/**
 * Resolves and normalizes a project root path from various formats.
 * Handles URI encoding, Windows paths, and file protocols.
 * @param {string | undefined | null} rawPath - The raw project root path.
 * @param {object} [log] - Optional logger object.
 * @returns {string | null} Normalized absolute path or null if input is invalid/empty.
 */
function normalizeProjectRoot(rawPath, log) {
	if (!rawPath) return null;
	try {
		const initial = Array.isArray(rawPath) ? rawPath[0] : String(rawPath);
		if (!initial) return null;

		// Step 1: URI-decode; a malformed sequence keeps the raw string instead of throwing
		let candidate;
		try {
			candidate = decodeURIComponent(initial);
		} catch (decodeError) {
			if (log)
				log.warn(
					`Could not decode URI component for path "${rawPath}": ${decodeError.message}. Proceeding with raw string.`
				);
			candidate = initial;
		}

		// Step 2: drop a file:// scheme (file:/// leaves a leading slash, handled below)
		if (candidate.startsWith('file://')) {
			candidate = candidate.slice(7);
		}

		// Step 3: "/C:/..." style paths keep a bogus leading slash after scheme removal
		if (
			candidate.startsWith('/') &&
			/[A-Za-z]:/.test(candidate.substring(1, 3))
		) {
			candidate = candidate.substring(1);
		}

		// Steps 4-5: use forward slashes, then resolve with the host OS conventions
		return path.resolve(candidate.replace(/\\/g, '/'));
	} catch (error) {
		if (log) {
			log.error(
				`Error normalizing project root path "${rawPath}": ${error.message}`
			);
		}
		return null;
	}
}
/**
 * Extracts the raw project root path from the session (without normalization).
 * Used as a fallback within the HOF.
 * @param {Object} session - The MCP session object.
 * @param {Object} log - The MCP logger object.
 * @returns {string|null} The raw path string or null.
 */
function getRawProjectRootFromSession(session, log) {
	try {
		// Primary location used by most clients
		const primary = session?.roots?.[0]?.uri;
		if (primary) {
			return primary;
		}

		// Alternate nested location seen in some clients
		const alternate = session?.roots?.roots?.[0]?.uri;
		if (alternate) {
			return alternate;
		}

		// Not present in either expected location
		return null;
	} catch (e) {
		log.error(`Error accessing session roots: ${e.message}`);
		return null;
	}
}
/**
 * Higher-order function to wrap MCP tool execute methods.
 * Ensures args.projectRoot is present and normalized before execution.
 * Resolution precedence: process env var → session env var → args.projectRoot → session roots.
 * @param {Function} executeFn - The original async execute(args, context) function.
 * @returns {Function} The wrapped async execute function.
 */
function withNormalizedProjectRoot(executeFn) {
	return async (args, context) => {
		const { log, session } = context;
		let normalizedRoot = null;
		let rootSource = 'unknown';

		try {
			// 1a. Process-level environment variable wins outright
			if (process.env.TASK_MASTER_PROJECT_ROOT) {
				const envRoot = process.env.TASK_MASTER_PROJECT_ROOT;
				rootSource = 'TASK_MASTER_PROJECT_ROOT environment variable';
				normalizedRoot = path.isAbsolute(envRoot)
					? envRoot
					: path.resolve(process.cwd(), envRoot);
				log.info(`Using project root from ${rootSource}: ${normalizedRoot}`);
			}
			// 1b. Same variable, but delivered via the session's environment
			else if (session?.env?.TASK_MASTER_PROJECT_ROOT) {
				const envRoot = session.env.TASK_MASTER_PROJECT_ROOT;
				rootSource = 'TASK_MASTER_PROJECT_ROOT session environment variable';
				normalizedRoot = path.isAbsolute(envRoot)
					? envRoot
					: path.resolve(process.cwd(), envRoot);
				log.info(`Using project root from ${rootSource}: ${normalizedRoot}`);
			}
			// 2. Explicit argument
			else if (args.projectRoot) {
				rootSource = 'args.projectRoot';
				normalizedRoot = normalizeProjectRoot(args.projectRoot, log);
				log.info(`Using project root from ${rootSource}: ${normalizedRoot}`);
			}
			// 3. Session-based resolution (already returns a normalized path)
			else {
				const sessionRoot = getProjectRootFromSession(session, log);
				if (sessionRoot) {
					rootSource = 'session';
					normalizedRoot = sessionRoot;
					log.info(`Using project root from ${rootSource}: ${normalizedRoot}`);
				}
			}

			if (!normalizedRoot) {
				log.error(
					'Could not determine project root from environment, args, or session.'
				);
				return createErrorResponse(
					'Could not determine project root. Please provide projectRoot argument or ensure TASK_MASTER_PROJECT_ROOT environment variable is set.'
				);
			}

			// Run the wrapped tool with the normalized root injected into args
			const updatedArgs = { ...args, projectRoot: normalizedRoot };
			return await executeFn(updatedArgs, context);
		} catch (error) {
			log.error(
				`Error within withNormalizedProjectRoot HOF (Normalized Root: ${normalizedRoot}): ${error.message}`
			);
			if (error.stack && log.debug) {
				log.debug(error.stack);
			}
			return createErrorResponse(`Operation failed: ${error.message}`);
		}
	};
}
/**
 * Fetches the list of models from OpenRouter API.
 * @returns {Promise<Array|null>} A promise that resolves with the list of model IDs or null if fetch fails.
 */
function fetchOpenRouterModels() {
	return new Promise((resolve) => {
		const requestOptions = {
			hostname: 'openrouter.ai',
			path: '/api/v1/models',
			method: 'GET',
			headers: {
				Accept: 'application/json'
			}
		};

		const req = https.request(requestOptions, (res) => {
			let body = '';
			res.on('data', (chunk) => {
				body += chunk;
			});
			res.on('end', () => {
				// Any non-200 status is treated as a failure (resolve null, never reject)
				if (res.statusCode !== 200) {
					console.error(
						`OpenRouter API request failed with status code: ${res.statusCode}`
					);
					resolve(null);
					return;
				}
				try {
					const parsed = JSON.parse(body);
					resolve(parsed.data || []); // The model list lives under "data"
				} catch (e) {
					console.error('Error parsing OpenRouter response:', e);
					resolve(null);
				}
			});
		});

		// Network-level failure: report and resolve null
		req.on('error', (e) => {
			console.error('Error fetching OpenRouter models:', e);
			resolve(null);
		});
		req.end();
	});
}
/**
 * Fetches the list of models from Ollama instance.
 * @param {string} baseURL - The base URL for the Ollama API (e.g., "http://localhost:11434/api")
 * @returns {Promise<Array|null>} A promise that resolves with the list of model objects or null if fetch fails.
 */
function fetchOllamaModels(baseURL = 'http://localhost:11434/api') {
	return new Promise((resolve) => {
		try {
			// Split the base URL into host, port, and path components
			const url = new URL(baseURL);
			const secure = url.protocol === 'https:';
			const basePath = url.pathname.endsWith('/')
				? url.pathname.slice(0, -1)
				: url.pathname;

			const requestOptions = {
				hostname: url.hostname,
				port: parseInt(url.port || (secure ? 443 : 80), 10),
				path: `${basePath}/tags`,
				method: 'GET',
				headers: {
					Accept: 'application/json'
				}
			};

			const transport = secure ? https : http;
			const req = transport.request(requestOptions, (res) => {
				let body = '';
				res.on('data', (chunk) => {
					body += chunk;
				});
				res.on('end', () => {
					// Non-200 status: treated as failure (resolve null, never reject)
					if (res.statusCode !== 200) {
						console.error(
							`Ollama API request failed with status code: ${res.statusCode}`
						);
						resolve(null);
						return;
					}
					try {
						const parsed = JSON.parse(body);
						resolve(parsed.models || []); // Model list lives under "models"
					} catch (e) {
						console.error('Error parsing Ollama response:', e);
						resolve(null);
					}
				});
			});

			req.on('error', (e) => {
				console.error('Error fetching Ollama models:', e);
				resolve(null);
			});
			req.end();
		} catch (e) {
			// Invalid baseURL (new URL throws): report and resolve null
			console.error('Error parsing Ollama base URL:', e);
			resolve(null);
		}
	});
}
= isConfigFilePresent(projectRoot);\n\n\tlog(\n\t\t'debug',\n\t\t`Checking for config file using findConfigPath, found: ${configPath}`\n\t);\n\tlog(\n\t\t'debug',\n\t\t`Checking config file using isConfigFilePresent(), exists: ${configExists}`\n\t);\n\n\tif (!configExists) {\n\t\tthrow new Error(CONFIG_MISSING_ERROR);\n\t}\n\n\ttry {\n\t\t// Get current settings - these should use the config from the found path automatically\n\t\tconst mainProvider = getMainProvider(projectRoot);\n\t\tconst mainModelId = getMainModelId(projectRoot);\n\t\tconst researchProvider = getResearchProvider(projectRoot);\n\t\tconst researchModelId = getResearchModelId(projectRoot);\n\t\tconst fallbackProvider = getFallbackProvider(projectRoot);\n\t\tconst fallbackModelId = getFallbackModelId(projectRoot);\n\n\t\t// Check API keys\n\t\tconst mainCliKeyOk = isApiKeySet(mainProvider, session, projectRoot);\n\t\tconst mainMcpKeyOk = getMcpApiKeyStatus(mainProvider, projectRoot);\n\t\tconst researchCliKeyOk = isApiKeySet(\n\t\t\tresearchProvider,\n\t\t\tsession,\n\t\t\tprojectRoot\n\t\t);\n\t\tconst researchMcpKeyOk = getMcpApiKeyStatus(researchProvider, projectRoot);\n\t\tconst fallbackCliKeyOk = fallbackProvider\n\t\t\t? isApiKeySet(fallbackProvider, session, projectRoot)\n\t\t\t: true;\n\t\tconst fallbackMcpKeyOk = fallbackProvider\n\t\t\t? getMcpApiKeyStatus(fallbackProvider, projectRoot)\n\t\t\t: true;\n\n\t\t// Get available models to find detailed info\n\t\tconst availableModels = getAvailableModels(projectRoot);\n\n\t\t// Find model details\n\t\tconst mainModelData = availableModels.find((m) => m.id === mainModelId);\n\t\tconst researchModelData = availableModels.find(\n\t\t\t(m) => m.id === researchModelId\n\t\t);\n\t\tconst fallbackModelData = fallbackModelId\n\t\t\t? 
availableModels.find((m) => m.id === fallbackModelId)\n\t\t\t: null;\n\n\t\t// Return structured configuration data\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tactiveModels: {\n\t\t\t\t\tmain: {\n\t\t\t\t\t\tprovider: mainProvider,\n\t\t\t\t\t\tmodelId: mainModelId,\n\t\t\t\t\t\tsweScore: mainModelData?.swe_score || null,\n\t\t\t\t\t\tcost: mainModelData?.cost_per_1m_tokens || null,\n\t\t\t\t\t\tkeyStatus: {\n\t\t\t\t\t\t\tcli: mainCliKeyOk,\n\t\t\t\t\t\t\tmcp: mainMcpKeyOk\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\tresearch: {\n\t\t\t\t\t\tprovider: researchProvider,\n\t\t\t\t\t\tmodelId: researchModelId,\n\t\t\t\t\t\tsweScore: researchModelData?.swe_score || null,\n\t\t\t\t\t\tcost: researchModelData?.cost_per_1m_tokens || null,\n\t\t\t\t\t\tkeyStatus: {\n\t\t\t\t\t\t\tcli: researchCliKeyOk,\n\t\t\t\t\t\t\tmcp: researchMcpKeyOk\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\tfallback: fallbackProvider\n\t\t\t\t\t\t? {\n\t\t\t\t\t\t\t\tprovider: fallbackProvider,\n\t\t\t\t\t\t\t\tmodelId: fallbackModelId,\n\t\t\t\t\t\t\t\tsweScore: fallbackModelData?.swe_score || null,\n\t\t\t\t\t\t\t\tcost: fallbackModelData?.cost_per_1m_tokens || null,\n\t\t\t\t\t\t\t\tkeyStatus: {\n\t\t\t\t\t\t\t\t\tcli: fallbackCliKeyOk,\n\t\t\t\t\t\t\t\t\tmcp: fallbackMcpKeyOk\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t: null\n\t\t\t\t},\n\t\t\t\tmessage: 'Successfully retrieved current model configuration'\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\treport('error', `Error getting model configuration: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CONFIG_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n\n/**\n * Get all available models not currently in use\n * @param {Object} [options] - Options for the operation\n * @param {Object} [options.session] - Session object containing environment variables (for MCP)\n * @param {Function} [options.mcpLog] - MCP logger object (for MCP)\n * @param {string} [options.projectRoot] - 
Project root directory\n * @returns {Object} RESTful response with available models\n */\nasync function getAvailableModelsList(options = {}) {\n\tconst { mcpLog, projectRoot } = options;\n\n\tconst report = (level, ...args) => {\n\t\tif (mcpLog && typeof mcpLog[level] === 'function') {\n\t\t\tmcpLog[level](...args);\n\t\t}\n\t};\n\n\tif (!projectRoot) {\n\t\tthrow new Error('Project root is required but not found.');\n\t}\n\n\t// Use centralized config path finding instead of hardcoded path\n\tconst configPath = findConfigPath(null, { projectRoot });\n\tconst configExists = isConfigFilePresent(projectRoot);\n\n\tlog(\n\t\t'debug',\n\t\t`Checking for config file using findConfigPath, found: ${configPath}`\n\t);\n\tlog(\n\t\t'debug',\n\t\t`Checking config file using isConfigFilePresent(), exists: ${configExists}`\n\t);\n\n\tif (!configExists) {\n\t\tthrow new Error(CONFIG_MISSING_ERROR);\n\t}\n\n\ttry {\n\t\t// Get all available models\n\t\tconst allAvailableModels = getAvailableModels(projectRoot);\n\n\t\tif (!allAvailableModels || allAvailableModels.length === 0) {\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmodels: [],\n\t\t\t\t\tmessage: 'No available models found'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Get currently used model IDs\n\t\tconst mainModelId = getMainModelId(projectRoot);\n\t\tconst researchModelId = getResearchModelId(projectRoot);\n\t\tconst fallbackModelId = getFallbackModelId(projectRoot);\n\n\t\t// Filter out placeholder models and active models\n\t\tconst activeIds = [mainModelId, researchModelId, fallbackModelId].filter(\n\t\t\tBoolean\n\t\t);\n\t\tconst otherAvailableModels = allAvailableModels.map((model) => ({\n\t\t\tprovider: model.provider || 'N/A',\n\t\t\tmodelId: model.id,\n\t\t\tsweScore: model.swe_score || null,\n\t\t\tcost: model.cost_per_1m_tokens || null,\n\t\t\tallowedRoles: model.allowed_roles || []\n\t\t}));\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmodels: 
otherAvailableModels,\n\t\t\t\tmessage: `Successfully retrieved ${otherAvailableModels.length} available models`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\treport('error', `Error getting available models: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MODELS_LIST_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n\n/**\n * Update a specific model in the configuration\n * @param {string} role - The model role to update ('main', 'research', 'fallback')\n * @param {string} modelId - The model ID to set for the role\n * @param {Object} [options] - Options for the operation\n * @param {string} [options.providerHint] - Provider hint if already determined ('openrouter' or 'ollama')\n * @param {Object} [options.session] - Session object containing environment variables (for MCP)\n * @param {Function} [options.mcpLog] - MCP logger object (for MCP)\n * @param {string} [options.projectRoot] - Project root directory\n * @returns {Object} RESTful response with result of update operation\n */\nasync function setModel(role, modelId, options = {}) {\n\tconst { mcpLog, projectRoot, providerHint } = options;\n\n\tconst report = (level, ...args) => {\n\t\tif (mcpLog && typeof mcpLog[level] === 'function') {\n\t\t\tmcpLog[level](...args);\n\t\t}\n\t};\n\n\tif (!projectRoot) {\n\t\tthrow new Error('Project root is required but not found.');\n\t}\n\n\t// Use centralized config path finding instead of hardcoded path\n\tconst configPath = findConfigPath(null, { projectRoot });\n\tconst configExists = isConfigFilePresent(projectRoot);\n\n\tlog(\n\t\t'debug',\n\t\t`Checking for config file using findConfigPath, found: ${configPath}`\n\t);\n\tlog(\n\t\t'debug',\n\t\t`Checking config file using isConfigFilePresent(), exists: ${configExists}`\n\t);\n\n\tif (!configExists) {\n\t\tthrow new Error(CONFIG_MISSING_ERROR);\n\t}\n\n\t// Validate role\n\tif (!['main', 'research', 'fallback'].includes(role)) {\n\t\treturn {\n\t\t\tsuccess: 
false,\n\t\t\terror: {\n\t\t\t\tcode: 'INVALID_ROLE',\n\t\t\t\tmessage: `Invalid role: ${role}. Must be one of: main, research, fallback.`\n\t\t\t}\n\t\t};\n\t}\n\n\t// Validate model ID\n\tif (typeof modelId !== 'string' || modelId.trim() === '') {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'INVALID_MODEL_ID',\n\t\t\t\tmessage: `Invalid model ID: ${modelId}. Must be a non-empty string.`\n\t\t\t}\n\t\t};\n\t}\n\n\ttry {\n\t\tconst availableModels = getAvailableModels(projectRoot);\n\t\tconst currentConfig = getConfig(projectRoot);\n\t\tlet determinedProvider = null; // Initialize provider\n\t\tlet warningMessage = null;\n\n\t\t// Find the model data in internal list initially to see if it exists at all\n\t\tlet modelData = availableModels.find((m) => m.id === modelId);\n\n\t\t// --- Revised Logic: Prioritize providerHint --- //\n\n\t\tif (providerHint) {\n\t\t\t// Hint provided (--ollama or --openrouter flag used)\n\t\t\tif (modelData && modelData.provider === providerHint) {\n\t\t\t\t// Found internally AND provider matches the hint\n\t\t\t\tdeterminedProvider = providerHint;\n\t\t\t\treport(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Model ${modelId} found internally with matching provider hint ${determinedProvider}.`\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// Either not found internally, OR found but under a DIFFERENT provider than hinted.\n\t\t\t\t// Proceed with custom logic based ONLY on the hint.\n\t\t\t\tif (providerHint === CUSTOM_PROVIDERS.OPENROUTER) {\n\t\t\t\t\t// Check OpenRouter ONLY because hint was openrouter\n\t\t\t\t\treport('info', `Checking OpenRouter for ${modelId} (as hinted)...`);\n\t\t\t\t\tconst openRouterModels = await fetchOpenRouterModels();\n\n\t\t\t\t\tif (\n\t\t\t\t\t\topenRouterModels &&\n\t\t\t\t\t\topenRouterModels.some((m) => m.id === modelId)\n\t\t\t\t\t) {\n\t\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.OPENROUTER;\n\n\t\t\t\t\t\t// Check if this is a free model (ends with :free)\n\t\t\t\t\t\tif 
(modelId.endsWith(':free')) {\n\t\t\t\t\t\t\twarningMessage = `Warning: OpenRouter free model '${modelId}' selected. Free models have significant limitations including lower context windows, reduced rate limits, and may not support advanced features like tool_use. Consider using the paid version '${modelId.replace(':free', '')}' for full functionality.`;\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\twarningMessage = `Warning: Custom OpenRouter model '${modelId}' set. This model is not officially validated by Taskmaster and may not function as expected.`;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Hinted as OpenRouter but not found in live check\n\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t`Model ID \"${modelId}\" not found in the live OpenRouter model list. Please verify the ID and ensure it's available on OpenRouter.`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t} else if (providerHint === CUSTOM_PROVIDERS.OLLAMA) {\n\t\t\t\t\t// Check Ollama ONLY because hint was ollama\n\t\t\t\t\treport('info', `Checking Ollama for ${modelId} (as hinted)...`);\n\n\t\t\t\t\t// Get the Ollama base URL from config\n\t\t\t\t\tconst ollamaBaseURL = getBaseUrlForRole(role, projectRoot);\n\t\t\t\t\tconst ollamaModels = await fetchOllamaModels(ollamaBaseURL);\n\n\t\t\t\t\tif (ollamaModels === null) {\n\t\t\t\t\t\t// Connection failed - server probably not running\n\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t`Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`\n\t\t\t\t\t\t);\n\t\t\t\t\t} else if (ollamaModels.some((m) => m.model === modelId)) {\n\t\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.OLLAMA;\n\t\t\t\t\t\twarningMessage = `Warning: Custom Ollama model '${modelId}' set. Ensure your Ollama server is running and has pulled this model. 
Taskmaster cannot guarantee compatibility.`;\n\t\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Server is running but model not found\n\t\t\t\t\t\tconst tagsUrl = `${ollamaBaseURL}/tags`;\n\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t`Model ID \"${modelId}\" not found in the Ollama instance. Please verify the model is pulled and available. You can check available models with: curl ${tagsUrl}`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t} else if (providerHint === CUSTOM_PROVIDERS.BEDROCK) {\n\t\t\t\t\t// Set provider without model validation since Bedrock models are managed by AWS\n\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.BEDROCK;\n\t\t\t\t\twarningMessage = `Warning: Custom Bedrock model '${modelId}' set. Please ensure the model ID is valid and accessible in your AWS account.`;\n\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t} else if (providerHint === CUSTOM_PROVIDERS.CLAUDE_CODE) {\n\t\t\t\t\t// Claude Code provider - check if model exists in our list\n\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.CLAUDE_CODE;\n\t\t\t\t\t// Re-find modelData specifically for claude-code provider\n\t\t\t\t\tconst claudeCodeModels = availableModels.filter(\n\t\t\t\t\t\t(m) => m.provider === 'claude-code'\n\t\t\t\t\t);\n\t\t\t\t\tconst claudeCodeModelData = claudeCodeModels.find(\n\t\t\t\t\t\t(m) => m.id === modelId\n\t\t\t\t\t);\n\t\t\t\t\tif (claudeCodeModelData) {\n\t\t\t\t\t\t// Update modelData to the found claude-code model\n\t\t\t\t\t\tmodelData = claudeCodeModelData;\n\t\t\t\t\t\treport('info', `Setting Claude Code model '${modelId}'.`);\n\t\t\t\t\t} else {\n\t\t\t\t\t\twarningMessage = `Warning: Claude Code model '${modelId}' not found in supported models. 
Setting without validation.`;\n\t\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t\t}\n\t\t\t\t} else if (providerHint === CUSTOM_PROVIDERS.AZURE) {\n\t\t\t\t\t// Set provider without model validation since Azure models are managed by Azure\n\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.AZURE;\n\t\t\t\t\twarningMessage = `Warning: Custom Azure model '${modelId}' set. Please ensure the model deployment is valid and accessible in your Azure account.`;\n\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t} else if (providerHint === CUSTOM_PROVIDERS.VERTEX) {\n\t\t\t\t\t// Set provider without model validation since Vertex models are managed by Google Cloud\n\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.VERTEX;\n\t\t\t\t\twarningMessage = `Warning: Custom Vertex AI model '${modelId}' set. Please ensure the model is valid and accessible in your Google Cloud project.`;\n\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t} else if (providerHint === CUSTOM_PROVIDERS.GEMINI_CLI) {\n\t\t\t\t\t// Gemini CLI provider - check if model exists in our list\n\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.GEMINI_CLI;\n\t\t\t\t\t// Re-find modelData specifically for gemini-cli provider\n\t\t\t\t\tconst geminiCliModels = availableModels.filter(\n\t\t\t\t\t\t(m) => m.provider === 'gemini-cli'\n\t\t\t\t\t);\n\t\t\t\t\tconst geminiCliModelData = geminiCliModels.find(\n\t\t\t\t\t\t(m) => m.id === modelId\n\t\t\t\t\t);\n\t\t\t\t\tif (geminiCliModelData) {\n\t\t\t\t\t\t// Update modelData to the found gemini-cli model\n\t\t\t\t\t\tmodelData = geminiCliModelData;\n\t\t\t\t\t\treport('info', `Setting Gemini CLI model '${modelId}'.`);\n\t\t\t\t\t} else {\n\t\t\t\t\t\twarningMessage = `Warning: Gemini CLI model '${modelId}' not found in supported models. 
Setting without validation.`;\n\t\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Invalid provider hint - should not happen with our constants\n\t\t\t\t\tthrow new Error(`Invalid provider hint received: ${providerHint}`);\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// No hint provided (flags not used)\n\t\t\tif (modelData) {\n\t\t\t\t// Found internally, use the provider from the internal list\n\t\t\t\tdeterminedProvider = modelData.provider;\n\t\t\t\treport(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Model ${modelId} found internally with provider ${determinedProvider}.`\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// Model not found and no provider hint was given\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'MODEL_NOT_FOUND_NO_HINT',\n\t\t\t\t\t\tmessage: `Model ID \"${modelId}\" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter, --ollama, --bedrock, --azure, or --vertex.`\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\t\t}\n\n\t\t// --- End of Revised Logic --- //\n\n\t\t// At this point, we should have a determinedProvider if the model is valid (internally or custom)\n\t\tif (!determinedProvider) {\n\t\t\t// This case acts as a safeguard\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'PROVIDER_UNDETERMINED',\n\t\t\t\t\tmessage: `Could not determine the provider for model ID \"${modelId}\".`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Update configuration\n\t\tcurrentConfig.models[role] = {\n\t\t\t...currentConfig.models[role], // Keep existing params like temperature\n\t\t\tprovider: determinedProvider,\n\t\t\tmodelId: modelId\n\t\t};\n\n\t\t// If model data is available, update maxTokens from supported-models.json\n\t\tif (modelData && modelData.max_tokens) {\n\t\t\tcurrentConfig.models[role].maxTokens = modelData.max_tokens;\n\t\t}\n\n\t\t// Write updated configuration\n\t\tconst writeResult = writeConfig(currentConfig, 
projectRoot);\n\t\tif (!writeResult) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'CONFIG_WRITE_ERROR',\n\t\t\t\t\tmessage: 'Error writing updated configuration to configuration file'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tconst successMessage = `Successfully set ${role} model to ${modelId} (Provider: ${determinedProvider})`;\n\t\treport('info', successMessage);\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\trole,\n\t\t\t\tprovider: determinedProvider,\n\t\t\t\tmodelId,\n\t\t\t\tmessage: successMessage,\n\t\t\t\twarning: warningMessage // Include warning in the response data\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\treport('error', `Error setting ${role} model: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'SET_MODEL_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n\n/**\n * Get API key status for all known providers.\n * @param {Object} [options] - Options for the operation\n * @param {Object} [options.session] - Session object containing environment variables (for MCP)\n * @param {Function} [options.mcpLog] - MCP logger object (for MCP)\n * @param {string} [options.projectRoot] - Project root directory\n * @returns {Object} RESTful response with API key status report\n */\nasync function getApiKeyStatusReport(options = {}) {\n\tconst { mcpLog, projectRoot, session } = options;\n\tconst report = (level, ...args) => {\n\t\tif (mcpLog && typeof mcpLog[level] === 'function') {\n\t\t\tmcpLog[level](...args);\n\t\t}\n\t};\n\n\ttry {\n\t\tconst providers = getAllProviders();\n\t\tconst providersToCheck = providers.filter(\n\t\t\t(p) => p.toLowerCase() !== 'ollama'\n\t\t); // Ollama is not a provider, it's a service, doesn't need an api key usually\n\t\tconst statusReport = providersToCheck.map((provider) => {\n\t\t\t// Use provided projectRoot for MCP status check\n\t\t\tconst cliOk = isApiKeySet(provider, session, projectRoot); // Pass session and projectRoot for CLI 
check\n\t\t\tconst mcpOk = getMcpApiKeyStatus(provider, projectRoot);\n\t\t\treturn {\n\t\t\t\tprovider,\n\t\t\t\tcli: cliOk,\n\t\t\t\tmcp: mcpOk\n\t\t\t};\n\t\t});\n\n\t\treport('info', 'Successfully generated API key status report.');\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\treport: statusReport,\n\t\t\t\tmessage: 'API key status report generated.'\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\treport('error', `Error generating API key status report: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'API_KEY_STATUS_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n\nexport {\n\tgetModelConfiguration,\n\tgetAvailableModelsList,\n\tsetModel,\n\tgetApiKeyStatusReport\n};\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/set-task-status.js", "/**\n * set-task-status.js\n * Direct function implementation for setting task status\n */\n\nimport { setTaskStatus } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { nextTaskDirect } from './next-task.js';\n/**\n * Direct function wrapper for setTaskStatus with error handling.\n *\n * @param {Object} args - Command arguments containing id, status, tasksJsonPath, and projectRoot.\n * @param {string} args.id - The ID of the task to update.\n * @param {string} args.status - The new status to set for the task.\n * @param {string} args.tasksJsonPath - Path to the tasks.json file.\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object.\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function setTaskStatusDirect(args, log, context = {}) {\n\t// Destructure expected args, including 
the resolved tasksJsonPath and projectRoot\n\tconst { tasksJsonPath, id, status, complexityReportPath, projectRoot, tag } =\n\t\targs;\n\tconst { session } = context;\n\ttry {\n\t\tlog.info(`Setting task status with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tconst errorMessage = 'tasksJsonPath is required but was not provided.';\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Check required parameters (id and status)\n\t\tif (!id) {\n\t\t\tconst errorMessage =\n\t\t\t\t'No task ID specified. Please provide a task ID to update.';\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_TASK_ID', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\tif (!status) {\n\t\t\tconst errorMessage =\n\t\t\t\t'No status specified. Please provide a new status value.';\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_STATUS', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Use the provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Execute core setTaskStatus function\n\t\tconst taskId = id;\n\t\tconst newStatus = status;\n\n\t\tlog.info(`Setting task ${taskId} status to \"${newStatus}\"`);\n\n\t\t// Call the core function with proper silent mode handling\n\t\tenableSilentMode(); // Enable silent mode before calling core function\n\t\ttry {\n\t\t\t// Call the core function\n\t\t\tawait setTaskStatus(tasksPath, taskId, newStatus, {\n\t\t\t\tmcpLog: log,\n\t\t\t\tprojectRoot,\n\t\t\t\tsession,\n\t\t\t\ttag\n\t\t\t});\n\n\t\t\tlog.info(`Successfully set task ${taskId} status to ${newStatus}`);\n\n\t\t\t// Return success data\n\t\t\tconst result = {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Successfully updated task ${taskId} status to 
\"${newStatus}\"`,\n\t\t\t\t\ttaskId,\n\t\t\t\t\tstatus: newStatus,\n\t\t\t\t\ttasksPath: tasksPath // Return the path used\n\t\t\t\t}\n\t\t\t};\n\n\t\t\t// If the task was completed, attempt to fetch the next task\n\t\t\tif (result.data.status === 'done') {\n\t\t\t\ttry {\n\t\t\t\t\tlog.info(`Attempting to fetch next task for task ${taskId}`);\n\t\t\t\t\tconst nextResult = await nextTaskDirect(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\t\treportPath: complexityReportPath,\n\t\t\t\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\t\t\t\ttag\n\t\t\t\t\t\t},\n\t\t\t\t\t\tlog,\n\t\t\t\t\t\t{ session }\n\t\t\t\t\t);\n\n\t\t\t\t\tif (nextResult.success) {\n\t\t\t\t\t\tlog.info(\n\t\t\t\t\t\t\t`Successfully retrieved next task: ${nextResult.data.nextTask}`\n\t\t\t\t\t\t);\n\t\t\t\t\t\tresult.data = {\n\t\t\t\t\t\t\t...result.data,\n\t\t\t\t\t\t\tnextTask: nextResult.data.nextTask,\n\t\t\t\t\t\t\tisNextSubtask: nextResult.data.isSubtask,\n\t\t\t\t\t\t\tnextSteps: nextResult.data.nextSteps\n\t\t\t\t\t\t};\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.warn(\n\t\t\t\t\t\t\t`Failed to retrieve next task: ${nextResult.error?.message || 'Unknown error'}`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t} catch (nextErr) {\n\t\t\t\t\tlog.error(`Error retrieving next task: ${nextErr.message}`);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn result;\n\t\t} catch (error) {\n\t\t\tlog.error(`Error setting task status: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'SET_STATUS_ERROR',\n\t\t\t\t\tmessage: error.message || 'Unknown error setting task status'\n\t\t\t\t}\n\t\t\t};\n\t\t} finally {\n\t\t\t// ALWAYS restore normal logging in finally block\n\t\t\tdisableSilentMode();\n\t\t}\n\t} catch (error) {\n\t\t// Ensure silent mode is disabled if there was an uncaught error in the outer try block\n\t\tif (isSilentMode()) {\n\t\t\tdisableSilentMode();\n\t\t}\n\n\t\tlog.error(`Error setting task status: ${error.message}`);\n\t\treturn 
{\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'SET_STATUS_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown error setting task status'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/init.js", "/**\n * Task Master\n * Copyright (c) 2025 Eyal Toledano, Ralph Khreish\n *\n * This software is licensed under the MIT License with Commons Clause.\n * You may use this software for any purpose, including commercial applications,\n * and modify and redistribute it freely, subject to the following restrictions:\n *\n * 1. You may not sell this software or offer it as a service.\n * 2. The origin of this software must not be misrepresented.\n * 3. Altered source versions must be plainly marked as such.\n *\n * For the full license text, see the LICENSE file in the root directory.\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport readline from 'readline';\nimport { fileURLToPath } from 'url';\nimport { dirname } from 'path';\nimport chalk from 'chalk';\nimport figlet from 'figlet';\nimport boxen from 'boxen';\nimport gradient from 'gradient-string';\nimport { isSilentMode } from './modules/utils.js';\nimport { insideGitWorkTree } from './modules/utils/git-utils.js';\nimport { manageGitignoreFile } from '../src/utils/manage-gitignore.js';\nimport { RULE_PROFILES } from '../src/constants/profiles.js';\nimport {\n\tconvertAllRulesToProfileRules,\n\tgetRulesProfile\n} from '../src/utils/rule-transformer.js';\nimport { updateConfigMaxTokens } from './modules/update-config-tokens.js';\n\nimport { execSync } from 'child_process';\nimport {\n\tEXAMPLE_PRD_FILE,\n\tTASKMASTER_CONFIG_FILE,\n\tTASKMASTER_TEMPLATES_DIR,\n\tTASKMASTER_DIR,\n\tTASKMASTER_TASKS_DIR,\n\tTASKMASTER_DOCS_DIR,\n\tTASKMASTER_REPORTS_DIR,\n\tTASKMASTER_STATE_FILE,\n\tENV_EXAMPLE_FILE,\n\tGITIGNORE_FILE\n} from '../src/constants/paths.js';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = dirname(__filename);\n\n// Define log levels\nconst LOG_LEVELS = 
{\n\tdebug: 0,\n\tinfo: 1,\n\twarn: 2,\n\terror: 3,\n\tsuccess: 4\n};\n\n// Determine log level from environment variable or default to 'info'\nconst LOG_LEVEL = process.env.TASKMASTER_LOG_LEVEL\n\t? LOG_LEVELS[process.env.TASKMASTER_LOG_LEVEL.toLowerCase()]\n\t: LOG_LEVELS.info; // Default to info\n\n// Create a color gradient for the banner\nconst coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);\nconst warmGradient = gradient(['#fb8b24', '#e36414', '#9a031e']);\n\n// Display a fancy banner\nfunction displayBanner() {\n\tif (isSilentMode()) return;\n\n\tconsole.clear();\n\tconst bannerText = figlet.textSync('Task Master AI', {\n\t\tfont: 'Standard',\n\t\thorizontalLayout: 'default',\n\t\tverticalLayout: 'default'\n\t});\n\n\tconsole.log(coolGradient(bannerText));\n\n\t// Add creator credit line below the banner\n\tconsole.log(\n\t\tchalk.dim('by ') + chalk.cyan.underline('https://x.com/eyaltoledano')\n\t);\n\n\tconsole.log(\n\t\tboxen(chalk.white(`${chalk.bold('Initializing')} your new project`), {\n\t\t\tpadding: 1,\n\t\t\tmargin: { top: 0, bottom: 1 },\n\t\t\tborderStyle: 'round',\n\t\t\tborderColor: 'cyan'\n\t\t})\n\t);\n}\n\n// Logging function with icons and colors\nfunction log(level, ...args) {\n\tconst icons = {\n\t\tdebug: chalk.gray('🔍'),\n\t\tinfo: chalk.blue('ℹ️'),\n\t\twarn: chalk.yellow('⚠️'),\n\t\terror: chalk.red('❌'),\n\t\tsuccess: chalk.green('✅')\n\t};\n\n\tif (LOG_LEVELS[level] >= LOG_LEVEL) {\n\t\tconst icon = icons[level] || '';\n\n\t\t// Only output to console if not in silent mode\n\t\tif (!isSilentMode()) {\n\t\t\tif (level === 'error') {\n\t\t\t\tconsole.error(icon, chalk.red(...args));\n\t\t\t} else if (level === 'warn') {\n\t\t\t\tconsole.warn(icon, chalk.yellow(...args));\n\t\t\t} else if (level === 'success') {\n\t\t\t\tconsole.log(icon, chalk.green(...args));\n\t\t\t} else if (level === 'info') {\n\t\t\t\tconsole.log(icon, chalk.blue(...args));\n\t\t\t} else {\n\t\t\t\tconsole.log(icon, 
...args);\n\t\t\t}\n\t\t}\n\t}\n\n\t// Write to debug log if DEBUG=true\n\tif (process.env.DEBUG === 'true') {\n\t\tconst logMessage = `[${level.toUpperCase()}] ${args.join(' ')}\\n`;\n\t\tfs.appendFileSync('init-debug.log', logMessage);\n\t}\n}\n\n// Function to create directory if it doesn't exist\nfunction ensureDirectoryExists(dirPath) {\n\tif (!fs.existsSync(dirPath)) {\n\t\tfs.mkdirSync(dirPath, { recursive: true });\n\t\tlog('info', `Created directory: ${dirPath}`);\n\t}\n}\n\n// Function to add shell aliases to the user's shell configuration\nfunction addShellAliases() {\n\tconst homeDir = process.env.HOME || process.env.USERPROFILE;\n\tlet shellConfigFile;\n\n\t// Determine which shell config file to use\n\tif (process.env.SHELL?.includes('zsh')) {\n\t\tshellConfigFile = path.join(homeDir, '.zshrc');\n\t} else if (process.env.SHELL?.includes('bash')) {\n\t\tshellConfigFile = path.join(homeDir, '.bashrc');\n\t} else {\n\t\tlog('warn', 'Could not determine shell type. Aliases not added.');\n\t\treturn false;\n\t}\n\n\ttry {\n\t\t// Check if file exists\n\t\tif (!fs.existsSync(shellConfigFile)) {\n\t\t\tlog(\n\t\t\t\t'warn',\n\t\t\t\t`Shell config file ${shellConfigFile} not found. 
Aliases not added.`\n\t\t\t);\n\t\t\treturn false;\n\t\t}\n\n\t\t// Check if aliases already exist\n\t\tconst configContent = fs.readFileSync(shellConfigFile, 'utf8');\n\t\tif (configContent.includes(\"alias tm='task-master'\")) {\n\t\t\tlog('info', 'Task Master aliases already exist in shell config.');\n\t\t\treturn true;\n\t\t}\n\n\t\t// Add aliases to the shell config file\n\t\tconst aliasBlock = `\n# Task Master aliases added on ${new Date().toLocaleDateString()}\nalias tm='task-master'\nalias taskmaster='task-master'\n`;\n\n\t\tfs.appendFileSync(shellConfigFile, aliasBlock);\n\t\tlog('success', `Added Task Master aliases to ${shellConfigFile}`);\n\t\tlog(\n\t\t\t'info',\n\t\t\t`To use the aliases in your current terminal, run: source ${shellConfigFile}`\n\t\t);\n\n\t\treturn true;\n\t} catch (error) {\n\t\tlog('error', `Failed to add aliases: ${error.message}`);\n\t\treturn false;\n\t}\n}\n\n// Function to create initial state.json file for tag management\nfunction createInitialStateFile(targetDir) {\n\tconst stateFilePath = path.join(targetDir, TASKMASTER_STATE_FILE);\n\n\t// Check if state.json already exists\n\tif (fs.existsSync(stateFilePath)) {\n\t\tlog('info', 'State file already exists, preserving current configuration');\n\t\treturn;\n\t}\n\n\t// Create initial state configuration\n\tconst initialState = {\n\t\tcurrentTag: 'master',\n\t\tlastSwitched: new Date().toISOString(),\n\t\tbranchTagMapping: {},\n\t\tmigrationNoticeShown: false\n\t};\n\n\ttry {\n\t\tfs.writeFileSync(stateFilePath, JSON.stringify(initialState, null, 2));\n\t\tlog('success', `Created initial state file: ${stateFilePath}`);\n\t\tlog('info', 'Default tag set to \"master\" for task organization');\n\t} catch (error) {\n\t\tlog('error', `Failed to create state file: ${error.message}`);\n\t}\n}\n\n// Function to copy a file from the package to the target directory\nfunction copyTemplateFile(templateName, targetPath, replacements = {}) {\n\t// Get the file content from the appropriate 
source directory\n\tlet sourcePath;\n\n\t// Map template names to their actual source paths\n\tswitch (templateName) {\n\t\t// case 'scripts_README.md':\n\t\t// \tsourcePath = path.join(__dirname, '..', 'assets', 'scripts_README.md');\n\t\t// \tbreak;\n\t\t// case 'README-task-master.md':\n\t\t// \tsourcePath = path.join(__dirname, '..', 'README-task-master.md');\n\t\t// \tbreak;\n\t\tdefault:\n\t\t\t// For other files like env.example, gitignore, etc. that don't have direct equivalents\n\t\t\tsourcePath = path.join(__dirname, '..', 'assets', templateName);\n\t}\n\n\t// Check if the source file exists\n\tif (!fs.existsSync(sourcePath)) {\n\t\t// Fall back to templates directory for files that might not have been moved yet\n\t\tsourcePath = path.join(__dirname, '..', 'assets', templateName);\n\t\tif (!fs.existsSync(sourcePath)) {\n\t\t\tlog('error', `Source file not found: ${sourcePath}`);\n\t\t\treturn;\n\t\t}\n\t}\n\n\tlet content = fs.readFileSync(sourcePath, 'utf8');\n\n\t// Replace placeholders with actual values\n\tObject.entries(replacements).forEach(([key, value]) => {\n\t\tconst regex = new RegExp(`\\\\{\\\\{${key}\\\\}\\\\}`, 'g');\n\t\tcontent = content.replace(regex, value);\n\t});\n\n\t// Handle special files that should be merged instead of overwritten\n\tif (fs.existsSync(targetPath)) {\n\t\tconst filename = path.basename(targetPath);\n\n\t\t// Handle .gitignore - append lines that don't exist\n\t\tif (filename === '.gitignore') {\n\t\t\tlog('info', `${targetPath} already exists, merging content...`);\n\t\t\tconst existingContent = fs.readFileSync(targetPath, 'utf8');\n\t\t\tconst existingLines = new Set(\n\t\t\t\texistingContent.split('\\n').map((line) => line.trim())\n\t\t\t);\n\t\t\tconst newLines = content\n\t\t\t\t.split('\\n')\n\t\t\t\t.filter((line) => !existingLines.has(line.trim()));\n\n\t\t\tif (newLines.length > 0) {\n\t\t\t\t// Add a comment to separate the original content from our additions\n\t\t\t\tconst updatedContent = 
`${existingContent.trim()}\\n\\n# Added by Task Master AI\\n${newLines.join('\\n')}`;\n\t\t\t\tfs.writeFileSync(targetPath, updatedContent);\n\t\t\t\tlog('success', `Updated ${targetPath} with additional entries`);\n\t\t\t} else {\n\t\t\t\tlog('info', `No new content to add to ${targetPath}`);\n\t\t\t}\n\t\t\treturn;\n\t\t}\n\n\t\t// Handle README.md - offer to preserve or create a different file\n\t\tif (filename === 'README-task-master.md') {\n\t\t\tlog('info', `${targetPath} already exists`);\n\t\t\t// Create a separate README file specifically for this project\n\t\t\tconst taskMasterReadmePath = path.join(\n\t\t\t\tpath.dirname(targetPath),\n\t\t\t\t'README-task-master.md'\n\t\t\t);\n\t\t\tfs.writeFileSync(taskMasterReadmePath, content);\n\t\t\tlog(\n\t\t\t\t'success',\n\t\t\t\t`Created ${taskMasterReadmePath} (preserved original README-task-master.md)`\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\t// For other files, warn and prompt before overwriting\n\t\tlog('warn', `${targetPath} already exists, skipping.`);\n\t\treturn;\n\t}\n\n\t// If the file doesn't exist, create it normally\n\tfs.writeFileSync(targetPath, content);\n\tlog('info', `Created file: ${targetPath}`);\n}\n\n// Main function to initialize a new project\nasync function initializeProject(options = {}) {\n\t// Receives options as argument\n\t// Only display banner if not in silent mode\n\tif (!isSilentMode()) {\n\t\tdisplayBanner();\n\t}\n\n\t// Debug logging only if not in silent mode\n\t// if (!isSilentMode()) {\n\t// \tconsole.log('===== DEBUG: INITIALIZE PROJECT OPTIONS RECEIVED =====');\n\t// \tconsole.log('Full options object:', JSON.stringify(options));\n\t// \tconsole.log('options.yes:', options.yes);\n\t// \tconsole.log('==================================================');\n\t// }\n\n\t// Handle boolean aliases flags\n\tif (options.aliases === true) {\n\t\toptions.addAliases = true; // --aliases flag provided\n\t} else if (options.aliases === false) {\n\t\toptions.addAliases = false; // 
--no-aliases flag provided\n\t}\n\t// If options.aliases and options.noAliases are undefined, we'll prompt for it\n\n\t// Handle boolean git flags\n\tif (options.git === true) {\n\t\toptions.initGit = true; // --git flag provided\n\t} else if (options.git === false) {\n\t\toptions.initGit = false; // --no-git flag provided\n\t}\n\t// If options.git and options.noGit are undefined, we'll prompt for it\n\n\t// Handle boolean gitTasks flags\n\tif (options.gitTasks === true) {\n\t\toptions.storeTasksInGit = true; // --git-tasks flag provided\n\t} else if (options.gitTasks === false) {\n\t\toptions.storeTasksInGit = false; // --no-git-tasks flag provided\n\t}\n\t// If options.gitTasks and options.noGitTasks are undefined, we'll prompt for it\n\n\tconst skipPrompts = options.yes || (options.name && options.description);\n\n\t// if (!isSilentMode()) {\n\t// \tconsole.log('Skip prompts determined:', skipPrompts);\n\t// }\n\n\tlet selectedRuleProfiles;\n\tif (options.rulesExplicitlyProvided) {\n\t\t// If --rules flag was used, always respect it.\n\t\tlog(\n\t\t\t'info',\n\t\t\t`Using rule profiles provided via command line: ${options.rules.join(', ')}`\n\t\t);\n\t\tselectedRuleProfiles = options.rules;\n\t} else if (skipPrompts) {\n\t\t// If non-interactive (e.g., --yes) and no rules specified, default to ALL.\n\t\tlog(\n\t\t\t'info',\n\t\t\t`No rules specified in non-interactive mode, defaulting to all profiles.`\n\t\t);\n\t\tselectedRuleProfiles = RULE_PROFILES;\n\t} else {\n\t\t// If interactive and no rules specified, default to NONE.\n\t\t// The 'rules --setup' wizard will handle selection.\n\t\tlog(\n\t\t\t'info',\n\t\t\t'No rules specified; interactive setup will be launched to select profiles.'\n\t\t);\n\t\tselectedRuleProfiles = [];\n\t}\n\n\tif (skipPrompts) {\n\t\tif (!isSilentMode()) {\n\t\t\tconsole.log('SKIPPING PROMPTS - Using defaults or provided values');\n\t\t}\n\n\t\t// Use provided options or defaults\n\t\tconst projectName = options.name || 
'task-master-project';\n\t\tconst projectDescription =\n\t\t\toptions.description || 'A project managed with Task Master AI';\n\t\tconst projectVersion = options.version || '0.1.0';\n\t\tconst authorName = options.author || 'Vibe coder';\n\t\tconst dryRun = options.dryRun || false;\n\t\tconst addAliases =\n\t\t\toptions.addAliases !== undefined ? options.addAliases : true; // Default to true if not specified\n\t\tconst initGit = options.initGit !== undefined ? options.initGit : true; // Default to true if not specified\n\t\tconst storeTasksInGit =\n\t\t\toptions.storeTasksInGit !== undefined ? options.storeTasksInGit : true; // Default to true if not specified\n\n\t\tif (dryRun) {\n\t\t\tlog('info', 'DRY RUN MODE: No files will be modified');\n\t\t\tlog('info', 'Would initialize Task Master project');\n\t\t\tlog('info', 'Would create/update necessary project files');\n\n\t\t\t// Show flag-specific behavior\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t`${addAliases ? 'Would add shell aliases (tm, taskmaster)' : 'Would skip shell aliases'}`\n\t\t\t);\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t`${initGit ? 'Would initialize Git repository' : 'Would skip Git initialization'}`\n\t\t\t);\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t`${storeTasksInGit ? 
'Would store tasks in Git' : 'Would exclude tasks from Git'}`\n\t\t\t);\n\n\t\t\treturn {\n\t\t\t\tdryRun: true\n\t\t\t};\n\t\t}\n\n\t\tcreateProjectStructure(\n\t\t\taddAliases,\n\t\t\tinitGit,\n\t\t\tstoreTasksInGit,\n\t\t\tdryRun,\n\t\t\toptions,\n\t\t\tselectedRuleProfiles\n\t\t);\n\t} else {\n\t\t// Interactive logic\n\t\tlog('info', 'Required options not provided, proceeding with prompts.');\n\n\t\ttry {\n\t\t\tconst rl = readline.createInterface({\n\t\t\t\tinput: process.stdin,\n\t\t\t\toutput: process.stdout\n\t\t\t});\n\t\t\t// Prompt for shell aliases (skip if --aliases or --no-aliases flag was provided)\n\t\t\tlet addAliasesPrompted = true; // Default to true\n\t\t\tif (options.addAliases !== undefined) {\n\t\t\t\taddAliasesPrompted = options.addAliases; // Use flag value if provided\n\t\t\t} else {\n\t\t\t\tconst addAliasesInput = await promptQuestion(\n\t\t\t\t\trl,\n\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t'Add shell aliases for task-master? This lets you type \"tm\" instead of \"task-master\" (Y/n): '\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\taddAliasesPrompted = addAliasesInput.trim().toLowerCase() !== 'n';\n\t\t\t}\n\n\t\t\t// Prompt for Git initialization (skip if --git or --no-git flag was provided)\n\t\t\tlet initGitPrompted = true; // Default to true\n\t\t\tif (options.initGit !== undefined) {\n\t\t\t\tinitGitPrompted = options.initGit; // Use flag value if provided\n\t\t\t} else {\n\t\t\t\tconst gitInitInput = await promptQuestion(\n\t\t\t\t\trl,\n\t\t\t\t\tchalk.cyan('Initialize a Git repository in project root? 
(Y/n): ')\n\t\t\t\t);\n\t\t\t\tinitGitPrompted = gitInitInput.trim().toLowerCase() !== 'n';\n\t\t\t}\n\n\t\t\t// Prompt for Git tasks storage (skip if --git-tasks or --no-git-tasks flag was provided)\n\t\t\tlet storeGitPrompted = true; // Default to true\n\t\t\tif (options.storeTasksInGit !== undefined) {\n\t\t\t\tstoreGitPrompted = options.storeTasksInGit; // Use flag value if provided\n\t\t\t} else {\n\t\t\t\tconst gitTasksInput = await promptQuestion(\n\t\t\t\t\trl,\n\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t'Store tasks in Git (tasks.json and tasks/ directory)? (Y/n): '\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tstoreGitPrompted = gitTasksInput.trim().toLowerCase() !== 'n';\n\t\t\t}\n\n\t\t\t// Confirm settings...\n\t\t\tconsole.log('\\nTask Master Project settings:');\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(\n\t\t\t\t\t'Add shell aliases (so you can use \"tm\" instead of \"task-master\"):'\n\t\t\t\t),\n\t\t\t\tchalk.white(addAliasesPrompted ? 'Yes' : 'No')\n\t\t\t);\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue('Initialize Git repository in project root:'),\n\t\t\t\tchalk.white(initGitPrompted ? 'Yes' : 'No')\n\t\t\t);\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue('Store tasks in Git (tasks.json and tasks/ directory):'),\n\t\t\t\tchalk.white(storeGitPrompted ? 'Yes' : 'No')\n\t\t\t);\n\n\t\t\tconst confirmInput = await promptQuestion(\n\t\t\t\trl,\n\t\t\t\tchalk.yellow('\\nDo you want to continue with these settings? 
(Y/n): ')\n\t\t\t);\n\t\t\tconst shouldContinue = confirmInput.trim().toLowerCase() !== 'n';\n\n\t\t\tif (!shouldContinue) {\n\t\t\t\trl.close();\n\t\t\t\tlog('info', 'Project initialization cancelled by user');\n\t\t\t\tprocess.exit(0);\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Only run interactive rules if rules flag not provided via command line\n\t\t\tif (options.rulesExplicitlyProvided) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Using rule profiles provided via command line: ${selectedRuleProfiles.join(', ')}`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst dryRun = options.dryRun || false;\n\n\t\t\tif (dryRun) {\n\t\t\t\tlog('info', 'DRY RUN MODE: No files will be modified');\n\t\t\t\tlog('info', 'Would initialize Task Master project');\n\t\t\t\tlog('info', 'Would create/update necessary project files');\n\n\t\t\t\t// Show flag-specific behavior\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`${addAliasesPrompted ? 'Would add shell aliases (tm, taskmaster)' : 'Would skip shell aliases'}`\n\t\t\t\t);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`${initGitPrompted ? 'Would initialize Git repository' : 'Would skip Git initialization'}`\n\t\t\t\t);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`${storeGitPrompted ? 
'Would store tasks in Git' : 'Would exclude tasks from Git'}`\n\t\t\t\t);\n\n\t\t\t\treturn {\n\t\t\t\t\tdryRun: true\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Create structure using only necessary values\n\t\t\tcreateProjectStructure(\n\t\t\t\taddAliasesPrompted,\n\t\t\t\tinitGitPrompted,\n\t\t\t\tstoreGitPrompted,\n\t\t\t\tdryRun,\n\t\t\t\toptions,\n\t\t\t\tselectedRuleProfiles\n\t\t\t);\n\t\t\trl.close();\n\t\t} catch (error) {\n\t\t\tif (rl) {\n\t\t\t\trl.close();\n\t\t\t}\n\t\t\tlog('error', `Error during initialization process: ${error.message}`);\n\t\t\tprocess.exit(1);\n\t\t}\n\t}\n}\n\n// Helper function to promisify readline question\nfunction promptQuestion(rl, question) {\n\treturn new Promise((resolve) => {\n\t\trl.question(question, (answer) => {\n\t\t\tresolve(answer);\n\t\t});\n\t});\n}\n\n// Function to create the project structure\nfunction createProjectStructure(\n\taddAliases,\n\tinitGit,\n\tstoreTasksInGit,\n\tdryRun,\n\toptions,\n\tselectedRuleProfiles = RULE_PROFILES\n) {\n\tconst targetDir = process.cwd();\n\tlog('info', `Initializing project in ${targetDir}`);\n\n\t// Create NEW .taskmaster directory structure (using constants)\n\tensureDirectoryExists(path.join(targetDir, TASKMASTER_DIR));\n\tensureDirectoryExists(path.join(targetDir, TASKMASTER_TASKS_DIR));\n\tensureDirectoryExists(path.join(targetDir, TASKMASTER_DOCS_DIR));\n\tensureDirectoryExists(path.join(targetDir, TASKMASTER_REPORTS_DIR));\n\tensureDirectoryExists(path.join(targetDir, TASKMASTER_TEMPLATES_DIR));\n\n\t// Create initial state.json file for tag management\n\tcreateInitialStateFile(targetDir);\n\n\t// Copy template files with replacements\n\tconst replacements = {\n\t\tyear: new Date().getFullYear()\n\t};\n\n\t// Helper function to create rule profiles\n\tfunction _processSingleProfile(profileName) {\n\t\tconst profile = getRulesProfile(profileName);\n\t\tif (profile) {\n\t\t\tconvertAllRulesToProfileRules(targetDir, profile);\n\t\t\t// Also triggers MCP config setup (if 
applicable)\n\t\t} else {\n\t\t\tlog('warn', `Unknown rule profile: ${profileName}`);\n\t\t}\n\t}\n\n\t// Copy .env.example\n\tcopyTemplateFile(\n\t\t'env.example',\n\t\tpath.join(targetDir, ENV_EXAMPLE_FILE),\n\t\treplacements\n\t);\n\n\t// Copy config.json with project name to NEW location\n\tcopyTemplateFile(\n\t\t'config.json',\n\t\tpath.join(targetDir, TASKMASTER_CONFIG_FILE),\n\t\t{\n\t\t\t...replacements\n\t\t}\n\t);\n\n\t// Update config.json with correct maxTokens values from supported-models.json\n\tconst configPath = path.join(targetDir, TASKMASTER_CONFIG_FILE);\n\tif (updateConfigMaxTokens(configPath)) {\n\t\tlog('info', 'Updated config with correct maxTokens values');\n\t} else {\n\t\tlog('warn', 'Could not update maxTokens in config');\n\t}\n\n\t// Copy .gitignore with GitTasks preference\n\ttry {\n\t\tconst gitignoreTemplatePath = path.join(\n\t\t\t__dirname,\n\t\t\t'..',\n\t\t\t'assets',\n\t\t\t'gitignore'\n\t\t);\n\t\tconst templateContent = fs.readFileSync(gitignoreTemplatePath, 'utf8');\n\t\tmanageGitignoreFile(\n\t\t\tpath.join(targetDir, GITIGNORE_FILE),\n\t\t\ttemplateContent,\n\t\t\tstoreTasksInGit,\n\t\t\tlog\n\t\t);\n\t} catch (error) {\n\t\tlog('error', `Failed to create .gitignore: ${error.message}`);\n\t}\n\n\t// Copy example_prd.txt to NEW location\n\tcopyTemplateFile('example_prd.txt', path.join(targetDir, EXAMPLE_PRD_FILE));\n\n\t// Initialize git repository if git is available\n\ttry {\n\t\tif (initGit === false) {\n\t\t\tlog('info', 'Git initialization skipped due to --no-git flag.');\n\t\t} else if (initGit === true) {\n\t\t\tif (insideGitWorkTree()) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'Existing Git repository detected – skipping git init despite --git flag.'\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\tlog('info', 'Initializing Git repository due to --git flag...');\n\t\t\t\texecSync('git init', { cwd: targetDir, stdio: 'ignore' });\n\t\t\t\tlog('success', 'Git repository initialized');\n\t\t\t}\n\t\t} else {\n\t\t\t// Default 
behavior when no flag is provided (from interactive prompt)\n\t\t\tif (insideGitWorkTree()) {\n\t\t\t\tlog('info', 'Existing Git repository detected – skipping git init.');\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'No Git repository detected. Initializing one in project root...'\n\t\t\t\t);\n\t\t\t\texecSync('git init', { cwd: targetDir, stdio: 'ignore' });\n\t\t\t\tlog('success', 'Git repository initialized');\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tlog('warn', 'Git not available, skipping repository initialization');\n\t}\n\n\t// Only run the manual transformer if rules were provided via flags.\n\t// The interactive `rules --setup` wizard handles its own installation.\n\tif (options.rulesExplicitlyProvided || options.yes) {\n\t\tlog('info', 'Generating profile rules from command-line flags...');\n\t\tfor (const profileName of selectedRuleProfiles) {\n\t\t\t_processSingleProfile(profileName);\n\t\t}\n\t}\n\n\t// Add shell aliases if requested\n\tif (addAliases) {\n\t\taddShellAliases();\n\t}\n\n\t// Run npm install automatically\n\tconst npmInstallOptions = {\n\t\tcwd: targetDir,\n\t\t// Default to inherit for interactive CLI, change if silent\n\t\tstdio: 'inherit'\n\t};\n\n\tif (isSilentMode()) {\n\t\t// If silent (MCP mode), suppress npm install output\n\t\tnpmInstallOptions.stdio = 'ignore';\n\t\tlog('info', 'Running npm install silently...'); // Log our own message\n\t} else {\n\t\t// Interactive mode, show the boxen message\n\t\tconsole.log(\n\t\t\tboxen(chalk.cyan('Installing dependencies...'), {\n\t\t\t\tpadding: 0.5,\n\t\t\t\tmargin: 0.5,\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tborderColor: 'blue'\n\t\t\t})\n\t\t);\n\t}\n\n\t// === Add Rule Profiles Setup Step ===\n\tif (\n\t\t!isSilentMode() &&\n\t\t!dryRun &&\n\t\t!options?.yes &&\n\t\t!options.rulesExplicitlyProvided\n\t) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.cyan('Configuring Rule Profiles...'), {\n\t\t\t\tpadding: 0.5,\n\t\t\t\tmargin: { top: 1, bottom: 0.5 
},\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tborderColor: 'blue'\n\t\t\t})\n\t\t);\n\t\tlog(\n\t\t\t'info',\n\t\t\t'Running interactive rules setup. Please select which rule profiles to include.'\n\t\t);\n\t\ttry {\n\t\t\t// Correct command confirmed by you.\n\t\t\texecSync('npx task-master rules --setup', {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: targetDir\n\t\t\t});\n\t\t\tlog('success', 'Rule profiles configured.');\n\t\t} catch (error) {\n\t\t\tlog('error', 'Failed to configure rule profiles:', error.message);\n\t\t\tlog('warn', 'You may need to run \"task-master rules --setup\" manually.');\n\t\t}\n\t} else if (isSilentMode() || dryRun || options?.yes) {\n\t\t// This branch can log why setup was skipped, similar to the model setup logic.\n\t\tif (options.rulesExplicitlyProvided) {\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t'Skipping interactive rules setup because --rules flag was used.'\n\t\t\t);\n\t\t} else {\n\t\t\tlog('info', 'Skipping interactive rules setup in non-interactive mode.');\n\t\t}\n\t}\n\t// =====================================\n\n\t// === Add Response Language Step ===\n\tif (!isSilentMode() && !dryRun && !options?.yes) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.cyan('Configuring Response Language...'), {\n\t\t\t\tpadding: 0.5,\n\t\t\t\tmargin: { top: 1, bottom: 0.5 },\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tborderColor: 'blue'\n\t\t\t})\n\t\t);\n\t\tlog(\n\t\t\t'info',\n\t\t\t'Running interactive response language setup. 
Please input your preferred language.'\n\t\t);\n\t\ttry {\n\t\t\texecSync('npx task-master lang --setup', {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: targetDir\n\t\t\t});\n\t\t\tlog('success', 'Response Language configured.');\n\t\t} catch (error) {\n\t\t\tlog('error', 'Failed to configure response language:', error.message);\n\t\t\tlog('warn', 'You may need to run \"task-master lang --setup\" manually.');\n\t\t}\n\t} else if (isSilentMode() && !dryRun) {\n\t\tlog(\n\t\t\t'info',\n\t\t\t'Skipping interactive response language setup in silent (MCP) mode.'\n\t\t);\n\t\tlog(\n\t\t\t'warn',\n\t\t\t'Please configure response language using \"task-master models --set-response-language\" or the \"models\" MCP tool.'\n\t\t);\n\t} else if (dryRun) {\n\t\tlog('info', 'DRY RUN: Skipping interactive response language setup.');\n\t}\n\t// =====================================\n\n\t// === Add Model Configuration Step ===\n\tif (!isSilentMode() && !dryRun && !options?.yes) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.cyan('Configuring AI Models...'), {\n\t\t\t\tpadding: 0.5,\n\t\t\t\tmargin: { top: 1, bottom: 0.5 },\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tborderColor: 'blue'\n\t\t\t})\n\t\t);\n\t\tlog(\n\t\t\t'info',\n\t\t\t'Running interactive model setup. 
Please select your preferred AI models.'\n\t\t);\n\t\ttry {\n\t\t\texecSync('npx task-master models --setup', {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: targetDir\n\t\t\t});\n\t\t\tlog('success', 'AI Models configured.');\n\t\t} catch (error) {\n\t\t\tlog('error', 'Failed to configure AI models:', error.message);\n\t\t\tlog('warn', 'You may need to run \"task-master models --setup\" manually.');\n\t\t}\n\t} else if (isSilentMode() && !dryRun) {\n\t\tlog('info', 'Skipping interactive model setup in silent (MCP) mode.');\n\t\tlog(\n\t\t\t'warn',\n\t\t\t'Please configure AI models using \"task-master models --set-...\" or the \"models\" MCP tool.'\n\t\t);\n\t} else if (dryRun) {\n\t\tlog('info', 'DRY RUN: Skipping interactive model setup.');\n\t} else if (options?.yes) {\n\t\tlog('info', 'Skipping interactive model setup due to --yes flag.');\n\t\tlog(\n\t\t\t'info',\n\t\t\t'Default AI models will be used. You can configure different models later using \"task-master models --setup\" or \"task-master models --set-...\" commands.'\n\t\t);\n\t}\n\t// ====================================\n\n\t// Add shell aliases if requested\n\tif (addAliases && !dryRun) {\n\t\tlog('info', 'Adding shell aliases...');\n\t\tconst aliasResult = addShellAliases();\n\t\tif (aliasResult) {\n\t\t\tlog('success', 'Shell aliases added successfully');\n\t\t}\n\t} else if (addAliases && dryRun) {\n\t\tlog('info', 'DRY RUN: Would add shell aliases (tm, taskmaster)');\n\t}\n\n\t// Display success message\n\tif (!isSilentMode()) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\t`${warmGradient.multiline(\n\t\t\t\t\tfiglet.textSync('Success!', { font: 'Standard' })\n\t\t\t\t)}\\n${chalk.green('Project initialized successfully!')}`,\n\t\t\t\t{\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tmargin: 1,\n\t\t\t\t\tborderStyle: 'double',\n\t\t\t\t\tborderColor: 'green'\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n\n\t// Display next steps in a nice box\n\tif (!isSilentMode()) 
{\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\t`${chalk.cyan.bold('Things you should do next:')}\\n\\n${chalk.white('1. ')}${chalk.yellow(\n\t\t\t\t\t'Configure AI models (if needed) and add API keys to `.env`'\n\t\t\t\t)}\\n${chalk.white(' ├─ ')}${chalk.dim('Models: Use `task-master models` commands')}\\n${chalk.white(' └─ ')}${chalk.dim(\n\t\t\t\t\t'Keys: Add provider API keys to .env (or inside the MCP config file i.e. .cursor/mcp.json)'\n\t\t\t\t)}\\n${chalk.white('2. ')}${chalk.yellow(\n\t\t\t\t\t'Discuss your idea with AI and ask for a PRD using example_prd.txt, and save it to scripts/PRD.txt'\n\t\t\t\t)}\\n${chalk.white('3. ')}${chalk.yellow(\n\t\t\t\t\t'Ask Cursor Agent (or run CLI) to parse your PRD and generate initial tasks:'\n\t\t\t\t)}\\n${chalk.white(' └─ ')}${chalk.dim('MCP Tool: ')}${chalk.cyan('parse_prd')}${chalk.dim(' | CLI: ')}${chalk.cyan('task-master parse-prd scripts/prd.txt')}\\n${chalk.white('4. ')}${chalk.yellow(\n\t\t\t\t\t'Ask Cursor to analyze the complexity of the tasks in your PRD using research'\n\t\t\t\t)}\\n${chalk.white(' └─ ')}${chalk.dim('MCP Tool: ')}${chalk.cyan('analyze_project_complexity')}${chalk.dim(' | CLI: ')}${chalk.cyan('task-master analyze-complexity')}\\n${chalk.white('5. ')}${chalk.yellow(\n\t\t\t\t\t'Ask Cursor to expand all of your tasks using the complexity analysis'\n\t\t\t\t)}\\n${chalk.white('6. ')}${chalk.yellow('Ask Cursor to begin working on the next task')}\\n${chalk.white('7. ')}${chalk.yellow(\n\t\t\t\t\t'Add new tasks anytime using the add-task command or MCP tool'\n\t\t\t\t)}\\n${chalk.white('8. ')}${chalk.yellow(\n\t\t\t\t\t'Ask Cursor to set the status of one or many tasks/subtasks at a time. Use the task id from the task lists.'\n\t\t\t\t)}\\n${chalk.white('9. ')}${chalk.yellow(\n\t\t\t\t\t'Ask Cursor to update all tasks from a specific task id based on new learnings or pivots in your project.'\n\t\t\t\t)}\\n${chalk.white('10. 
')}${chalk.green.bold('Ship it!')}\\n\\n${chalk.dim(\n\t\t\t\t\t'* Review the README.md file to learn how to use other commands via Cursor Agent.'\n\t\t\t\t)}\\n${chalk.dim(\n\t\t\t\t\t'* Use the task-master command without arguments to see all available commands.'\n\t\t\t\t)}`,\n\t\t\t\t{\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tmargin: 1,\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\ttitle: 'Getting Started',\n\t\t\t\t\ttitleAlignment: 'center'\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n}\n\n// Ensure necessary functions are exported\nexport { initializeProject, log };\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/expand-task.js", "/**\n * expand-task.js\n * Direct function implementation for expanding a task into subtasks\n */\n\nimport expandTask from '../../../../scripts/modules/task-manager/expand-task.js';\nimport {\n\treadJSON,\n\twriteJSON,\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport path from 'path';\nimport fs from 'fs';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for expanding a task into subtasks with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.id - The ID of the task to expand.\n * @param {number|string} [args.num] - Number of subtasks to generate.\n * @param {boolean} [args.research] - Enable research role for subtask generation.\n * @param {string} [args.prompt] - Additional context to guide subtask generation.\n * @param {boolean} [args.force] - Force expansion even if subtasks exist.\n * @param {string} [args.projectRoot] - Project root directory.\n * @param {string} [args.tag] - Tag for the task\n * @param {Object} log - Logger object\n * @param {Object} context - Context object containing session\n * @param {Object} [context.session] - MCP Session object\n * @returns 
{Promise<Object>} - Task expansion result { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function expandTaskDirect(args, log, context = {}) {\n\tconst { session } = context; // Extract session\n\t// Destructure expected args, including projectRoot\n\tconst {\n\t\ttasksJsonPath,\n\t\tid,\n\t\tnum,\n\t\tresearch,\n\t\tprompt,\n\t\tforce,\n\t\tprojectRoot,\n\t\ttag,\n\t\tcomplexityReportPath\n\t} = args;\n\n\t// Log session root data for debugging\n\tlog.info(\n\t\t`Session data in expandTaskDirect: ${JSON.stringify({\n\t\t\thasSession: !!session,\n\t\t\tsessionKeys: session ? Object.keys(session) : [],\n\t\t\troots: session?.roots,\n\t\t\trootsStr: JSON.stringify(session?.roots)\n\t\t})}`\n\t);\n\n\t// Check if tasksJsonPath was provided\n\tif (!tasksJsonPath) {\n\t\tlog.error('expandTaskDirect called without tasksJsonPath');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t}\n\t\t};\n\t}\n\n\t// Use provided path\n\tconst tasksPath = tasksJsonPath;\n\n\tlog.info(`[expandTaskDirect] Using tasksPath: ${tasksPath}`);\n\n\t// Validate task ID\n\tconst taskId = id ? parseInt(id, 10) : null;\n\tif (!taskId) {\n\t\tlog.error('Task ID is required');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\tmessage: 'Task ID is required'\n\t\t\t}\n\t\t};\n\t}\n\n\t// Process other parameters\n\tconst numSubtasks = num ? parseInt(num, 10) : undefined;\n\tconst useResearch = research === true;\n\tconst additionalContext = prompt || '';\n\tconst forceFlag = force === true;\n\n\ttry {\n\t\tlog.info(\n\t\t\t`[expandTaskDirect] Expanding task ${taskId} into ${numSubtasks || 'default'} subtasks. 
Research: ${useResearch}, Force: ${forceFlag}`\n\t\t);\n\n\t\t// Read tasks data\n\t\tlog.info(`[expandTaskDirect] Attempting to read JSON from: ${tasksPath}`);\n\t\tconst data = readJSON(tasksPath, projectRoot);\n\t\tlog.info(\n\t\t\t`[expandTaskDirect] Result of readJSON: ${data ? 'Data read successfully' : 'readJSON returned null or undefined'}`\n\t\t);\n\n\t\tif (!data || !data.tasks) {\n\t\t\tlog.error(\n\t\t\t\t`[expandTaskDirect] readJSON failed or returned invalid data for path: ${tasksPath}`\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_TASKS_FILE',\n\t\t\t\t\tmessage: `No valid tasks found in ${tasksPath}. readJSON returned: ${JSON.stringify(data)}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Find the specific task\n\t\tlog.info(`[expandTaskDirect] Searching for task ID ${taskId} in data`);\n\t\tconst task = data.tasks.find((t) => t.id === taskId);\n\t\tlog.info(`[expandTaskDirect] Task found: ${task ? 'Yes' : 'No'}`);\n\n\t\tif (!task) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'TASK_NOT_FOUND',\n\t\t\t\t\tmessage: `Task with ID ${taskId} not found`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check if task is completed\n\t\tif (task.status === 'done' || task.status === 'completed') {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'TASK_COMPLETED',\n\t\t\t\t\tmessage: `Task ${taskId} is already marked as ${task.status} and cannot be expanded`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check for existing subtasks and force flag\n\t\tconst hasExistingSubtasks = task.subtasks && task.subtasks.length > 0;\n\t\tif (hasExistingSubtasks && !forceFlag) {\n\t\t\tlog.info(\n\t\t\t\t`Task ${taskId} already has ${task.subtasks.length} subtasks. Use --force to overwrite.`\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Task ${taskId} already has subtasks. 
Expansion skipped.`,\n\t\t\t\t\ttask,\n\t\t\t\t\tsubtasksAdded: 0,\n\t\t\t\t\thasExistingSubtasks\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// If force flag is set, clear existing subtasks\n\t\tif (hasExistingSubtasks && forceFlag) {\n\t\t\tlog.info(\n\t\t\t\t`Force flag set. Clearing existing subtasks for task ${taskId}.`\n\t\t\t);\n\t\t\ttask.subtasks = [];\n\t\t}\n\n\t\t// Keep a copy of the task before modification\n\t\tconst originalTask = JSON.parse(JSON.stringify(task));\n\n\t\t// Tracking subtasks count before expansion\n\t\tconst subtasksCountBefore = task.subtasks ? task.subtasks.length : 0;\n\n\t\t// Directly modify the data instead of calling the CLI function\n\t\tif (!task.subtasks) {\n\t\t\ttask.subtasks = [];\n\t\t}\n\n\t\t// Save tasks.json with potentially empty subtasks array and proper context\n\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\n\t\t// Create logger wrapper using the utility\n\t\tconst mcpLog = createLogWrapper(log);\n\n\t\tlet wasSilent; // Declare wasSilent outside the try block\n\t\t// Process the request\n\t\ttry {\n\t\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\t\twasSilent = isSilentMode(); // Assign inside the try block\n\t\t\tif (!wasSilent) enableSilentMode();\n\n\t\t\t// Call the core expandTask function with the wrapped logger and projectRoot\n\t\t\tconst coreResult = await expandTask(\n\t\t\t\ttasksPath,\n\t\t\t\ttaskId,\n\t\t\t\tnumSubtasks,\n\t\t\t\tuseResearch,\n\t\t\t\tadditionalContext,\n\t\t\t\t{\n\t\t\t\t\tcomplexityReportPath,\n\t\t\t\t\tmcpLog,\n\t\t\t\t\tsession,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tcommandName: 'expand-task',\n\t\t\t\t\toutputType: 'mcp',\n\t\t\t\t\ttag\n\t\t\t\t},\n\t\t\t\tforceFlag\n\t\t\t);\n\n\t\t\t// Restore normal logging\n\t\t\tif (!wasSilent && isSilentMode()) disableSilentMode();\n\n\t\t\t// Read the updated data\n\t\t\tconst updatedData = readJSON(tasksPath, projectRoot);\n\t\t\tconst updatedTask = updatedData.tasks.find((t) => t.id === 
taskId);\n\n\t\t\t// Calculate how many subtasks were added\n\t\t\tconst subtasksAdded = updatedTask.subtasks\n\t\t\t\t? updatedTask.subtasks.length - subtasksCountBefore\n\t\t\t\t: 0;\n\n\t\t\t// Return the result, including telemetryData\n\t\t\tlog.info(\n\t\t\t\t`Successfully expanded task ${taskId} with ${subtasksAdded} new subtasks`\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\ttask: coreResult.task,\n\t\t\t\t\tsubtasksAdded,\n\t\t\t\t\thasExistingSubtasks,\n\t\t\t\t\ttelemetryData: coreResult.telemetryData,\n\t\t\t\t\ttagInfo: coreResult.tagInfo\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\t// Make sure to restore normal logging even if there's an error\n\t\t\tif (!wasSilent && isSilentMode()) disableSilentMode();\n\n\t\t\tlog.error(`Error expanding task: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\t\tmessage: error.message || 'Failed to expand task'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\tlog.error(`Error expanding task: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message || 'Failed to expand task'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/utils/contextGatherer.js", "/**\n * contextGatherer.js\n * Comprehensive context gathering utility for Task Master AI operations\n * Supports task context, file context, project tree, and custom context\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport pkg from 'gpt-tokens';\nimport Fuse from 'fuse.js';\nimport {\n\treadJSON,\n\tfindTaskById,\n\ttruncate,\n\tflattenTasksWithSubtasks\n} from '../utils.js';\n\nconst { encode } = pkg;\n\n/**\n * Context Gatherer class for collecting and formatting context from various sources\n */\nexport class ContextGatherer {\n\tconstructor(projectRoot, tag) {\n\t\tthis.projectRoot = projectRoot;\n\t\tthis.tasksPath = 
path.join(\n\t\t\tprojectRoot,\n\t\t\t'.taskmaster',\n\t\t\t'tasks',\n\t\t\t'tasks.json'\n\t\t);\n\t\tthis.tag = tag;\n\t\tthis.allTasks = this._loadAllTasks();\n\t}\n\n\t_loadAllTasks() {\n\t\ttry {\n\t\t\tconst data = readJSON(this.tasksPath, this.projectRoot, this.tag);\n\t\t\tconst tasks = data?.tasks || [];\n\t\t\treturn tasks;\n\t\t} catch (error) {\n\t\t\tconsole.warn(\n\t\t\t\t`Warning: Could not load tasks for ContextGatherer: ${error.message}`\n\t\t\t);\n\t\t\treturn [];\n\t\t}\n\t}\n\n\t/**\n\t * Count tokens in a text string using gpt-tokens\n\t * @param {string} text - Text to count tokens for\n\t * @returns {number} Token count\n\t */\n\tcountTokens(text) {\n\t\tif (!text || typeof text !== 'string') {\n\t\t\treturn 0;\n\t\t}\n\t\ttry {\n\t\t\treturn encode(text).length;\n\t\t} catch (error) {\n\t\t\t// Fallback to rough character-based estimation if tokenizer fails\n\t\t\t// Rough estimate: ~4 characters per token for English text\n\t\t\treturn Math.ceil(text.length / 4);\n\t\t}\n\t}\n\n\t/**\n\t * Main method to gather context from multiple sources\n\t * @param {Object} options - Context gathering options\n\t * @param {Array<string>} [options.tasks] - Task/subtask IDs to include\n\t * @param {Array<string>} [options.files] - File paths to include\n\t * @param {string} [options.customContext] - Additional custom context\n\t * @param {boolean} [options.includeProjectTree] - Include project file tree\n\t * @param {string} [options.format] - Output format: 'research', 'chat', 'system-prompt'\n\t * @param {boolean} [options.includeTokenCounts] - Whether to include token breakdown\n\t * @param {string} [options.semanticQuery] - A query string for semantic task searching.\n\t * @param {number} [options.maxSemanticResults] - Max number of semantic results.\n\t * @param {Array<number>} [options.dependencyTasks] - Array of task IDs to build dependency graphs from.\n\t * @returns {Promise<Object>} Object with context string and analysis data\n\t */\n\tasync 
gather(options = {}) {\n\t\tconst {\n\t\t\ttasks = [],\n\t\t\tfiles = [],\n\t\t\tcustomContext = '',\n\t\t\tincludeProjectTree = false,\n\t\t\tformat = 'research',\n\t\t\tincludeTokenCounts = false,\n\t\t\tsemanticQuery,\n\t\t\tmaxSemanticResults = 10,\n\t\t\tdependencyTasks = []\n\t\t} = options;\n\n\t\tconst contextSections = [];\n\t\tconst finalTaskIds = new Set(tasks.map(String));\n\t\tlet analysisData = null;\n\t\tlet tokenBreakdown = null;\n\n\t\t// Initialize token breakdown if requested\n\t\tif (includeTokenCounts) {\n\t\t\ttokenBreakdown = {\n\t\t\t\ttotal: 0,\n\t\t\t\tcustomContext: null,\n\t\t\t\ttasks: [],\n\t\t\t\tfiles: [],\n\t\t\t\tprojectTree: null\n\t\t\t};\n\t\t}\n\n\t\t// Semantic Search\n\t\tif (semanticQuery && this.allTasks.length > 0) {\n\t\t\tconst semanticResults = this._performSemanticSearch(\n\t\t\t\tsemanticQuery,\n\t\t\t\tmaxSemanticResults\n\t\t\t);\n\n\t\t\t// Store the analysis data for UI display\n\t\t\tanalysisData = semanticResults.analysisData;\n\n\t\t\tsemanticResults.tasks.forEach((task) => {\n\t\t\t\tfinalTaskIds.add(String(task.id));\n\t\t\t});\n\t\t}\n\n\t\t// Dependency Graph Analysis\n\t\tif (dependencyTasks.length > 0) {\n\t\t\tconst dependencyResults = this._buildDependencyGraphs(dependencyTasks);\n\t\t\tdependencyResults.allRelatedTaskIds.forEach((id) =>\n\t\t\t\tfinalTaskIds.add(String(id))\n\t\t\t);\n\t\t\t// We can format and add dependencyResults.graphVisualization later if needed\n\t\t}\n\n\t\t// Add custom context first\n\t\tif (customContext && customContext.trim()) {\n\t\t\tconst formattedCustomContext = this._formatCustomContext(\n\t\t\t\tcustomContext,\n\t\t\t\tformat\n\t\t\t);\n\t\t\tcontextSections.push(formattedCustomContext);\n\n\t\t\t// Calculate tokens for custom context if requested\n\t\t\tif (includeTokenCounts) {\n\t\t\t\ttokenBreakdown.customContext = {\n\t\t\t\t\ttokens: this.countTokens(formattedCustomContext),\n\t\t\t\t\tcharacters: 
formattedCustomContext.length\n\t\t\t\t};\n\t\t\t\ttokenBreakdown.total += tokenBreakdown.customContext.tokens;\n\t\t\t}\n\t\t}\n\n\t\t// Gather context for the final list of tasks\n\t\tif (finalTaskIds.size > 0) {\n\t\t\tconst taskContextResult = await this._gatherTaskContext(\n\t\t\t\tArray.from(finalTaskIds),\n\t\t\t\tformat,\n\t\t\t\tincludeTokenCounts\n\t\t\t);\n\t\t\tif (taskContextResult.context) {\n\t\t\t\tcontextSections.push(taskContextResult.context);\n\n\t\t\t\t// Add task breakdown if token counting is enabled\n\t\t\t\tif (includeTokenCounts && taskContextResult.breakdown) {\n\t\t\t\t\ttokenBreakdown.tasks = taskContextResult.breakdown;\n\t\t\t\t\tconst taskTokens = taskContextResult.breakdown.reduce(\n\t\t\t\t\t\t(sum, task) => sum + task.tokens,\n\t\t\t\t\t\t0\n\t\t\t\t\t);\n\t\t\t\t\ttokenBreakdown.total += taskTokens;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Add file context\n\t\tif (files.length > 0) {\n\t\t\tconst fileContextResult = await this._gatherFileContext(\n\t\t\t\tfiles,\n\t\t\t\tformat,\n\t\t\t\tincludeTokenCounts\n\t\t\t);\n\t\t\tif (fileContextResult.context) {\n\t\t\t\tcontextSections.push(fileContextResult.context);\n\n\t\t\t\t// Add file breakdown if token counting is enabled\n\t\t\t\tif (includeTokenCounts && fileContextResult.breakdown) {\n\t\t\t\t\ttokenBreakdown.files = fileContextResult.breakdown;\n\t\t\t\t\tconst fileTokens = fileContextResult.breakdown.reduce(\n\t\t\t\t\t\t(sum, file) => sum + file.tokens,\n\t\t\t\t\t\t0\n\t\t\t\t\t);\n\t\t\t\t\ttokenBreakdown.total += fileTokens;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Add project tree context\n\t\tif (includeProjectTree) {\n\t\t\tconst treeContextResult = await this._gatherProjectTreeContext(\n\t\t\t\tformat,\n\t\t\t\tincludeTokenCounts\n\t\t\t);\n\t\t\tif (treeContextResult.context) {\n\t\t\t\tcontextSections.push(treeContextResult.context);\n\n\t\t\t\t// Add tree breakdown if token counting is enabled\n\t\t\t\tif (includeTokenCounts && treeContextResult.breakdown) 
{\n\t\t\t\t\ttokenBreakdown.projectTree = treeContextResult.breakdown;\n\t\t\t\t\ttokenBreakdown.total += treeContextResult.breakdown.tokens;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tconst finalContext = this._joinContextSections(contextSections, format);\n\n\t\tconst result = {\n\t\t\tcontext: finalContext,\n\t\t\tanalysisData: analysisData,\n\t\t\tcontextSections: contextSections.length,\n\t\t\tfinalTaskIds: Array.from(finalTaskIds)\n\t\t};\n\n\t\t// Only include tokenBreakdown if it was requested\n\t\tif (includeTokenCounts) {\n\t\t\tresult.tokenBreakdown = tokenBreakdown;\n\t\t}\n\n\t\treturn result;\n\t}\n\n\t_performSemanticSearch(query, maxResults) {\n\t\tconst searchableTasks = this.allTasks.map((task) => {\n\t\t\tconst dependencyTitles =\n\t\t\t\ttask.dependencies?.length > 0\n\t\t\t\t\t? task.dependencies\n\t\t\t\t\t\t\t.map((depId) => this.allTasks.find((t) => t.id === depId)?.title)\n\t\t\t\t\t\t\t.filter(Boolean)\n\t\t\t\t\t\t\t.join(' ')\n\t\t\t\t\t: '';\n\t\t\treturn { ...task, dependencyTitles };\n\t\t});\n\n\t\t// Use the exact same approach as add-task.js\n\t\tconst searchOptions = {\n\t\t\tincludeScore: true, // Return match scores\n\t\t\tthreshold: 0.4, // Lower threshold = stricter matching (range 0-1)\n\t\t\tkeys: [\n\t\t\t\t{ name: 'title', weight: 1.5 }, // Title is most important\n\t\t\t\t{ name: 'description', weight: 2 }, // Description is very important\n\t\t\t\t{ name: 'details', weight: 3 }, // Details is most important\n\t\t\t\t// Search dependencies to find tasks that depend on similar things\n\t\t\t\t{ name: 'dependencyTitles', weight: 0.5 }\n\t\t\t],\n\t\t\t// Sort matches by score (lower is better)\n\t\t\tshouldSort: true,\n\t\t\t// Allow searching in nested properties\n\t\t\tuseExtendedSearch: true,\n\t\t\t// Return up to 50 matches\n\t\t\tlimit: 50\n\t\t};\n\n\t\t// Create search index using Fuse.js\n\t\tconst fuse = new Fuse(searchableTasks, searchOptions);\n\n\t\t// Extract significant words and phrases from the prompt (like 
add-task.js does)\n\t\tconst promptWords = query\n\t\t\t.toLowerCase()\n\t\t\t.replace(/[^\\w\\s-]/g, ' ') // Replace non-alphanumeric chars with spaces\n\t\t\t.split(/\\s+/)\n\t\t\t.filter((word) => word.length > 3); // Words at least 4 chars\n\n\t\t// Use the user's prompt for fuzzy search\n\t\tconst fuzzyResults = fuse.search(query);\n\n\t\t// Also search for each significant word to catch different aspects\n\t\tconst wordResults = [];\n\t\tfor (const word of promptWords) {\n\t\t\tif (word.length > 5) {\n\t\t\t\t// Only use significant words\n\t\t\t\tconst results = fuse.search(word);\n\t\t\t\tif (results.length > 0) {\n\t\t\t\t\twordResults.push(...results);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Merge and deduplicate results\n\t\tconst mergedResults = [...fuzzyResults];\n\n\t\t// Add word results that aren't already in fuzzyResults\n\t\tfor (const wordResult of wordResults) {\n\t\t\tif (!mergedResults.some((r) => r.item.id === wordResult.item.id)) {\n\t\t\t\tmergedResults.push(wordResult);\n\t\t\t}\n\t\t}\n\n\t\t// Group search results by relevance\n\t\tconst highRelevance = mergedResults\n\t\t\t.filter((result) => result.score < 0.25)\n\t\t\t.map((result) => result.item);\n\n\t\tconst mediumRelevance = mergedResults\n\t\t\t.filter((result) => result.score >= 0.25 && result.score < 0.4)\n\t\t\t.map((result) => result.item);\n\n\t\t// Get recent tasks (newest first)\n\t\tconst recentTasks = [...this.allTasks]\n\t\t\t.sort((a, b) => b.id - a.id)\n\t\t\t.slice(0, 5);\n\n\t\t// Combine high relevance, medium relevance, and recent tasks\n\t\t// Prioritize high relevance first\n\t\tconst allRelevantTasks = [...highRelevance];\n\n\t\t// Add medium relevance if not already included\n\t\tfor (const task of mediumRelevance) {\n\t\t\tif (!allRelevantTasks.some((t) => t.id === task.id)) {\n\t\t\t\tallRelevantTasks.push(task);\n\t\t\t}\n\t\t}\n\n\t\t// Add recent tasks if not already included\n\t\tfor (const task of recentTasks) {\n\t\t\tif (!allRelevantTasks.some((t) => 
t.id === task.id)) {\n\t\t\t\tallRelevantTasks.push(task);\n\t\t\t}\n\t\t}\n\n\t\t// Get top N results for context\n\t\tconst finalResults = allRelevantTasks.slice(0, maxResults);\n\t\treturn {\n\t\t\ttasks: finalResults,\n\t\t\tanalysisData: {\n\t\t\t\thighRelevance: highRelevance,\n\t\t\t\tmediumRelevance: mediumRelevance,\n\t\t\t\trecentTasks: recentTasks,\n\t\t\t\tallRelevantTasks: allRelevantTasks\n\t\t\t}\n\t\t};\n\t}\n\n\t_buildDependencyContext(taskIds) {\n\t\tconst { allRelatedTaskIds, graphs, depthMap } =\n\t\t\tthis._buildDependencyGraphs(taskIds);\n\t\tif (allRelatedTaskIds.size === 0) return '';\n\n\t\tconst dependentTasks = Array.from(allRelatedTaskIds)\n\t\t\t.map((id) => this.allTasks.find((t) => t.id === id))\n\t\t\t.filter(Boolean)\n\t\t\t.sort((a, b) => (depthMap.get(a.id) || 0) - (depthMap.get(b.id) || 0));\n\n\t\tconst uniqueDetailedTasks = dependentTasks.slice(0, 8);\n\n\t\tlet context = `\\nThis task relates to a dependency structure with ${dependentTasks.length} related tasks in the chain.`;\n\n\t\tconst directDeps = this.allTasks.filter((t) => taskIds.includes(t.id));\n\t\tif (directDeps.length > 0) {\n\t\t\tcontext += `\\n\\nDirect dependencies:\\n${directDeps\n\t\t\t\t.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)\n\t\t\t\t.join('\\n')}`;\n\t\t}\n\n\t\tconst indirectDeps = dependentTasks.filter((t) => !taskIds.includes(t.id));\n\t\tif (indirectDeps.length > 0) {\n\t\t\tcontext += `\\n\\nIndirect dependencies (dependencies of dependencies):\\n${indirectDeps\n\t\t\t\t.slice(0, 5)\n\t\t\t\t.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)\n\t\t\t\t.join('\\n')}`;\n\t\t\tif (indirectDeps.length > 5)\n\t\t\t\tcontext += `\\n- ... and ${\n\t\t\t\t\tindirectDeps.length - 5\n\t\t\t\t} more indirect dependencies`;\n\t\t}\n\n\t\tcontext += `\\n\\nDetailed information about dependencies:`;\n\t\tfor (const depTask of uniqueDetailedTasks) {\n\t\t\tconst isDirect = taskIds.includes(depTask.id)\n\t\t\t\t? 
' [DIRECT DEPENDENCY]'\n\t\t\t\t: '';\n\t\t\tcontext += `\\n\\n------ Task ${depTask.id}${isDirect}: ${depTask.title} ------\\n`;\n\t\t\tcontext += `Description: ${depTask.description}\\n`;\n\t\t\tif (depTask.dependencies?.length) {\n\t\t\t\tcontext += `Dependencies: ${depTask.dependencies.join(', ')}\\n`;\n\t\t\t}\n\t\t\tif (depTask.details) {\n\t\t\t\tcontext += `Implementation Details: ${truncate(\n\t\t\t\t\tdepTask.details,\n\t\t\t\t\t400\n\t\t\t\t)}\\n`;\n\t\t\t}\n\t\t}\n\n\t\tif (graphs.length > 0) {\n\t\t\tcontext += '\\n\\nDependency Chain Visualization:';\n\t\t\tcontext += graphs\n\t\t\t\t.map((graph) => this._formatDependencyChain(graph))\n\t\t\t\t.join('');\n\t\t}\n\n\t\treturn context;\n\t}\n\n\t_buildDependencyGraphs(taskIds) {\n\t\tconst visited = new Set();\n\t\tconst depthMap = new Map();\n\t\tconst graphs = [];\n\n\t\tfor (const id of taskIds) {\n\t\t\tconst graph = this._buildDependencyGraph(id, visited, depthMap);\n\t\t\tif (graph) graphs.push(graph);\n\t\t}\n\n\t\treturn { allRelatedTaskIds: visited, graphs, depthMap };\n\t}\n\n\t_buildDependencyGraph(taskId, visited, depthMap, depth = 0) {\n\t\tif (visited.has(taskId) || depth > 5) return null; // Limit recursion depth\n\t\tconst task = this.allTasks.find((t) => t.id === taskId);\n\t\tif (!task) return null;\n\n\t\tvisited.add(taskId);\n\t\tif (!depthMap.has(taskId) || depth < depthMap.get(taskId)) {\n\t\t\tdepthMap.set(taskId, depth);\n\t\t}\n\n\t\tconst dependencies =\n\t\t\ttask.dependencies\n\t\t\t\t?.map((depId) =>\n\t\t\t\t\tthis._buildDependencyGraph(depId, visited, depthMap, depth + 1)\n\t\t\t\t)\n\t\t\t\t.filter(Boolean) || [];\n\n\t\treturn { ...task, dependencies };\n\t}\n\n\t_formatDependencyChain(node, prefix = '', isLast = true, depth = 0) {\n\t\tif (depth > 3) return '';\n\t\tconst connector = isLast ? '└── ' : '├── ';\n\t\tlet result = `${prefix}${connector}Task ${node.id}: ${node.title}`;\n\t\tif (node.dependencies?.length) {\n\t\t\tconst childPrefix = prefix + (isLast ? 
' ' : '│ ');\n\t\t\tresult += node.dependencies\n\t\t\t\t.map((dep, index) =>\n\t\t\t\t\tthis._formatDependencyChain(\n\t\t\t\t\t\tdep,\n\t\t\t\t\t\tchildPrefix,\n\t\t\t\t\t\tindex === node.dependencies.length - 1,\n\t\t\t\t\t\tdepth + 1\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\t.join('');\n\t\t}\n\t\treturn '\\n' + result;\n\t}\n\n\t/**\n\t * Parse task ID strings into structured format\n\t * Supports formats: \"15\", \"15.2\", \"16,17.1\"\n\t * @param {Array<string>} taskIds - Array of task ID strings\n\t * @returns {Array<Object>} Parsed task identifiers\n\t */\n\t_parseTaskIds(taskIds) {\n\t\tconst parsed = [];\n\n\t\tfor (const idStr of taskIds) {\n\t\t\tif (idStr.includes('.')) {\n\t\t\t\t// Subtask format: \"15.2\"\n\t\t\t\tconst [parentId, subtaskId] = idStr.split('.');\n\t\t\t\tparsed.push({\n\t\t\t\t\ttype: 'subtask',\n\t\t\t\t\tparentId: parseInt(parentId, 10),\n\t\t\t\t\tsubtaskId: parseInt(subtaskId, 10),\n\t\t\t\t\tfullId: idStr\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\t// Task format: \"15\"\n\t\t\t\tparsed.push({\n\t\t\t\t\ttype: 'task',\n\t\t\t\t\ttaskId: parseInt(idStr, 10),\n\t\t\t\t\tfullId: idStr\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\treturn parsed;\n\t}\n\n\t/**\n\t * Gather context from tasks and subtasks\n\t * @param {Array<string>} taskIds - Task/subtask IDs\n\t * @param {string} format - Output format\n\t * @param {boolean} includeTokenCounts - Whether to include token breakdown\n\t * @returns {Promise<Object>} Task context result with breakdown\n\t */\n\tasync _gatherTaskContext(taskIds, format, includeTokenCounts = false) {\n\t\ttry {\n\t\t\tif (!this.allTasks || this.allTasks.length === 0) {\n\t\t\t\treturn { context: null, breakdown: [] };\n\t\t\t}\n\n\t\t\tconst parsedIds = this._parseTaskIds(taskIds);\n\t\t\tconst contextItems = [];\n\t\t\tconst breakdown = [];\n\n\t\t\tfor (const parsed of parsedIds) {\n\t\t\t\tlet formattedItem = null;\n\t\t\t\tlet itemInfo = null;\n\n\t\t\t\tif (parsed.type === 'task') {\n\t\t\t\t\tconst result = 
findTaskById(this.allTasks, parsed.taskId);\n\t\t\t\t\tif (result.task) {\n\t\t\t\t\t\tformattedItem = this._formatTaskForContext(result.task, format);\n\t\t\t\t\t\titemInfo = {\n\t\t\t\t\t\t\tid: parsed.fullId,\n\t\t\t\t\t\t\ttype: 'task',\n\t\t\t\t\t\t\ttitle: result.task.title,\n\t\t\t\t\t\t\ttokens: includeTokenCounts ? this.countTokens(formattedItem) : 0,\n\t\t\t\t\t\t\tcharacters: formattedItem.length\n\t\t\t\t\t\t};\n\t\t\t\t\t}\n\t\t\t\t} else if (parsed.type === 'subtask') {\n\t\t\t\t\tconst parentResult = findTaskById(this.allTasks, parsed.parentId);\n\t\t\t\t\tif (parentResult.task && parentResult.task.subtasks) {\n\t\t\t\t\t\tconst subtask = parentResult.task.subtasks.find(\n\t\t\t\t\t\t\t(st) => st.id === parsed.subtaskId\n\t\t\t\t\t\t);\n\t\t\t\t\t\tif (subtask) {\n\t\t\t\t\t\t\tformattedItem = this._formatSubtaskForContext(\n\t\t\t\t\t\t\t\tsubtask,\n\t\t\t\t\t\t\t\tparentResult.task,\n\t\t\t\t\t\t\t\tformat\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\titemInfo = {\n\t\t\t\t\t\t\t\tid: parsed.fullId,\n\t\t\t\t\t\t\t\ttype: 'subtask',\n\t\t\t\t\t\t\t\ttitle: subtask.title,\n\t\t\t\t\t\t\t\tparentTitle: parentResult.task.title,\n\t\t\t\t\t\t\t\ttokens: includeTokenCounts\n\t\t\t\t\t\t\t\t\t? this.countTokens(formattedItem)\n\t\t\t\t\t\t\t\t\t: 0,\n\t\t\t\t\t\t\t\tcharacters: formattedItem.length\n\t\t\t\t\t\t\t};\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (formattedItem && itemInfo) {\n\t\t\t\t\tcontextItems.push(formattedItem);\n\t\t\t\t\tif (includeTokenCounts) {\n\t\t\t\t\t\tbreakdown.push(itemInfo);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (contextItems.length === 0) {\n\t\t\t\treturn { context: null, breakdown: [] };\n\t\t\t}\n\n\t\t\tconst finalContext = this._formatTaskContextSection(contextItems, format);\n\t\t\treturn {\n\t\t\t\tcontext: finalContext,\n\t\t\t\tbreakdown: includeTokenCounts ? 
breakdown : []\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tconsole.warn(`Warning: Could not gather task context: ${error.message}`);\n\t\t\treturn { context: null, breakdown: [] };\n\t\t}\n\t}\n\n\t/**\n\t * Format a task for context inclusion\n\t * @param {Object} task - Task object\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted task context\n\t */\n\t_formatTaskForContext(task, format) {\n\t\tconst sections = [];\n\n\t\tsections.push(`**Task ${task.id}: ${task.title}**`);\n\t\tsections.push(`Description: ${task.description}`);\n\t\tsections.push(`Status: ${task.status || 'pending'}`);\n\t\tsections.push(`Priority: ${task.priority || 'medium'}`);\n\n\t\tif (task.dependencies && task.dependencies.length > 0) {\n\t\t\tsections.push(`Dependencies: ${task.dependencies.join(', ')}`);\n\t\t}\n\n\t\tif (task.details) {\n\t\t\tconst details = truncate(task.details, 500);\n\t\t\tsections.push(`Implementation Details: ${details}`);\n\t\t}\n\n\t\tif (task.testStrategy) {\n\t\t\tconst testStrategy = truncate(task.testStrategy, 300);\n\t\t\tsections.push(`Test Strategy: ${testStrategy}`);\n\t\t}\n\n\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\tsections.push(`Subtasks: ${task.subtasks.length} subtasks defined`);\n\t\t}\n\n\t\treturn sections.join('\\n');\n\t}\n\n\t/**\n\t * Format a subtask for context inclusion\n\t * @param {Object} subtask - Subtask object\n\t * @param {Object} parentTask - Parent task object\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted subtask context\n\t */\n\t_formatSubtaskForContext(subtask, parentTask, format) {\n\t\tconst sections = [];\n\n\t\tsections.push(\n\t\t\t`**Subtask ${parentTask.id}.${subtask.id}: ${subtask.title}**`\n\t\t);\n\t\tsections.push(`Parent Task: ${parentTask.title}`);\n\t\tsections.push(`Description: ${subtask.description}`);\n\t\tsections.push(`Status: ${subtask.status || 'pending'}`);\n\n\t\tif (subtask.dependencies && subtask.dependencies.length > 0) 
{\n\t\t\tsections.push(`Dependencies: ${subtask.dependencies.join(', ')}`);\n\t\t}\n\n\t\tif (subtask.details) {\n\t\t\tconst details = truncate(subtask.details, 500);\n\t\t\tsections.push(`Implementation Details: ${details}`);\n\t\t}\n\n\t\treturn sections.join('\\n');\n\t}\n\n\t/**\n\t * Gather context from files\n\t * @param {Array<string>} filePaths - File paths to read\n\t * @param {string} format - Output format\n\t * @param {boolean} includeTokenCounts - Whether to include token breakdown\n\t * @returns {Promise<Object>} File context result with breakdown\n\t */\n\tasync _gatherFileContext(filePaths, format, includeTokenCounts = false) {\n\t\tconst fileContents = [];\n\t\tconst breakdown = [];\n\n\t\tfor (const filePath of filePaths) {\n\t\t\ttry {\n\t\t\t\tconst fullPath = path.isAbsolute(filePath)\n\t\t\t\t\t? filePath\n\t\t\t\t\t: path.join(this.projectRoot, filePath);\n\n\t\t\t\tif (!fs.existsSync(fullPath)) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tconst stats = fs.statSync(fullPath);\n\t\t\t\tif (!stats.isFile()) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t// Check file size (limit to 50KB for context)\n\t\t\t\tif (stats.size > 50 * 1024) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tconst content = fs.readFileSync(fullPath, 'utf-8');\n\t\t\t\tconst relativePath = path.relative(this.projectRoot, fullPath);\n\n\t\t\t\tconst fileData = {\n\t\t\t\t\tpath: relativePath,\n\t\t\t\t\tsize: stats.size,\n\t\t\t\t\tcontent: content,\n\t\t\t\t\tlastModified: stats.mtime\n\t\t\t\t};\n\n\t\t\t\tfileContents.push(fileData);\n\n\t\t\t\t// Calculate tokens for this individual file if requested\n\t\t\t\tif (includeTokenCounts) {\n\t\t\t\t\tconst formattedFile = this._formatSingleFileForContext(\n\t\t\t\t\t\tfileData,\n\t\t\t\t\t\tformat\n\t\t\t\t\t);\n\t\t\t\t\tbreakdown.push({\n\t\t\t\t\t\tpath: relativePath,\n\t\t\t\t\t\tsizeKB: Math.round(stats.size / 1024),\n\t\t\t\t\t\ttokens: this.countTokens(formattedFile),\n\t\t\t\t\t\tcharacters: 
formattedFile.length\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\t`Warning: Could not read file ${filePath}: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\tif (fileContents.length === 0) {\n\t\t\treturn { context: null, breakdown: [] };\n\t\t}\n\n\t\tconst finalContext = this._formatFileContextSection(fileContents, format);\n\t\treturn {\n\t\t\tcontext: finalContext,\n\t\t\tbreakdown: includeTokenCounts ? breakdown : []\n\t\t};\n\t}\n\n\t/**\n\t * Generate project file tree context\n\t * @param {string} format - Output format\n\t * @param {boolean} includeTokenCounts - Whether to include token breakdown\n\t * @returns {Promise<Object>} Project tree context result with breakdown\n\t */\n\tasync _gatherProjectTreeContext(format, includeTokenCounts = false) {\n\t\ttry {\n\t\t\tconst tree = this._generateFileTree(this.projectRoot, 5); // Max depth 5\n\t\t\tconst finalContext = this._formatProjectTreeSection(tree, format);\n\n\t\t\tconst breakdown = includeTokenCounts\n\t\t\t\t? 
{\n\t\t\t\t\t\ttokens: this.countTokens(finalContext),\n\t\t\t\t\t\tcharacters: finalContext.length,\n\t\t\t\t\t\tfileCount: tree.fileCount || 0,\n\t\t\t\t\t\tdirCount: tree.dirCount || 0\n\t\t\t\t\t}\n\t\t\t\t: null;\n\n\t\t\treturn {\n\t\t\t\tcontext: finalContext,\n\t\t\t\tbreakdown: breakdown\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tconsole.warn(\n\t\t\t\t`Warning: Could not generate project tree: ${error.message}`\n\t\t\t);\n\t\t\treturn { context: null, breakdown: null };\n\t\t}\n\t}\n\n\t/**\n\t * Format a single file for context (used for token counting)\n\t * @param {Object} fileData - File data object\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted file context\n\t */\n\t_formatSingleFileForContext(fileData, format) {\n\t\tconst header = `**File: ${fileData.path}** (${Math.round(fileData.size / 1024)}KB)`;\n\t\tconst content = `\\`\\`\\`\\n${fileData.content}\\n\\`\\`\\``;\n\t\treturn `${header}\\n\\n${content}`;\n\t}\n\n\t/**\n\t * Generate file tree structure\n\t * @param {string} dirPath - Directory path\n\t * @param {number} maxDepth - Maximum depth to traverse\n\t * @param {number} currentDepth - Current depth\n\t * @returns {Object} File tree structure\n\t */\n\t_generateFileTree(dirPath, maxDepth, currentDepth = 0) {\n\t\tconst ignoreDirs = [\n\t\t\t'.git',\n\t\t\t'node_modules',\n\t\t\t'.env',\n\t\t\t'coverage',\n\t\t\t'dist',\n\t\t\t'build'\n\t\t];\n\t\tconst ignoreFiles = ['.DS_Store', '.env', '.env.local', '.env.production'];\n\n\t\tif (currentDepth >= maxDepth) {\n\t\t\treturn null;\n\t\t}\n\n\t\ttry {\n\t\t\tconst items = fs.readdirSync(dirPath);\n\t\t\tconst tree = {\n\t\t\t\tname: path.basename(dirPath),\n\t\t\t\ttype: 'directory',\n\t\t\t\tchildren: [],\n\t\t\t\tfileCount: 0,\n\t\t\t\tdirCount: 0\n\t\t\t};\n\n\t\t\tfor (const item of items) {\n\t\t\t\tif (ignoreDirs.includes(item) || ignoreFiles.includes(item)) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tconst itemPath = path.join(dirPath, 
item);\n\t\t\t\tconst stats = fs.statSync(itemPath);\n\n\t\t\t\tif (stats.isDirectory()) {\n\t\t\t\t\ttree.dirCount++;\n\t\t\t\t\tif (currentDepth < maxDepth - 1) {\n\t\t\t\t\t\tconst subtree = this._generateFileTree(\n\t\t\t\t\t\t\titemPath,\n\t\t\t\t\t\t\tmaxDepth,\n\t\t\t\t\t\t\tcurrentDepth + 1\n\t\t\t\t\t\t);\n\t\t\t\t\t\tif (subtree) {\n\t\t\t\t\t\t\ttree.children.push(subtree);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ttree.fileCount++;\n\t\t\t\t\ttree.children.push({\n\t\t\t\t\t\tname: item,\n\t\t\t\t\t\ttype: 'file',\n\t\t\t\t\t\tsize: stats.size\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn tree;\n\t\t} catch (error) {\n\t\t\treturn null;\n\t\t}\n\t}\n\n\t/**\n\t * Format custom context section\n\t * @param {string} customContext - Custom context string\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted custom context\n\t */\n\t_formatCustomContext(customContext, format) {\n\t\tswitch (format) {\n\t\t\tcase 'research':\n\t\t\t\treturn `## Additional Context\\n\\n${customContext}`;\n\t\t\tcase 'chat':\n\t\t\t\treturn `**Additional Context:**\\n${customContext}`;\n\t\t\tcase 'system-prompt':\n\t\t\t\treturn `Additional context: ${customContext}`;\n\t\t\tdefault:\n\t\t\t\treturn customContext;\n\t\t}\n\t}\n\n\t/**\n\t * Format task context section\n\t * @param {Array<string>} taskItems - Formatted task items\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted task context section\n\t */\n\t_formatTaskContextSection(taskItems, format) {\n\t\tswitch (format) {\n\t\t\tcase 'research':\n\t\t\t\treturn `## Task Context\\n\\n${taskItems.join('\\n\\n---\\n\\n')}`;\n\t\t\tcase 'chat':\n\t\t\t\treturn `**Task Context:**\\n\\n${taskItems.join('\\n\\n')}`;\n\t\t\tcase 'system-prompt':\n\t\t\t\treturn `Task context: ${taskItems.join(' | ')}`;\n\t\t\tdefault:\n\t\t\t\treturn taskItems.join('\\n\\n');\n\t\t}\n\t}\n\n\t/**\n\t * Format file context section\n\t * @param {Array<Object>} fileContents - 
File content objects\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted file context section\n\t */\n\t_formatFileContextSection(fileContents, format) {\n\t\tconst fileItems = fileContents.map((file) => {\n\t\t\tconst header = `**File: ${file.path}** (${Math.round(file.size / 1024)}KB)`;\n\t\t\tconst content = `\\`\\`\\`\\n${file.content}\\n\\`\\`\\``;\n\t\t\treturn `${header}\\n\\n${content}`;\n\t\t});\n\n\t\tswitch (format) {\n\t\t\tcase 'research':\n\t\t\t\treturn `## File Context\\n\\n${fileItems.join('\\n\\n---\\n\\n')}`;\n\t\t\tcase 'chat':\n\t\t\t\treturn `**File Context:**\\n\\n${fileItems.join('\\n\\n')}`;\n\t\t\tcase 'system-prompt':\n\t\t\t\treturn `File context: ${fileContents.map((f) => `${f.path} (${f.content.substring(0, 200)}...)`).join(' | ')}`;\n\t\t\tdefault:\n\t\t\t\treturn fileItems.join('\\n\\n');\n\t\t}\n\t}\n\n\t/**\n\t * Format project tree section\n\t * @param {Object} tree - File tree structure\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted project tree section\n\t */\n\t_formatProjectTreeSection(tree, format) {\n\t\tconst treeString = this._renderFileTree(tree);\n\n\t\tswitch (format) {\n\t\t\tcase 'research':\n\t\t\t\treturn `## Project Structure\\n\\n\\`\\`\\`\\n${treeString}\\n\\`\\`\\``;\n\t\t\tcase 'chat':\n\t\t\t\treturn `**Project Structure:**\\n\\`\\`\\`\\n${treeString}\\n\\`\\`\\``;\n\t\t\tcase 'system-prompt':\n\t\t\t\treturn `Project structure: ${treeString.replace(/\\n/g, ' | ')}`;\n\t\t\tdefault:\n\t\t\t\treturn treeString;\n\t\t}\n\t}\n\n\t/**\n\t * Render file tree as string\n\t * @param {Object} tree - File tree structure\n\t * @param {string} prefix - Current prefix for indentation\n\t * @returns {string} Rendered tree string\n\t */\n\t_renderFileTree(tree, prefix = '') {\n\t\tlet result = `${prefix}${tree.name}/`;\n\n\t\tif (tree.fileCount > 0 || tree.dirCount > 0) {\n\t\t\tresult += ` (${tree.fileCount} files, ${tree.dirCount} dirs)`;\n\t\t}\n\n\t\tresult += 
'\\n';\n\n\t\tif (tree.children) {\n\t\t\ttree.children.forEach((child, index) => {\n\t\t\t\tconst isLast = index === tree.children.length - 1;\n\t\t\t\tconst childPrefix = prefix + (isLast ? '└── ' : '├── ');\n\t\t\t\tconst nextPrefix = prefix + (isLast ? ' ' : '│ ');\n\n\t\t\t\tif (child.type === 'directory') {\n\t\t\t\t\tresult += this._renderFileTree(child, childPrefix);\n\t\t\t\t} else {\n\t\t\t\t\tresult += `${childPrefix}${child.name}\\n`;\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\n\t\treturn result;\n\t}\n\n\t/**\n\t * Join context sections based on format\n\t * @param {Array<string>} sections - Context sections\n\t * @param {string} format - Output format\n\t * @returns {string} Joined context string\n\t */\n\t_joinContextSections(sections, format) {\n\t\tif (sections.length === 0) {\n\t\t\treturn '';\n\t\t}\n\n\t\tswitch (format) {\n\t\t\tcase 'research':\n\t\t\t\treturn sections.join('\\n\\n---\\n\\n');\n\t\t\tcase 'chat':\n\t\t\t\treturn sections.join('\\n\\n');\n\t\t\tcase 'system-prompt':\n\t\t\t\treturn sections.join(' ');\n\t\t\tdefault:\n\t\t\t\treturn sections.join('\\n\\n');\n\t\t}\n\t}\n}\n\n/**\n * Factory function to create a context gatherer instance\n * @param {string} projectRoot - Project root directory\n * @param {string} tag - Tag for the task\n * @returns {ContextGatherer} Context gatherer instance\n * @throws {Error} If tag is not provided\n */\nexport function createContextGatherer(projectRoot, tag) {\n\tif (!tag) {\n\t\tthrow new Error('Tag is required');\n\t}\n\treturn new ContextGatherer(projectRoot, tag);\n}\n\nexport default ContextGatherer;\n"], ["/claude-task-master/scripts/modules/task-manager/add-subtask.js", "import path from 'path';\n\nimport { log, readJSON, writeJSON, getCurrentTag } from '../utils.js';\nimport { isTaskDependentOn } from '../task-manager.js';\nimport generateTaskFiles from './generate-task-files.js';\n\n/**\n * Add a subtask to a parent task\n * @param {string} tasksPath - Path to the tasks.json file\n * @param 
{number|string} parentId - ID of the parent task\n * @param {number|string|null} existingTaskId - ID of an existing task to convert to subtask (optional)\n * @param {Object} newSubtaskData - Data for creating a new subtask (used if existingTaskId is null)\n * @param {boolean} generateFiles - Whether to regenerate task files after adding the subtask\n * @param {Object} context - Context object containing projectRoot and tag information\n * @param {string} context.projectRoot - Project root path\n * @param {string} context.tag - Tag for the task\n * @returns {Object} The newly created or converted subtask\n */\nasync function addSubtask(\n\ttasksPath,\n\tparentId,\n\texistingTaskId = null,\n\tnewSubtaskData = null,\n\tgenerateFiles = false,\n\tcontext = {}\n) {\n\tconst { projectRoot, tag } = context;\n\ttry {\n\t\tlog('info', `Adding subtask to parent task ${parentId}...`);\n\n\t\t// Read the existing tasks with proper context\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!data || !data.tasks) {\n\t\t\tthrow new Error(`Invalid or missing tasks file at ${tasksPath}`);\n\t\t}\n\n\t\t// Convert parent ID to number\n\t\tconst parentIdNum = parseInt(parentId, 10);\n\n\t\t// Find the parent task\n\t\tconst parentTask = data.tasks.find((t) => t.id === parentIdNum);\n\t\tif (!parentTask) {\n\t\t\tthrow new Error(`Parent task with ID ${parentIdNum} not found`);\n\t\t}\n\n\t\t// Initialize subtasks array if it doesn't exist\n\t\tif (!parentTask.subtasks) {\n\t\t\tparentTask.subtasks = [];\n\t\t}\n\n\t\tlet newSubtask;\n\n\t\t// Case 1: Convert an existing task to a subtask\n\t\tif (existingTaskId !== null) {\n\t\t\tconst existingTaskIdNum = parseInt(existingTaskId, 10);\n\n\t\t\t// Find the existing task\n\t\t\tconst existingTaskIndex = data.tasks.findIndex(\n\t\t\t\t(t) => t.id === existingTaskIdNum\n\t\t\t);\n\t\t\tif (existingTaskIndex === -1) {\n\t\t\t\tthrow new Error(`Task with ID ${existingTaskIdNum} not found`);\n\t\t\t}\n\n\t\t\tconst existingTask 
= data.tasks[existingTaskIndex];\n\n\t\t\t// Check if task is already a subtask\n\t\t\tif (existingTask.parentTaskId) {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`Task ${existingTaskIdNum} is already a subtask of task ${existingTask.parentTaskId}`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Check for circular dependency\n\t\t\tif (existingTaskIdNum === parentIdNum) {\n\t\t\t\tthrow new Error(`Cannot make a task a subtask of itself`);\n\t\t\t}\n\n\t\t\t// Check if parent task is a subtask of the task we're converting\n\t\t\t// This would create a circular dependency\n\t\t\tif (isTaskDependentOn(data.tasks, parentTask, existingTaskIdNum)) {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`Cannot create circular dependency: task ${parentIdNum} is already a subtask or dependent of task ${existingTaskIdNum}`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Find the highest subtask ID to determine the next ID\n\t\t\tconst highestSubtaskId =\n\t\t\t\tparentTask.subtasks.length > 0\n\t\t\t\t\t? Math.max(...parentTask.subtasks.map((st) => st.id))\n\t\t\t\t\t: 0;\n\t\t\tconst newSubtaskId = highestSubtaskId + 1;\n\n\t\t\t// Clone the existing task to be converted to a subtask\n\t\t\tnewSubtask = {\n\t\t\t\t...existingTask,\n\t\t\t\tid: newSubtaskId,\n\t\t\t\tparentTaskId: parentIdNum\n\t\t\t};\n\n\t\t\t// Add to parent's subtasks\n\t\t\tparentTask.subtasks.push(newSubtask);\n\n\t\t\t// Remove the task from the main tasks array\n\t\t\tdata.tasks.splice(existingTaskIndex, 1);\n\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t`Converted task ${existingTaskIdNum} to subtask ${parentIdNum}.${newSubtaskId}`\n\t\t\t);\n\t\t}\n\t\t// Case 2: Create a new subtask\n\t\telse if (newSubtaskData) {\n\t\t\t// Find the highest subtask ID to determine the next ID\n\t\t\tconst highestSubtaskId =\n\t\t\t\tparentTask.subtasks.length > 0\n\t\t\t\t\t? 
Math.max(...parentTask.subtasks.map((st) => st.id))\n\t\t\t\t\t: 0;\n\t\t\tconst newSubtaskId = highestSubtaskId + 1;\n\n\t\t\t// Create the new subtask object\n\t\t\tnewSubtask = {\n\t\t\t\tid: newSubtaskId,\n\t\t\t\ttitle: newSubtaskData.title,\n\t\t\t\tdescription: newSubtaskData.description || '',\n\t\t\t\tdetails: newSubtaskData.details || '',\n\t\t\t\tstatus: newSubtaskData.status || 'pending',\n\t\t\t\tdependencies: newSubtaskData.dependencies || [],\n\t\t\t\tparentTaskId: parentIdNum\n\t\t\t};\n\n\t\t\t// Add to parent's subtasks\n\t\t\tparentTask.subtasks.push(newSubtask);\n\n\t\t\tlog('info', `Created new subtask ${parentIdNum}.${newSubtaskId}`);\n\t\t} else {\n\t\t\tthrow new Error(\n\t\t\t\t'Either existingTaskId or newSubtaskData must be provided'\n\t\t\t);\n\t\t}\n\n\t\t// Write the updated tasks back to the file with proper context\n\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\n\t\t// Generate task files if requested\n\t\tif (generateFiles) {\n\t\t\tlog('info', 'Regenerating task files...');\n\t\t\tawait generateTaskFiles(tasksPath, path.dirname(tasksPath), context);\n\t\t}\n\n\t\treturn newSubtask;\n\t} catch (error) {\n\t\tlog('error', `Error adding subtask: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\nexport default addSubtask;\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/clear-subtasks.js", "/**\n * Direct function wrapper for clearSubtasks\n */\n\nimport { clearSubtasks } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode,\n\treadJSON\n} from '../../../../scripts/modules/utils.js';\nimport fs from 'fs';\nimport path from 'path';\n\n/**\n * Clear subtasks from specified tasks\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} [args.id] - Task IDs (comma-separated) to clear subtasks from\n * @param {boolean} [args.all] - Clear subtasks from all tasks\n * @param {string} 
[args.tag] - Tag context to operate on (defaults to current active tag)\n * @param {string} [args.projectRoot] - Project root path (for MCP/env fallback)\n * @param {Object} log - Logger object\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function clearSubtasksDirect(args, log) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, id, all, tag, projectRoot } = args;\n\ttry {\n\t\tlog.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('clearSubtasksDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Either id or all must be provided\n\t\tif (!id && !all) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage:\n\t\t\t\t\t\t'Either task IDs with id parameter or all parameter must be provided'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Check if tasks.json exists\n\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'FILE_NOT_FOUND_ERROR',\n\t\t\t\t\tmessage: `Tasks file not found at ${tasksPath}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlet taskIds;\n\n\t\t// Use readJSON which handles silent migration and tag resolution\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\n\t\tif (!data || !data.tasks) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: `No tasks found in tasks file: ${tasksPath}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tconst currentTag = data.tag || tag;\n\t\tconst tasks = data.tasks;\n\n\t\t// If all is specified, get all task IDs\n\t\tif (all) 
{\n\t\t\tlog.info(`Clearing subtasks from all tasks in tag '${currentTag}'`);\n\t\t\tif (tasks.length === 0) {\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\t\tmessage: `No tasks found in tag context '${currentTag}'`\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\t\t\ttaskIds = tasks.map((t) => t.id).join(',');\n\t\t} else {\n\t\t\t// Use the provided task IDs\n\t\t\ttaskIds = id;\n\t\t}\n\n\t\tlog.info(`Clearing subtasks from tasks: ${taskIds} in tag '${currentTag}'`);\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\t// Call the core function\n\t\tclearSubtasks(tasksPath, taskIds, { projectRoot, tag: currentTag });\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\t// Read the updated data to provide a summary\n\t\tconst updatedData = readJSON(tasksPath, projectRoot, currentTag);\n\t\tconst taskIdArray = taskIds.split(',').map((id) => parseInt(id.trim(), 10));\n\n\t\t// Build a summary of what was done\n\t\tconst clearedTasksCount = taskIdArray.length;\n\t\tconst updatedTasks = updatedData.tasks || [];\n\n\t\tconst taskSummary = taskIdArray.map((id) => {\n\t\t\tconst task = updatedTasks.find((t) => t.id === id);\n\t\t\treturn task ? 
{ id, title: task.title } : { id, title: 'Task not found' };\n\t\t});\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: `Successfully cleared subtasks from ${clearedTasksCount} task(s) in tag '${currentTag}'`,\n\t\t\t\ttasksCleared: taskSummary,\n\t\t\t\ttag: currentTag\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in clearSubtasksDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/task-manager/remove-subtask.js", "import path from 'path';\nimport { log, readJSON, writeJSON } from '../utils.js';\nimport generateTaskFiles from './generate-task-files.js';\n\n/**\n * Remove a subtask from its parent task\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} subtaskId - ID of the subtask to remove in format \"parentId.subtaskId\"\n * @param {boolean} convertToTask - Whether to convert the subtask to a standalone task\n * @param {boolean} generateFiles - Whether to regenerate task files after removing the subtask\n * @param {Object} context - Context object containing projectRoot and tag information\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n * @returns {Object|null} The removed subtask if convertToTask is true, otherwise null\n */\nasync function removeSubtask(\n\ttasksPath,\n\tsubtaskId,\n\tconvertToTask = false,\n\tgenerateFiles = false,\n\tcontext = {}\n) {\n\tconst { projectRoot, tag } = context;\n\ttry {\n\t\tlog('info', `Removing subtask ${subtaskId}...`);\n\n\t\t// Read the existing tasks with proper context\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!data || !data.tasks) {\n\t\t\tthrow new Error(`Invalid or missing tasks file at 
${tasksPath}`);\n\t\t}\n\n\t\t// Parse the subtask ID (format: \"parentId.subtaskId\")\n\t\tif (!subtaskId.includes('.')) {\n\t\t\tthrow new Error(\n\t\t\t\t`Invalid subtask ID format: ${subtaskId}. Expected format: \"parentId.subtaskId\"`\n\t\t\t);\n\t\t}\n\n\t\tconst [parentIdStr, subtaskIdStr] = subtaskId.split('.');\n\t\tconst parentId = parseInt(parentIdStr, 10);\n\t\tconst subtaskIdNum = parseInt(subtaskIdStr, 10);\n\n\t\t// Find the parent task\n\t\tconst parentTask = data.tasks.find((t) => t.id === parentId);\n\t\tif (!parentTask) {\n\t\t\tthrow new Error(`Parent task with ID ${parentId} not found`);\n\t\t}\n\n\t\t// Check if parent has subtasks\n\t\tif (!parentTask.subtasks || parentTask.subtasks.length === 0) {\n\t\t\tthrow new Error(`Parent task ${parentId} has no subtasks`);\n\t\t}\n\n\t\t// Find the subtask to remove\n\t\tconst subtaskIndex = parentTask.subtasks.findIndex(\n\t\t\t(st) => st.id === subtaskIdNum\n\t\t);\n\t\tif (subtaskIndex === -1) {\n\t\t\tthrow new Error(`Subtask ${subtaskId} not found`);\n\t\t}\n\n\t\t// Get a copy of the subtask before removing it\n\t\tconst removedSubtask = { ...parentTask.subtasks[subtaskIndex] };\n\n\t\t// Remove the subtask from the parent\n\t\tparentTask.subtasks.splice(subtaskIndex, 1);\n\n\t\t// If parent has no more subtasks, remove the subtasks array\n\t\tif (parentTask.subtasks.length === 0) {\n\t\t\tparentTask.subtasks = undefined;\n\t\t}\n\n\t\tlet convertedTask = null;\n\n\t\t// Convert the subtask to a standalone task if requested\n\t\tif (convertToTask) {\n\t\t\tlog('info', `Converting subtask ${subtaskId} to a standalone task...`);\n\n\t\t\t// Find the highest task ID to determine the next ID\n\t\t\tconst highestId = Math.max(...data.tasks.map((t) => t.id));\n\t\t\tconst newTaskId = highestId + 1;\n\n\t\t\t// Create the new task from the subtask\n\t\t\tconvertedTask = {\n\t\t\t\tid: newTaskId,\n\t\t\t\ttitle: removedSubtask.title,\n\t\t\t\tdescription: removedSubtask.description || 
'',\n\t\t\t\tdetails: removedSubtask.details || '',\n\t\t\t\tstatus: removedSubtask.status || 'pending',\n\t\t\t\tdependencies: removedSubtask.dependencies || [],\n\t\t\t\tpriority: parentTask.priority || 'medium' // Inherit priority from parent\n\t\t\t};\n\n\t\t\t// Add the parent task as a dependency if not already present\n\t\t\tif (!convertedTask.dependencies.includes(parentId)) {\n\t\t\t\tconvertedTask.dependencies.push(parentId);\n\t\t\t}\n\n\t\t\t// Add the converted task to the tasks array\n\t\t\tdata.tasks.push(convertedTask);\n\n\t\t\tlog('info', `Created new task ${newTaskId} from subtask ${subtaskId}`);\n\t\t} else {\n\t\t\tlog('info', `Subtask ${subtaskId} deleted`);\n\t\t}\n\n\t\t// Write the updated tasks back to the file with proper context\n\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\n\t\t// Generate task files if requested\n\t\tif (generateFiles) {\n\t\t\tlog('info', 'Regenerating task files...');\n\t\t\tawait generateTaskFiles(tasksPath, path.dirname(tasksPath), context);\n\t\t}\n\n\t\treturn convertedTask;\n\t} catch (error) {\n\t\tlog('error', `Error removing subtask: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\nexport default removeSubtask;\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/update-task-by-id.js", "/**\n * update-task-by-id.js\n * Direct function implementation for updating a single task by ID with new information\n */\n\nimport { updateTaskById } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for updateTaskById with error handling.\n *\n * @param {Object} args - Command arguments containing id, prompt, useResearch, tasksJsonPath, and projectRoot.\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.id - Task ID (or subtask ID like 
\"1.2\").\n * @param {string} args.prompt - New information/context prompt.\n * @param {boolean} [args.research] - Whether to use research role.\n * @param {boolean} [args.append] - Whether to append timestamped information instead of full update.\n * @param {string} [args.projectRoot] - Project root path.\n * @param {string} [args.tag] - Tag for the task (optional)\n * @param {Object} log - Logger object.\n * @param {Object} context - Context object containing session data.\n * @returns {Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function updateTaskByIdDirect(args, log, context = {}) {\n\tconst { session } = context;\n\t// Destructure expected args, including projectRoot\n\tconst { tasksJsonPath, id, prompt, research, append, projectRoot, tag } =\n\t\targs;\n\n\tconst logWrapper = createLogWrapper(log);\n\n\ttry {\n\t\tlogWrapper.info(\n\t\t\t`Updating task by ID via direct function. ID: ${id}, ProjectRoot: ${projectRoot}`\n\t\t);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tconst errorMessage = 'tasksJsonPath is required but was not provided.';\n\t\t\tlogWrapper.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Check required parameters (id and prompt)\n\t\tif (!id) {\n\t\t\tconst errorMessage =\n\t\t\t\t'No task ID specified. Please provide a task ID to update.';\n\t\t\tlogWrapper.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_TASK_ID', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\tif (!prompt) {\n\t\t\tconst errorMessage =\n\t\t\t\t'No prompt specified. 
Please provide a prompt with new information for the task update.';\n\t\t\tlogWrapper.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_PROMPT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Parse taskId - handle both string and number values\n\t\tlet taskId;\n\t\tif (typeof id === 'string') {\n\t\t\t// Handle subtask IDs (e.g., \"5.2\")\n\t\t\tif (id.includes('.')) {\n\t\t\t\ttaskId = id; // Keep as string for subtask IDs\n\t\t\t} else {\n\t\t\t\t// Parse as integer for main task IDs\n\t\t\t\ttaskId = parseInt(id, 10);\n\t\t\t\tif (Number.isNaN(taskId)) {\n\t\t\t\t\tconst errorMessage = `Invalid task ID: ${id}. Task ID must be a positive integer or subtask ID (e.g., \"5.2\").`;\n\t\t\t\t\tlogWrapper.error(errorMessage);\n\t\t\t\t\treturn {\n\t\t\t\t\t\tsuccess: false,\n\t\t\t\t\t\terror: { code: 'INVALID_TASK_ID', message: errorMessage }\n\t\t\t\t\t};\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\ttaskId = id;\n\t\t}\n\n\t\t// Use the provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Get research flag\n\t\tconst useResearch = research === true;\n\n\t\tlogWrapper.info(\n\t\t\t`Updating task with ID ${taskId} with prompt \"${prompt}\" and research: ${useResearch}`\n\t\t);\n\n\t\tconst wasSilent = isSilentMode();\n\t\tif (!wasSilent) {\n\t\t\tenableSilentMode();\n\t\t}\n\n\t\ttry {\n\t\t\t// Execute core updateTaskById function with proper parameters\n\t\t\tconst coreResult = await updateTaskById(\n\t\t\t\ttasksPath,\n\t\t\t\ttaskId,\n\t\t\t\tprompt,\n\t\t\t\tuseResearch,\n\t\t\t\t{\n\t\t\t\t\tmcpLog: logWrapper,\n\t\t\t\t\tsession,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\ttag,\n\t\t\t\t\tcommandName: 'update-task',\n\t\t\t\t\toutputType: 'mcp'\n\t\t\t\t},\n\t\t\t\t'json',\n\t\t\t\tappend || false\n\t\t\t);\n\n\t\t\t// Check if the core function returned null or an object without success\n\t\t\tif (!coreResult || coreResult.updatedTask === null) {\n\t\t\t\t// Core function logs the reason, just return success with 
info\n\t\t\t\tconst message = `Task ${taskId} was not updated (likely already completed).`;\n\t\t\t\tlogWrapper.info(message);\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: true,\n\t\t\t\t\tdata: {\n\t\t\t\t\t\tmessage: message,\n\t\t\t\t\t\ttaskId: taskId,\n\t\t\t\t\t\tupdated: false,\n\t\t\t\t\t\ttelemetryData: coreResult?.telemetryData,\n\t\t\t\t\t\ttagInfo: coreResult?.tagInfo\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Task was updated successfully\n\t\t\tconst successMessage = `Successfully updated task with ID ${taskId} based on the prompt`;\n\t\t\tlogWrapper.success(successMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: successMessage,\n\t\t\t\t\ttaskId: taskId,\n\t\t\t\t\ttasksPath: tasksPath,\n\t\t\t\t\tuseResearch: useResearch,\n\t\t\t\t\tupdated: true,\n\t\t\t\t\tupdatedTask: coreResult.updatedTask,\n\t\t\t\t\ttelemetryData: coreResult.telemetryData,\n\t\t\t\t\ttagInfo: coreResult.tagInfo\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tlogWrapper.error(`Error updating task by ID: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'UPDATE_TASK_CORE_ERROR',\n\t\t\t\t\tmessage: error.message || 'Unknown error updating task'\n\t\t\t\t}\n\t\t\t};\n\t\t} finally {\n\t\t\tif (!wasSilent && isSilentMode()) {\n\t\t\t\tdisableSilentMode();\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tlogWrapper.error(`Setup error in updateTaskByIdDirect: ${error.message}`);\n\t\tif (isSilentMode()) disableSilentMode();\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'DIRECT_FUNCTION_SETUP_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown setup error'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/show-task.js", "/**\n * show-task.js\n * Direct function implementation for showing task details\n */\n\nimport {\n\tfindTaskById,\n\treadComplexityReport,\n\treadJSON\n} from '../../../../scripts/modules/utils.js';\nimport { findTasksPath } from 
'../utils/path-utils.js';\n\n/**\n * Direct function wrapper for getting task details.\n *\n * @param {Object} args - Command arguments.\n * @param {string} args.id - Task ID to show.\n * @param {string} [args.file] - Optional path to the tasks file (passed to findTasksPath).\n * @param {string} args.reportPath - Explicit path to the complexity report file.\n * @param {string} [args.status] - Optional status to filter subtasks by.\n * @param {string} args.projectRoot - Absolute path to the project root directory (already normalized by tool).\n * @param {string} [args.tag] - Tag for the task\n * @param {Object} log - Logger object.\n * @param {Object} context - Context object containing session data.\n * @returns {Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function showTaskDirect(args, log) {\n\t// This function doesn't need session context since it only reads data\n\t// Destructure projectRoot and other args. projectRoot is assumed normalized.\n\tconst { id, file, reportPath, status, projectRoot, tag } = args;\n\n\tlog.info(\n\t\t`Showing task direct function. 
ID: ${id}, File: ${file}, Status Filter: ${status}, ProjectRoot: ${projectRoot}`\n\t);\n\n\t// --- Path Resolution using the passed (already normalized) projectRoot ---\n\tlet tasksJsonPath;\n\ttry {\n\t\t// Use the projectRoot passed directly from args\n\t\ttasksJsonPath = findTasksPath(\n\t\t\t{ projectRoot: projectRoot, file: file },\n\t\t\tlog\n\t\t);\n\t\tlog.info(`Resolved tasks path: ${tasksJsonPath}`);\n\t} catch (error) {\n\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'TASKS_FILE_NOT_FOUND',\n\t\t\t\tmessage: `Failed to find tasks.json: ${error.message}`\n\t\t\t}\n\t\t};\n\t}\n\t// --- End Path Resolution ---\n\n\t// --- Rest of the function remains the same, using tasksJsonPath ---\n\ttry {\n\t\tconst tasksData = readJSON(tasksJsonPath, projectRoot, tag);\n\t\tif (!tasksData || !tasksData.tasks) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'INVALID_TASKS_DATA', message: 'Invalid tasks data' }\n\t\t\t};\n\t\t}\n\n\t\tconst complexityReport = readComplexityReport(reportPath);\n\n\t\t// Parse comma-separated IDs\n\t\tconst taskIds = id\n\t\t\t.split(',')\n\t\t\t.map((taskId) => taskId.trim())\n\t\t\t.filter((taskId) => taskId.length > 0);\n\n\t\tif (taskIds.length === 0) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_TASK_ID',\n\t\t\t\t\tmessage: 'No valid task IDs provided'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Handle single task ID (existing behavior)\n\t\tif (taskIds.length === 1) {\n\t\t\tconst { task, originalSubtaskCount } = findTaskById(\n\t\t\t\ttasksData.tasks,\n\t\t\t\ttaskIds[0],\n\t\t\t\tcomplexityReport,\n\t\t\t\tstatus\n\t\t\t);\n\n\t\t\tif (!task) {\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'TASK_NOT_FOUND',\n\t\t\t\t\t\tmessage: `Task or subtask with ID ${taskIds[0]} not found`\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tlog.info(`Successfully retrieved task 
${taskIds[0]}.`);\n\n\t\t\tconst returnData = { ...task };\n\t\t\tif (originalSubtaskCount !== null) {\n\t\t\t\treturnData._originalSubtaskCount = originalSubtaskCount;\n\t\t\t\treturnData._subtaskFilter = status;\n\t\t\t}\n\n\t\t\treturn { success: true, data: returnData };\n\t\t}\n\n\t\t// Handle multiple task IDs\n\t\tconst foundTasks = [];\n\t\tconst notFoundIds = [];\n\n\t\ttaskIds.forEach((taskId) => {\n\t\t\tconst { task, originalSubtaskCount } = findTaskById(\n\t\t\t\ttasksData.tasks,\n\t\t\t\ttaskId,\n\t\t\t\tcomplexityReport,\n\t\t\t\tstatus\n\t\t\t);\n\n\t\t\tif (task) {\n\t\t\t\tconst taskData = { ...task };\n\t\t\t\tif (originalSubtaskCount !== null) {\n\t\t\t\t\ttaskData._originalSubtaskCount = originalSubtaskCount;\n\t\t\t\t\ttaskData._subtaskFilter = status;\n\t\t\t\t}\n\t\t\t\tfoundTasks.push(taskData);\n\t\t\t} else {\n\t\t\t\tnotFoundIds.push(taskId);\n\t\t\t}\n\t\t});\n\n\t\tlog.info(\n\t\t\t`Successfully retrieved ${foundTasks.length} of ${taskIds.length} requested tasks.`\n\t\t);\n\n\t\t// Return multiple tasks with metadata\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\ttasks: foundTasks,\n\t\t\t\trequestedIds: taskIds,\n\t\t\t\tfoundCount: foundTasks.length,\n\t\t\t\tnotFoundIds: notFoundIds,\n\t\t\t\tisMultiple: true\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\tlog.error(`Error showing task ${id}: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'TASK_OPERATION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/sync-readme.js", "import fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport { log, findProjectRoot } from './utils.js';\nimport { getProjectName } from './config-manager.js';\nimport listTasks from './task-manager/list-tasks.js';\n\n/**\n * Creates a basic README structure if one doesn't exist\n * @param {string} projectName - Name of the project\n * @returns {string} - Basic README content\n 
*/\nfunction createBasicReadme(projectName) {\n\treturn `# ${projectName}\n\nThis project is managed using Task Master.\n\n`;\n}\n\n/**\n * Create UTM tracking URL for task-master.dev\n * @param {string} projectRoot - The project root path\n * @returns {string} - UTM tracked URL\n */\nfunction createTaskMasterUrl(projectRoot) {\n\t// Get the actual folder name from the project root path\n\tconst folderName = path.basename(projectRoot);\n\n\t// Clean folder name for UTM (replace spaces/special chars with hyphens)\n\tconst cleanFolderName = folderName\n\t\t.toLowerCase()\n\t\t.replace(/[^a-z0-9]/g, '-')\n\t\t.replace(/-+/g, '-')\n\t\t.replace(/^-|-$/g, '');\n\n\tconst utmParams = new URLSearchParams({\n\t\tutm_source: 'github-readme',\n\t\tutm_medium: 'readme-export',\n\t\tutm_campaign: cleanFolderName || 'task-sync',\n\t\tutm_content: 'task-export-link'\n\t});\n\n\treturn `https://task-master.dev?${utmParams.toString()}`;\n}\n\n/**\n * Create the start marker with metadata\n * @param {Object} options - Export options\n * @returns {string} - Formatted start marker\n */\nfunction createStartMarker(options) {\n\tconst { timestamp, withSubtasks, status, projectRoot } = options;\n\n\t// Format status filter text\n\tconst statusText = status\n\t\t? `Status filter: ${status}`\n\t\t: 'Status filter: none';\n\tconst subtasksText = withSubtasks ? 
'with subtasks' : 'without subtasks';\n\n\t// Create the export info content\n\tconst exportInfo =\n\t\t`🎯 **Taskmaster Export** - ${timestamp}\\n` +\n\t\t`📋 Export: ${subtasksText} • ${statusText}\\n` +\n\t\t`🔗 Powered by [Task Master](${createTaskMasterUrl(projectRoot)})`;\n\n\t// Create a markdown box using code blocks and emojis to mimic our UI style\n\tconst boxContent =\n\t\t`<!-- TASKMASTER_EXPORT_START -->\\n` +\n\t\t`> ${exportInfo.split('\\n').join('\\n> ')}\\n\\n`;\n\n\treturn boxContent;\n}\n\n/**\n * Create the end marker\n * @returns {string} - Formatted end marker\n */\nfunction createEndMarker() {\n\treturn (\n\t\t`\\n> 📋 **End of Taskmaster Export** - Tasks are synced from your project using the \\`sync-readme\\` command.\\n` +\n\t\t`<!-- TASKMASTER_EXPORT_END -->\\n`\n\t);\n}\n\n/**\n * Syncs the current task list to README.md at the project root\n * @param {string} projectRoot - Path to the project root directory\n * @param {Object} options - Options for syncing\n * @param {boolean} options.withSubtasks - Include subtasks in the output (default: false)\n * @param {string} options.status - Filter by status (e.g., 'pending', 'done')\n * @param {string} options.tasksPath - Custom path to tasks.json\n * @returns {boolean} - True if sync was successful, false otherwise\n * TODO: Add tag support - this is not currently supported how we want to handle this - Parthy\n */\nexport async function syncTasksToReadme(projectRoot = null, options = {}) {\n\ttry {\n\t\tconst actualProjectRoot = projectRoot || findProjectRoot() || '.';\n\t\tconst { withSubtasks = false, status, tasksPath, tag } = options;\n\n\t\t// Get current tasks using the list-tasks functionality with markdown-readme format\n\t\tconst tasksOutput = await listTasks(\n\t\t\ttasksPath ||\n\t\t\t\tpath.join(actualProjectRoot, '.taskmaster', 'tasks', 'tasks.json'),\n\t\t\tstatus,\n\t\t\tnull,\n\t\t\twithSubtasks,\n\t\t\t'markdown-readme',\n\t\t\t{ projectRoot, tag }\n\t\t);\n\n\t\tif (!tasksOutput) 
{\n\t\t\tconsole.log(chalk.red('❌ Failed to generate task output'));\n\t\t\treturn false;\n\t\t}\n\n\t\t// Generate timestamp and metadata\n\t\tconst timestamp =\n\t\t\tnew Date().toISOString().replace('T', ' ').substring(0, 19) + ' UTC';\n\t\tconst projectName = getProjectName(actualProjectRoot);\n\n\t\t// Create the export markers with metadata\n\t\tconst startMarker = createStartMarker({\n\t\t\ttimestamp,\n\t\t\twithSubtasks,\n\t\t\tstatus,\n\t\t\tprojectRoot: actualProjectRoot\n\t\t});\n\n\t\tconst endMarker = createEndMarker();\n\n\t\t// Create the complete task section\n\t\tconst taskSection = startMarker + tasksOutput + endMarker;\n\n\t\t// Read current README content\n\t\tconst readmePath = path.join(actualProjectRoot, 'README.md');\n\t\tlet readmeContent = '';\n\t\ttry {\n\t\t\treadmeContent = fs.readFileSync(readmePath, 'utf8');\n\t\t} catch (err) {\n\t\t\tif (err.code === 'ENOENT') {\n\t\t\t\t// Create basic README if it doesn't exist\n\t\t\t\treadmeContent = createBasicReadme(projectName);\n\t\t\t} else {\n\t\t\t\tthrow err;\n\t\t\t}\n\t\t}\n\n\t\t// Check if export markers exist and replace content between them\n\t\tconst startComment = '<!-- TASKMASTER_EXPORT_START -->';\n\t\tconst endComment = '<!-- TASKMASTER_EXPORT_END -->';\n\n\t\tlet updatedContent;\n\t\tconst startIndex = readmeContent.indexOf(startComment);\n\t\tconst endIndex = readmeContent.indexOf(endComment);\n\n\t\tif (startIndex !== -1 && endIndex !== -1) {\n\t\t\t// Replace existing task section\n\t\t\tconst beforeTasks = readmeContent.substring(0, startIndex);\n\t\t\tconst afterTasks = readmeContent.substring(endIndex + endComment.length);\n\t\t\tupdatedContent = beforeTasks + taskSection + afterTasks;\n\t\t} else {\n\t\t\t// Append to end of README\n\t\t\tupdatedContent = readmeContent + '\\n' + taskSection;\n\t\t}\n\n\t\t// Write updated content to README\n\t\tfs.writeFileSync(readmePath, updatedContent, 'utf8');\n\n\t\tconsole.log(chalk.green('✅ Successfully synced tasks to 
README.md'));\n\t\tconsole.log(\n\t\t\tchalk.cyan(\n\t\t\t\t`📋 Export details: ${withSubtasks ? 'with' : 'without'} subtasks${status ? `, status: ${status}` : ''}`\n\t\t\t)\n\t\t);\n\t\tconsole.log(chalk.gray(`📍 Location: ${readmePath}`));\n\n\t\treturn true;\n\t} catch (error) {\n\t\tconsole.log(chalk.red('❌ Failed to sync tasks to README:'), error.message);\n\t\tlog('error', `README sync error: ${error.message}`);\n\t\treturn false;\n\t}\n}\n\nexport default syncTasksToReadme;\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/list-tags.js", "/**\n * list-tags.js\n * Direct function implementation for listing all tags\n */\n\nimport { tags } from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for listing all tags with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {boolean} [args.showMetadata=false] - Whether to include metadata in the output\n * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function listTagsDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, showMetadata = false, projectRoot } = args;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('listTagsDirect 
called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlog.info('Listing all tags');\n\n\t\t// Prepare options\n\t\tconst options = {\n\t\t\tshowMetadata\n\t\t};\n\n\t\t// Call the tags function\n\t\tconst result = await tags(\n\t\t\ttasksJsonPath,\n\t\t\toptions,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t);\n\n\t\t// Transform the result to remove full task data and provide summary info\n\t\tconst tagsSummary = result.tags.map((tag) => {\n\t\t\tconst tasks = tag.tasks || [];\n\n\t\t\t// Calculate status breakdown\n\t\t\tconst statusBreakdown = tasks.reduce((acc, task) => {\n\t\t\t\tconst status = task.status || 'pending';\n\t\t\t\tacc[status] = (acc[status] || 0) + 1;\n\t\t\t\treturn acc;\n\t\t\t}, {});\n\n\t\t\t// Calculate subtask counts\n\t\t\tconst subtaskCounts = tasks.reduce(\n\t\t\t\t(acc, task) => {\n\t\t\t\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\t\t\t\tacc.totalSubtasks += task.subtasks.length;\n\t\t\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\t\t\tconst subStatus = subtask.status || 'pending';\n\t\t\t\t\t\t\tacc.subtasksByStatus[subStatus] =\n\t\t\t\t\t\t\t\t(acc.subtasksByStatus[subStatus] || 0) + 1;\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t\treturn acc;\n\t\t\t\t},\n\t\t\t\t{ totalSubtasks: 0, subtasksByStatus: {} }\n\t\t\t);\n\n\t\t\treturn {\n\t\t\t\tname: tag.name,\n\t\t\t\tisCurrent: tag.isCurrent,\n\t\t\t\ttaskCount: tasks.length,\n\t\t\t\tcompletedTasks: tag.completedTasks,\n\t\t\t\tstatusBreakdown,\n\t\t\t\tsubtaskCounts,\n\t\t\t\tcreated: tag.created,\n\t\t\t\tdescription: tag.description\n\t\t\t};\n\t\t});\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\ttags: 
tagsSummary,\n\t\t\t\tcurrentTag: result.currentTag,\n\t\t\t\ttotalTags: result.totalTags,\n\t\t\t\tmessage: `Found ${result.totalTags} tag(s)`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in listTagsDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'LIST_TAGS_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/research.js", "/**\n * research.js\n * Direct function implementation for AI-powered research queries\n */\n\nimport path from 'path';\nimport { performResearch } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for performing AI-powered research with project context.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.query - Research query/prompt (required)\n * @param {string} [args.taskIds] - Comma-separated list of task/subtask IDs for context\n * @param {string} [args.filePaths] - Comma-separated list of file paths for context\n * @param {string} [args.customContext] - Additional custom context text\n * @param {boolean} [args.includeProjectTree=false] - Include project file tree in context\n * @param {string} [args.detailLevel='medium'] - Detail level: 'low', 'medium', 'high'\n * @param {string} [args.saveTo] - Automatically save to task/subtask ID (e.g., \"15\" or \"15.2\")\n * @param {boolean} [args.saveToFile=false] - Save research results to .taskmaster/docs/research/ directory\n * @param {string} [args.projectRoot] - Project root path\n * @param {string} [args.tag] - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context 
(session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function researchDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst {\n\t\tquery,\n\t\ttaskIds,\n\t\tfilePaths,\n\t\tcustomContext,\n\t\tincludeProjectTree = false,\n\t\tdetailLevel = 'medium',\n\t\tsaveTo,\n\t\tsaveToFile = false,\n\t\tprojectRoot,\n\t\ttag\n\t} = args;\n\tconst { session } = context; // Destructure session from context\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check required parameters\n\t\tif (!query || typeof query !== 'string' || query.trim().length === 0) {\n\t\t\tlog.error('Missing or invalid required parameter: query');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage:\n\t\t\t\t\t\t'The query parameter is required and must be a non-empty string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Parse comma-separated task IDs if provided\n\t\tconst parsedTaskIds = taskIds\n\t\t\t? taskIds\n\t\t\t\t\t.split(',')\n\t\t\t\t\t.map((id) => id.trim())\n\t\t\t\t\t.filter((id) => id.length > 0)\n\t\t\t: [];\n\n\t\t// Parse comma-separated file paths if provided\n\t\tconst parsedFilePaths = filePaths\n\t\t\t? 
filePaths\n\t\t\t\t\t.split(',')\n\t\t\t\t\t.map((path) => path.trim())\n\t\t\t\t\t.filter((path) => path.length > 0)\n\t\t\t: [];\n\n\t\t// Validate detail level\n\t\tconst validDetailLevels = ['low', 'medium', 'high'];\n\t\tif (!validDetailLevels.includes(detailLevel)) {\n\t\t\tlog.error(`Invalid detail level: ${detailLevel}`);\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_PARAMETER',\n\t\t\t\t\tmessage: `Detail level must be one of: ${validDetailLevels.join(', ')}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlog.info(\n\t\t\t`Performing research query: \"${query.substring(0, 100)}${query.length > 100 ? '...' : ''}\", ` +\n\t\t\t\t`taskIds: [${parsedTaskIds.join(', ')}], ` +\n\t\t\t\t`filePaths: [${parsedFilePaths.join(', ')}], ` +\n\t\t\t\t`detailLevel: ${detailLevel}, ` +\n\t\t\t\t`includeProjectTree: ${includeProjectTree}, ` +\n\t\t\t\t`projectRoot: ${projectRoot}`\n\t\t);\n\n\t\t// Prepare options for the research function\n\t\tconst researchOptions = {\n\t\t\ttaskIds: parsedTaskIds,\n\t\t\tfilePaths: parsedFilePaths,\n\t\t\tcustomContext: customContext || '',\n\t\t\tincludeProjectTree,\n\t\t\tdetailLevel,\n\t\t\tprojectRoot,\n\t\t\ttag,\n\t\t\tsaveToFile\n\t\t};\n\n\t\t// Prepare context for the research function\n\t\tconst researchContext = {\n\t\t\tsession,\n\t\t\tmcpLog,\n\t\t\tcommandName: 'research',\n\t\t\toutputType: 'mcp'\n\t\t};\n\n\t\t// Call the performResearch function\n\t\tconst result = await performResearch(\n\t\t\tquery.trim(),\n\t\t\tresearchOptions,\n\t\t\tresearchContext,\n\t\t\t'json', // outputFormat - use 'json' to suppress CLI UI\n\t\t\tfalse // allowFollowUp - disable for MCP calls\n\t\t);\n\n\t\t// Auto-save to task/subtask if requested\n\t\tif (saveTo) {\n\t\t\ttry {\n\t\t\t\tconst isSubtask = saveTo.includes('.');\n\n\t\t\t\t// Format research content for saving\n\t\t\t\tconst researchContent = `## Research Query: ${query.trim()}\n\n**Detail Level:** 
${result.detailLevel}\n**Context Size:** ${result.contextSize} characters\n**Timestamp:** ${new Date().toLocaleDateString()} ${new Date().toLocaleTimeString()}\n\n### Results\n\n${result.result}`;\n\n\t\t\t\tif (isSubtask) {\n\t\t\t\t\t// Save to subtask\n\t\t\t\t\tconst { updateSubtaskById } = await import(\n\t\t\t\t\t\t'../../../../scripts/modules/task-manager/update-subtask-by-id.js'\n\t\t\t\t\t);\n\n\t\t\t\t\tconst tasksPath = path.join(\n\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\t'.taskmaster',\n\t\t\t\t\t\t'tasks',\n\t\t\t\t\t\t'tasks.json'\n\t\t\t\t\t);\n\t\t\t\t\tawait updateSubtaskById(\n\t\t\t\t\t\ttasksPath,\n\t\t\t\t\t\tsaveTo,\n\t\t\t\t\t\tresearchContent,\n\t\t\t\t\t\tfalse, // useResearch = false for simple append\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tsession,\n\t\t\t\t\t\t\tmcpLog,\n\t\t\t\t\t\t\tcommandName: 'research-save',\n\t\t\t\t\t\t\toutputType: 'mcp',\n\t\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\t\ttag\n\t\t\t\t\t\t},\n\t\t\t\t\t\t'json'\n\t\t\t\t\t);\n\n\t\t\t\t\tlog.info(`Research saved to subtask ${saveTo}`);\n\t\t\t\t} else {\n\t\t\t\t\t// Save to task\n\t\t\t\t\tconst updateTaskById = (\n\t\t\t\t\t\tawait import(\n\t\t\t\t\t\t\t'../../../../scripts/modules/task-manager/update-task-by-id.js'\n\t\t\t\t\t\t)\n\t\t\t\t\t).default;\n\n\t\t\t\t\tconst taskIdNum = parseInt(saveTo, 10);\n\t\t\t\t\tconst tasksPath = path.join(\n\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\t'.taskmaster',\n\t\t\t\t\t\t'tasks',\n\t\t\t\t\t\t'tasks.json'\n\t\t\t\t\t);\n\t\t\t\t\tawait updateTaskById(\n\t\t\t\t\t\ttasksPath,\n\t\t\t\t\t\ttaskIdNum,\n\t\t\t\t\t\tresearchContent,\n\t\t\t\t\t\tfalse, // useResearch = false for simple append\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tsession,\n\t\t\t\t\t\t\tmcpLog,\n\t\t\t\t\t\t\tcommandName: 'research-save',\n\t\t\t\t\t\t\toutputType: 'mcp',\n\t\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\t\ttag\n\t\t\t\t\t\t},\n\t\t\t\t\t\t'json',\n\t\t\t\t\t\ttrue // appendMode = true\n\t\t\t\t\t);\n\n\t\t\t\t\tlog.info(`Research saved to task ${saveTo}`);\n\t\t\t\t}\n\t\t\t} 
catch (saveError) {\n\t\t\t\tlog.warn(`Error saving research to task/subtask: ${saveError.message}`);\n\t\t\t}\n\t\t}\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tquery: result.query,\n\t\t\t\tresult: result.result,\n\t\t\t\tcontextSize: result.contextSize,\n\t\t\t\tcontextTokens: result.contextTokens,\n\t\t\t\ttokenBreakdown: result.tokenBreakdown,\n\t\t\t\tsystemPromptTokens: result.systemPromptTokens,\n\t\t\t\tuserPromptTokens: result.userPromptTokens,\n\t\t\t\ttotalInputTokens: result.totalInputTokens,\n\t\t\t\tdetailLevel: result.detailLevel,\n\t\t\t\ttelemetryData: result.telemetryData,\n\t\t\t\ttagInfo: result.tagInfo,\n\t\t\t\tsavedFilePath: result.savedFilePath\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in researchDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'RESEARCH_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/ai-services-unified.js", "/**\n * ai-services-unified.js\n * Centralized AI service layer using provider modules and config-manager.\n */\n\n// Vercel AI SDK functions are NOT called directly anymore.\n// import { generateText, streamText, generateObject } from 'ai';\n\n// --- Core Dependencies ---\nimport {\n\tMODEL_MAP,\n\tgetAzureBaseURL,\n\tgetBaseUrlForRole,\n\tgetBedrockBaseURL,\n\tgetDebugFlag,\n\tgetFallbackModelId,\n\tgetFallbackProvider,\n\tgetMainModelId,\n\tgetMainProvider,\n\tgetOllamaBaseURL,\n\tgetParametersForRole,\n\tgetResearchModelId,\n\tgetResearchProvider,\n\tgetResponseLanguage,\n\tgetUserId,\n\tgetVertexLocation,\n\tgetVertexProjectId,\n\tisApiKeySet,\n\tprovidersWithoutApiKeys\n} from './config-manager.js';\nimport {\n\tfindProjectRoot,\n\tgetCurrentTag,\n\tlog,\n\tresolveEnvVariable\n} from './utils.js';\n\n// 
Import provider classes\nimport {\n\tAnthropicAIProvider,\n\tAzureProvider,\n\tBedrockAIProvider,\n\tClaudeCodeProvider,\n\tGeminiCliProvider,\n\tGoogleAIProvider,\n\tGroqProvider,\n\tOllamaAIProvider,\n\tOpenAIProvider,\n\tOpenRouterAIProvider,\n\tPerplexityAIProvider,\n\tVertexAIProvider,\n\tXAIProvider\n} from '../../src/ai-providers/index.js';\n\n// Import the provider registry\nimport ProviderRegistry from '../../src/provider-registry/index.js';\n\n// Create provider instances\nconst PROVIDERS = {\n\tanthropic: new AnthropicAIProvider(),\n\tperplexity: new PerplexityAIProvider(),\n\tgoogle: new GoogleAIProvider(),\n\topenai: new OpenAIProvider(),\n\txai: new XAIProvider(),\n\tgroq: new GroqProvider(),\n\topenrouter: new OpenRouterAIProvider(),\n\tollama: new OllamaAIProvider(),\n\tbedrock: new BedrockAIProvider(),\n\tazure: new AzureProvider(),\n\tvertex: new VertexAIProvider(),\n\t'claude-code': new ClaudeCodeProvider(),\n\t'gemini-cli': new GeminiCliProvider()\n};\n\nfunction _getProvider(providerName) {\n\t// First check the static PROVIDERS object\n\tif (PROVIDERS[providerName]) {\n\t\treturn PROVIDERS[providerName];\n\t}\n\n\t// If not found, check the provider registry\n\tconst providerRegistry = ProviderRegistry.getInstance();\n\tif (providerRegistry.hasProvider(providerName)) {\n\t\tlog('debug', `Provider \"${providerName}\" found in dynamic registry`);\n\t\treturn providerRegistry.getProvider(providerName);\n\t}\n\n\t// Provider not found in either location\n\treturn null;\n}\n\n// Helper function to get cost for a specific model\nfunction _getCostForModel(providerName, modelId) {\n\tif (!MODEL_MAP || !MODEL_MAP[providerName]) {\n\t\tlog(\n\t\t\t'warn',\n\t\t\t`Provider \"${providerName}\" not found in MODEL_MAP. 
Cannot determine cost for model ${modelId}.`\n\t\t);\n\t\treturn { inputCost: 0, outputCost: 0, currency: 'USD' }; // Default to zero cost\n\t}\n\n\tconst modelData = MODEL_MAP[providerName].find((m) => m.id === modelId);\n\n\tif (!modelData || !modelData.cost_per_1m_tokens) {\n\t\tlog(\n\t\t\t'debug',\n\t\t\t`Cost data not found for model \"${modelId}\" under provider \"${providerName}\". Assuming zero cost.`\n\t\t);\n\t\treturn { inputCost: 0, outputCost: 0, currency: 'USD' }; // Default to zero cost\n\t}\n\n\t// Ensure currency is part of the returned object, defaulting if not present\n\tconst currency = modelData.cost_per_1m_tokens.currency || 'USD';\n\n\treturn {\n\t\tinputCost: modelData.cost_per_1m_tokens.input || 0,\n\t\toutputCost: modelData.cost_per_1m_tokens.output || 0,\n\t\tcurrency: currency\n\t};\n}\n\n// Helper function to get tag information for responses\nfunction _getTagInfo(projectRoot) {\n\ttry {\n\t\tif (!projectRoot) {\n\t\t\treturn { currentTag: 'master', availableTags: ['master'] };\n\t\t}\n\n\t\tconst currentTag = getCurrentTag(projectRoot);\n\n\t\t// Read available tags from tasks.json\n\t\tlet availableTags = ['master']; // Default fallback\n\t\ttry {\n\t\t\tconst path = require('path');\n\t\t\tconst fs = require('fs');\n\t\t\tconst tasksPath = path.join(\n\t\t\t\tprojectRoot,\n\t\t\t\t'.taskmaster',\n\t\t\t\t'tasks',\n\t\t\t\t'tasks.json'\n\t\t\t);\n\n\t\t\tif (fs.existsSync(tasksPath)) {\n\t\t\t\tconst tasksData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));\n\t\t\t\tif (tasksData && typeof tasksData === 'object') {\n\t\t\t\t\t// Check if it's tagged format (has tag-like keys with tasks arrays)\n\t\t\t\t\tconst potentialTags = Object.keys(tasksData).filter(\n\t\t\t\t\t\t(key) =>\n\t\t\t\t\t\t\ttasksData[key] &&\n\t\t\t\t\t\t\ttypeof tasksData[key] === 'object' &&\n\t\t\t\t\t\t\tArray.isArray(tasksData[key].tasks)\n\t\t\t\t\t);\n\n\t\t\t\t\tif (potentialTags.length > 0) {\n\t\t\t\t\t\tavailableTags = 
potentialTags;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (readError) {\n\t\t\t// Silently fall back to default if we can't read tasks file\n\t\t\tif (getDebugFlag()) {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`Could not read tasks file for available tags: ${readError.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\treturn {\n\t\t\tcurrentTag: currentTag || 'master',\n\t\t\tavailableTags: availableTags\n\t\t};\n\t} catch (error) {\n\t\tif (getDebugFlag()) {\n\t\t\tlog('debug', `Error getting tag information: ${error.message}`);\n\t\t}\n\t\treturn { currentTag: 'master', availableTags: ['master'] };\n\t}\n}\n\n// --- Configuration for Retries ---\nconst MAX_RETRIES = 2;\nconst INITIAL_RETRY_DELAY_MS = 1000;\n\n// Helper function to check if an error is retryable\nfunction isRetryableError(error) {\n\tconst errorMessage = error.message?.toLowerCase() || '';\n\treturn (\n\t\terrorMessage.includes('rate limit') ||\n\t\terrorMessage.includes('overloaded') ||\n\t\terrorMessage.includes('service temporarily unavailable') ||\n\t\terrorMessage.includes('timeout') ||\n\t\terrorMessage.includes('network error') ||\n\t\terror.status === 429 ||\n\t\terror.status >= 500\n\t);\n}\n\n/**\n * Extracts a user-friendly error message from a potentially complex AI error object.\n * Prioritizes nested messages and falls back to the top-level message.\n * @param {Error | object | any} error - The error object.\n * @returns {string} A concise error message.\n */\nfunction _extractErrorMessage(error) {\n\ttry {\n\t\t// Attempt 1: Look for Vercel SDK specific nested structure (common)\n\t\tif (error?.data?.error?.message) {\n\t\t\treturn error.data.error.message;\n\t\t}\n\n\t\t// Attempt 2: Look for nested error message directly in the error object\n\t\tif (error?.error?.message) {\n\t\t\treturn error.error.message;\n\t\t}\n\n\t\t// Attempt 3: Look for nested error message in response body if it's JSON string\n\t\tif (typeof error?.responseBody === 'string') {\n\t\t\ttry {\n\t\t\t\tconst 
body = JSON.parse(error.responseBody);\n\t\t\t\tif (body?.error?.message) {\n\t\t\t\t\treturn body.error.message;\n\t\t\t\t}\n\t\t\t} catch (parseError) {\n\t\t\t\t// Ignore if responseBody is not valid JSON\n\t\t\t}\n\t\t}\n\n\t\t// Attempt 4: Use the top-level message if it exists\n\t\tif (typeof error?.message === 'string' && error.message) {\n\t\t\treturn error.message;\n\t\t}\n\n\t\t// Attempt 5: Handle simple string errors\n\t\tif (typeof error === 'string') {\n\t\t\treturn error;\n\t\t}\n\n\t\t// Fallback\n\t\treturn 'An unknown AI service error occurred.';\n\t} catch (e) {\n\t\t// Safety net\n\t\treturn 'Failed to extract error message.';\n\t}\n}\n\n/**\n * Internal helper to resolve the API key for a given provider.\n * @param {string} providerName - The name of the provider (lowercase).\n * @param {object|null} session - Optional MCP session object.\n * @param {string|null} projectRoot - Optional project root path for .env fallback.\n * @returns {string|null} The API key or null if not found/needed.\n * @throws {Error} If a required API key is missing.\n */\nfunction _resolveApiKey(providerName, session, projectRoot = null) {\n\t// Get provider instance\n\tconst provider = _getProvider(providerName);\n\tif (!provider) {\n\t\tthrow new Error(\n\t\t\t`Unknown provider '${providerName}' for API key resolution.`\n\t\t);\n\t}\n\n\t// All providers must implement getRequiredApiKeyName()\n\tconst envVarName = provider.getRequiredApiKeyName();\n\n\t// If envVarName is null (like for MCP), return null directly\n\tif (envVarName === null) {\n\t\treturn null;\n\t}\n\n\tconst apiKey = resolveEnvVariable(envVarName, session, projectRoot);\n\n\t// Special handling for providers that can use alternative auth or no API key\n\tif (!provider.isRequiredApiKey()) {\n\t\treturn apiKey || null;\n\t}\n\n\tif (!apiKey) {\n\t\tthrow new Error(\n\t\t\t`Required API key ${envVarName} for provider '${providerName}' is not set in environment, session, or .env 
file.`\n\t\t);\n\t}\n\treturn apiKey;\n}\n\n/**\n * Internal helper to attempt a provider-specific AI API call with retries.\n *\n * @param {function} providerApiFn - The specific provider function to call (e.g., generateAnthropicText).\n * @param {object} callParams - Parameters object for the provider function.\n * @param {string} providerName - Name of the provider (for logging).\n * @param {string} modelId - Specific model ID (for logging).\n * @param {string} attemptRole - The role being attempted (for logging).\n * @returns {Promise<object>} The result from the successful API call.\n * @throws {Error} If the call fails after all retries.\n */\nasync function _attemptProviderCallWithRetries(\n\tprovider,\n\tserviceType,\n\tcallParams,\n\tproviderName,\n\tmodelId,\n\tattemptRole\n) {\n\tlet retries = 0;\n\tconst fnName = serviceType;\n\n\twhile (retries <= MAX_RETRIES) {\n\t\ttry {\n\t\t\tif (getDebugFlag()) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Attempt ${retries + 1}/${MAX_RETRIES + 1} calling ${fnName} (Provider: ${providerName}, Model: ${modelId}, Role: ${attemptRole})`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Call the appropriate method on the provider instance\n\t\t\tconst result = await provider[serviceType](callParams);\n\n\t\t\tif (getDebugFlag()) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`${fnName} succeeded for role ${attemptRole} (Provider: ${providerName}) on attempt ${retries + 1}`\n\t\t\t\t);\n\t\t\t}\n\t\t\treturn result;\n\t\t} catch (error) {\n\t\t\tlog(\n\t\t\t\t'warn',\n\t\t\t\t`Attempt ${retries + 1} failed for role ${attemptRole} (${fnName} / ${providerName}): ${error.message}`\n\t\t\t);\n\n\t\t\tif (isRetryableError(error) && retries < MAX_RETRIES) {\n\t\t\t\tretries++;\n\t\t\t\tconst delay = INITIAL_RETRY_DELAY_MS * 2 ** (retries - 1);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Something went wrong on the provider side. 
Retrying in ${delay / 1000}s...`\n\t\t\t\t);\n\t\t\t\tawait new Promise((resolve) => setTimeout(resolve, delay));\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'error',\n\t\t\t\t\t`Something went wrong on the provider side. Max retries reached for role ${attemptRole} (${fnName} / ${providerName}).`\n\t\t\t\t);\n\t\t\t\tthrow error;\n\t\t\t}\n\t\t}\n\t}\n\t// Should not be reached due to throw in the else block\n\tthrow new Error(\n\t\t`Exhausted all retries for role ${attemptRole} (${fnName} / ${providerName})`\n\t);\n}\n\n/**\n * Base logic for unified service functions.\n * @param {string} serviceType - Type of service ('generateText', 'streamText', 'generateObject').\n * @param {object} params - Original parameters passed to the service function.\n * @param {string} params.role - The initial client role.\n * @param {object} [params.session=null] - Optional MCP session object.\n * @param {string} [params.projectRoot] - Optional project root path.\n * @param {string} params.commandName - Name of the command invoking the service.\n * @param {string} params.outputType - 'cli' or 'mcp'.\n * @param {string} [params.systemPrompt] - Optional system prompt.\n * @param {string} [params.prompt] - The prompt for the AI.\n * @param {string} [params.schema] - The Zod schema for the expected object.\n * @param {string} [params.objectName] - Name for object/tool.\n * @returns {Promise<any>} Result from the underlying provider call.\n */\nasync function _unifiedServiceRunner(serviceType, params) {\n\tconst {\n\t\trole: initialRole,\n\t\tsession,\n\t\tprojectRoot,\n\t\tsystemPrompt,\n\t\tprompt,\n\t\tschema,\n\t\tobjectName,\n\t\tcommandName,\n\t\toutputType,\n\t\t...restApiParams\n\t} = params;\n\tif (getDebugFlag()) {\n\t\tlog('info', `${serviceType}Service called`, {\n\t\t\trole: initialRole,\n\t\t\tcommandName,\n\t\t\toutputType,\n\t\t\tprojectRoot\n\t\t});\n\t}\n\n\tconst effectiveProjectRoot = projectRoot || findProjectRoot();\n\tconst userId = 
getUserId(effectiveProjectRoot);\n\n\tlet sequence;\n\tif (initialRole === 'main') {\n\t\tsequence = ['main', 'fallback', 'research'];\n\t} else if (initialRole === 'research') {\n\t\tsequence = ['research', 'fallback', 'main'];\n\t} else if (initialRole === 'fallback') {\n\t\tsequence = ['fallback', 'main', 'research'];\n\t} else {\n\t\tlog(\n\t\t\t'warn',\n\t\t\t`Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.`\n\t\t);\n\t\tsequence = ['main', 'fallback', 'research'];\n\t}\n\n\tlet lastError = null;\n\tlet lastCleanErrorMessage =\n\t\t'AI service call failed for all configured roles.';\n\n\tfor (const currentRole of sequence) {\n\t\tlet providerName;\n\t\tlet modelId;\n\t\tlet apiKey;\n\t\tlet roleParams;\n\t\tlet provider;\n\t\tlet baseURL;\n\t\tlet providerResponse;\n\t\tlet telemetryData = null;\n\n\t\ttry {\n\t\t\tlog('info', `New AI service call with role: ${currentRole}`);\n\n\t\t\tif (currentRole === 'main') {\n\t\t\t\tproviderName = getMainProvider(effectiveProjectRoot);\n\t\t\t\tmodelId = getMainModelId(effectiveProjectRoot);\n\t\t\t} else if (currentRole === 'research') {\n\t\t\t\tproviderName = getResearchProvider(effectiveProjectRoot);\n\t\t\t\tmodelId = getResearchModelId(effectiveProjectRoot);\n\t\t\t} else if (currentRole === 'fallback') {\n\t\t\t\tproviderName = getFallbackProvider(effectiveProjectRoot);\n\t\t\t\tmodelId = getFallbackModelId(effectiveProjectRoot);\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'error',\n\t\t\t\t\t`Unknown role encountered in _unifiedServiceRunner: ${currentRole}`\n\t\t\t\t);\n\t\t\t\tlastError =\n\t\t\t\t\tlastError || new Error(`Unknown AI role specified: ${currentRole}`);\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tif (!providerName || !modelId) {\n\t\t\t\tlog(\n\t\t\t\t\t'warn',\n\t\t\t\t\t`Skipping role '${currentRole}': Provider or Model ID not configured.`\n\t\t\t\t);\n\t\t\t\tlastError =\n\t\t\t\t\tlastError ||\n\t\t\t\t\tnew Error(\n\t\t\t\t\t\t`Configuration missing for role 
'${currentRole}'. Provider: ${providerName}, Model: ${modelId}`\n\t\t\t\t\t);\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\t// Get provider instance\n\t\t\tprovider = _getProvider(providerName?.toLowerCase());\n\t\t\tif (!provider) {\n\t\t\t\tlog(\n\t\t\t\t\t'warn',\n\t\t\t\t\t`Skipping role '${currentRole}': Provider '${providerName}' not supported.`\n\t\t\t\t);\n\t\t\t\tlastError =\n\t\t\t\t\tlastError ||\n\t\t\t\t\tnew Error(`Unsupported provider configured: ${providerName}`);\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\t// Check API key if needed\n\t\t\tif (!providersWithoutApiKeys.includes(providerName?.toLowerCase())) {\n\t\t\t\tif (!isApiKeySet(providerName, session, effectiveProjectRoot)) {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'warn',\n\t\t\t\t\t\t`Skipping role '${currentRole}' (Provider: ${providerName}): API key not set or invalid.`\n\t\t\t\t\t);\n\t\t\t\t\tlastError =\n\t\t\t\t\t\tlastError ||\n\t\t\t\t\t\tnew Error(\n\t\t\t\t\t\t\t`API key for provider '${providerName}' (role: ${currentRole}) is not set.`\n\t\t\t\t\t\t);\n\t\t\t\t\tcontinue; // Skip to the next role in the sequence\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Get base URL if configured (optional for most providers)\n\t\t\tbaseURL = getBaseUrlForRole(currentRole, effectiveProjectRoot);\n\n\t\t\t// For Azure, use the global Azure base URL if role-specific URL is not configured\n\t\t\tif (providerName?.toLowerCase() === 'azure' && !baseURL) {\n\t\t\t\tbaseURL = getAzureBaseURL(effectiveProjectRoot);\n\t\t\t\tlog('debug', `Using global Azure base URL: ${baseURL}`);\n\t\t\t} else if (providerName?.toLowerCase() === 'ollama' && !baseURL) {\n\t\t\t\t// For Ollama, use the global Ollama base URL if role-specific URL is not configured\n\t\t\t\tbaseURL = getOllamaBaseURL(effectiveProjectRoot);\n\t\t\t\tlog('debug', `Using global Ollama base URL: ${baseURL}`);\n\t\t\t} else if (providerName?.toLowerCase() === 'bedrock' && !baseURL) {\n\t\t\t\t// For Bedrock, use the global Bedrock base URL if role-specific URL is not 
configured\n\t\t\t\tbaseURL = getBedrockBaseURL(effectiveProjectRoot);\n\t\t\t\tlog('debug', `Using global Bedrock base URL: ${baseURL}`);\n\t\t\t}\n\n\t\t\t// Get AI parameters for the current role\n\t\t\troleParams = getParametersForRole(currentRole, effectiveProjectRoot);\n\t\t\tapiKey = _resolveApiKey(\n\t\t\t\tproviderName?.toLowerCase(),\n\t\t\t\tsession,\n\t\t\t\teffectiveProjectRoot\n\t\t\t);\n\n\t\t\t// Prepare provider-specific configuration\n\t\t\tlet providerSpecificParams = {};\n\n\t\t\t// Handle Vertex AI specific configuration\n\t\t\tif (providerName?.toLowerCase() === 'vertex') {\n\t\t\t\t// Get Vertex project ID and location\n\t\t\t\tconst projectId =\n\t\t\t\t\tgetVertexProjectId(effectiveProjectRoot) ||\n\t\t\t\t\tresolveEnvVariable(\n\t\t\t\t\t\t'VERTEX_PROJECT_ID',\n\t\t\t\t\t\tsession,\n\t\t\t\t\t\teffectiveProjectRoot\n\t\t\t\t\t);\n\n\t\t\t\tconst location =\n\t\t\t\t\tgetVertexLocation(effectiveProjectRoot) ||\n\t\t\t\t\tresolveEnvVariable(\n\t\t\t\t\t\t'VERTEX_LOCATION',\n\t\t\t\t\t\tsession,\n\t\t\t\t\t\teffectiveProjectRoot\n\t\t\t\t\t) ||\n\t\t\t\t\t'us-central1';\n\n\t\t\t\t// Get credentials path if available\n\t\t\t\tconst credentialsPath = resolveEnvVariable(\n\t\t\t\t\t'GOOGLE_APPLICATION_CREDENTIALS',\n\t\t\t\t\tsession,\n\t\t\t\t\teffectiveProjectRoot\n\t\t\t\t);\n\n\t\t\t\t// Add Vertex-specific parameters\n\t\t\t\tproviderSpecificParams = {\n\t\t\t\t\tprojectId,\n\t\t\t\t\tlocation,\n\t\t\t\t\t...(credentialsPath && { credentials: { credentialsFromEnv: true } })\n\t\t\t\t};\n\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`Using Vertex AI configuration: Project ID=${projectId}, Location=${location}`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst messages = [];\n\t\t\tconst responseLanguage = getResponseLanguage(effectiveProjectRoot);\n\t\t\tconst systemPromptWithLanguage = `${systemPrompt} \\n\\n Always respond in ${responseLanguage}.`;\n\t\t\tmessages.push({\n\t\t\t\trole: 'system',\n\t\t\t\tcontent: 
systemPromptWithLanguage.trim()\n\t\t\t});\n\n\t\t\t// IN THE FUTURE WHEN DOING CONTEXT IMPROVEMENTS\n\t\t\t// {\n\t\t\t// type: 'text',\n\t\t\t// text: 'Large cached context here like a tasks json',\n\t\t\t// providerOptions: {\n\t\t\t// anthropic: { cacheControl: { type: 'ephemeral' } }\n\t\t\t// }\n\t\t\t// }\n\n\t\t\t// Example\n\t\t\t// if (params.context) { // context is a json string of a tasks object or some other stu\n\t\t\t// messages.push({\n\t\t\t// type: 'text',\n\t\t\t// text: params.context,\n\t\t\t// providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' } } }\n\t\t\t// });\n\t\t\t// }\n\n\t\t\tif (prompt) {\n\t\t\t\tmessages.push({ role: 'user', content: prompt });\n\t\t\t} else {\n\t\t\t\tthrow new Error('User prompt content is missing.');\n\t\t\t}\n\n\t\t\tconst callParams = {\n\t\t\t\tapiKey,\n\t\t\t\tmodelId,\n\t\t\t\tmaxTokens: roleParams.maxTokens,\n\t\t\t\ttemperature: roleParams.temperature,\n\t\t\t\tmessages,\n\t\t\t\t...(baseURL && { baseURL }),\n\t\t\t\t...(serviceType === 'generateObject' && { schema, objectName }),\n\t\t\t\t...providerSpecificParams,\n\t\t\t\t...restApiParams\n\t\t\t};\n\n\t\t\tproviderResponse = await _attemptProviderCallWithRetries(\n\t\t\t\tprovider,\n\t\t\t\tserviceType,\n\t\t\t\tcallParams,\n\t\t\t\tproviderName,\n\t\t\t\tmodelId,\n\t\t\t\tcurrentRole\n\t\t\t);\n\n\t\t\tif (userId && providerResponse && providerResponse.usage) {\n\t\t\t\ttry {\n\t\t\t\t\ttelemetryData = await logAiUsage({\n\t\t\t\t\t\tuserId,\n\t\t\t\t\t\tcommandName,\n\t\t\t\t\t\tproviderName,\n\t\t\t\t\t\tmodelId,\n\t\t\t\t\t\tinputTokens: providerResponse.usage.inputTokens,\n\t\t\t\t\t\toutputTokens: providerResponse.usage.outputTokens,\n\t\t\t\t\t\toutputType\n\t\t\t\t\t});\n\t\t\t\t} catch (telemetryError) {\n\t\t\t\t\t// logAiUsage already logs its own errors and returns null on failure\n\t\t\t\t\t// No need to log again here, telemetryData will remain null\n\t\t\t\t}\n\t\t\t} else if (userId && providerResponse && 
!providerResponse.usage) {\n\t\t\t\tlog(\n\t\t\t\t\t'warn',\n\t\t\t\t\t`Cannot log telemetry for ${commandName} (${providerName}/${modelId}): AI result missing 'usage' data. (May be expected for streams)`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tlet finalMainResult;\n\t\t\tif (serviceType === 'generateText') {\n\t\t\t\tfinalMainResult = providerResponse.text;\n\t\t\t} else if (serviceType === 'generateObject') {\n\t\t\t\tfinalMainResult = providerResponse.object;\n\t\t\t} else if (serviceType === 'streamText') {\n\t\t\t\tfinalMainResult = providerResponse;\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'error',\n\t\t\t\t\t`Unknown serviceType in _unifiedServiceRunner: ${serviceType}`\n\t\t\t\t);\n\t\t\t\tfinalMainResult = providerResponse;\n\t\t\t}\n\n\t\t\t// Get tag information for the response\n\t\t\tconst tagInfo = _getTagInfo(effectiveProjectRoot);\n\n\t\t\treturn {\n\t\t\t\tmainResult: finalMainResult,\n\t\t\t\ttelemetryData: telemetryData,\n\t\t\t\ttagInfo: tagInfo\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tconst cleanMessage = _extractErrorMessage(error);\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`Service call failed for role ${currentRole} (Provider: ${providerName || 'unknown'}, Model: ${modelId || 'unknown'}): ${cleanMessage}`\n\t\t\t);\n\t\t\tlastError = error;\n\t\t\tlastCleanErrorMessage = cleanMessage;\n\n\t\t\tif (serviceType === 'generateObject') {\n\t\t\t\tconst lowerCaseMessage = cleanMessage.toLowerCase();\n\t\t\t\tif (\n\t\t\t\t\tlowerCaseMessage.includes(\n\t\t\t\t\t\t'no endpoints found that support tool use'\n\t\t\t\t\t) ||\n\t\t\t\t\tlowerCaseMessage.includes('does not support tool_use') ||\n\t\t\t\t\tlowerCaseMessage.includes('tool use is not supported') ||\n\t\t\t\t\tlowerCaseMessage.includes('tools are not supported') ||\n\t\t\t\t\tlowerCaseMessage.includes('function calling is not supported') ||\n\t\t\t\t\tlowerCaseMessage.includes('tool use is not supported')\n\t\t\t\t) {\n\t\t\t\t\tconst specificErrorMsg = `Model '${modelId || 'unknown'}' via provider 
'${providerName || 'unknown'}' does not support the 'tool use' required by generateObjectService. Please configure a model that supports tool/function calling for the '${currentRole}' role, or use generateTextService if structured output is not strictly required.`;\n\t\t\t\t\tlog('error', `[Tool Support Error] ${specificErrorMsg}`);\n\t\t\t\t\tthrow new Error(specificErrorMsg);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tlog('error', `All roles in the sequence [${sequence.join(', ')}] failed.`);\n\tthrow new Error(lastCleanErrorMessage);\n}\n\n/**\n * Unified service function for generating text.\n * Handles client retrieval, retries, and fallback sequence.\n *\n * @param {object} params - Parameters for the service call.\n * @param {string} params.role - The initial client role ('main', 'research', 'fallback').\n * @param {object} [params.session=null] - Optional MCP session object.\n * @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.\n * @param {string} params.prompt - The prompt for the AI.\n * @param {string} [params.systemPrompt] - Optional system prompt.\n * @param {string} params.commandName - Name of the command invoking the service.\n * @param {string} [params.outputType='cli'] - 'cli' or 'mcp'.\n * @returns {Promise<object>} Result object containing generated text and usage data.\n */\nasync function generateTextService(params) {\n\t// Ensure default outputType if not provided\n\tconst defaults = { outputType: 'cli' };\n\tconst combinedParams = { ...defaults, ...params };\n\t// TODO: Validate commandName exists?\n\treturn _unifiedServiceRunner('generateText', combinedParams);\n}\n\n/**\n * Unified service function for streaming text.\n * Handles client retrieval, retries, and fallback sequence.\n *\n * @param {object} params - Parameters for the service call.\n * @param {string} params.role - The initial client role ('main', 'research', 'fallback').\n * @param {object} [params.session=null] - Optional MCP session 
object.\n * @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.\n * @param {string} params.prompt - The prompt for the AI.\n * @param {string} [params.systemPrompt] - Optional system prompt.\n * @param {string} params.commandName - Name of the command invoking the service.\n * @param {string} [params.outputType='cli'] - 'cli' or 'mcp'.\n * @returns {Promise<object>} Result object containing the stream and usage data.\n */\nasync function streamTextService(params) {\n\tconst defaults = { outputType: 'cli' };\n\tconst combinedParams = { ...defaults, ...params };\n\t// TODO: Validate commandName exists?\n\t// NOTE: Telemetry for streaming might be tricky as usage data often comes at the end.\n\t// The current implementation logs *after* the stream is returned.\n\t// We might need to adjust how usage is captured/logged for streams.\n\treturn _unifiedServiceRunner('streamText', combinedParams);\n}\n\n/**\n * Unified service function for generating structured objects.\n * Handles client retrieval, retries, and fallback sequence.\n *\n * @param {object} params - Parameters for the service call.\n * @param {string} params.role - The initial client role ('main', 'research', 'fallback').\n * @param {object} [params.session=null] - Optional MCP session object.\n * @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.\n * @param {import('zod').ZodSchema} params.schema - The Zod schema for the expected object.\n * @param {string} params.prompt - The prompt for the AI.\n * @param {string} [params.systemPrompt] - Optional system prompt.\n * @param {string} [params.objectName='generated_object'] - Name for object/tool.\n * @param {number} [params.maxRetries=3] - Max retries for object generation.\n * @param {string} params.commandName - Name of the command invoking the service.\n * @param {string} [params.outputType='cli'] - 'cli' or 'mcp'.\n * @returns {Promise<object>} Result object containing the 
generated object and usage data.\n */\nasync function generateObjectService(params) {\n\tconst defaults = {\n\t\tobjectName: 'generated_object',\n\t\tmaxRetries: 3,\n\t\toutputType: 'cli'\n\t};\n\tconst combinedParams = { ...defaults, ...params };\n\t// TODO: Validate commandName exists?\n\treturn _unifiedServiceRunner('generateObject', combinedParams);\n}\n\n// --- Telemetry Function ---\n/**\n * Logs AI usage telemetry data.\n * For now, it just logs to the console. Sending will be implemented later.\n * @param {object} params - Telemetry parameters.\n * @param {string} params.userId - Unique user identifier.\n * @param {string} params.commandName - The command that triggered the AI call.\n * @param {string} params.providerName - The AI provider used (e.g., 'openai').\n * @param {string} params.modelId - The specific AI model ID used.\n * @param {number} params.inputTokens - Number of input tokens.\n * @param {number} params.outputTokens - Number of output tokens.\n */\nasync function logAiUsage({\n\tuserId,\n\tcommandName,\n\tproviderName,\n\tmodelId,\n\tinputTokens,\n\toutputTokens,\n\toutputType\n}) {\n\ttry {\n\t\tconst isMCP = outputType === 'mcp';\n\t\tconst timestamp = new Date().toISOString();\n\t\tconst totalTokens = (inputTokens || 0) + (outputTokens || 0);\n\n\t\t// Destructure currency along with costs\n\t\tconst { inputCost, outputCost, currency } = _getCostForModel(\n\t\t\tproviderName,\n\t\t\tmodelId\n\t\t);\n\n\t\tconst totalCost =\n\t\t\t((inputTokens || 0) / 1_000_000) * inputCost +\n\t\t\t((outputTokens || 0) / 1_000_000) * outputCost;\n\n\t\tconst telemetryData = {\n\t\t\ttimestamp,\n\t\t\tuserId,\n\t\t\tcommandName,\n\t\t\tmodelUsed: modelId, // Consistent field name from requirements\n\t\t\tproviderName, // Keep provider name for context\n\t\t\tinputTokens: inputTokens || 0,\n\t\t\toutputTokens: outputTokens || 0,\n\t\t\ttotalTokens,\n\t\t\ttotalCost: parseFloat(totalCost.toFixed(6)),\n\t\t\tcurrency // Add currency to the telemetry 
data\n\t\t};\n\n\t\tif (getDebugFlag()) {\n\t\t\tlog('info', 'AI Usage Telemetry:', telemetryData);\n\t\t}\n\n\t\t// TODO (Subtask 77.2): Send telemetryData securely to the external endpoint.\n\n\t\treturn telemetryData;\n\t} catch (error) {\n\t\tlog('error', `Failed to log AI usage telemetry: ${error.message}`, {\n\t\t\terror\n\t\t});\n\t\t// Don't re-throw; telemetry failure shouldn't block core functionality.\n\t\treturn null;\n\t}\n}\n\nexport {\n\tgenerateTextService,\n\tstreamTextService,\n\tgenerateObjectService,\n\tlogAiUsage\n};\n"], ["/claude-task-master/scripts/modules/config-manager.js", "import fs from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\nimport chalk from 'chalk';\nimport { z } from 'zod';\nimport { AI_COMMAND_NAMES } from '../../src/constants/commands.js';\nimport {\n\tLEGACY_CONFIG_FILE,\n\tTASKMASTER_DIR\n} from '../../src/constants/paths.js';\nimport {\n\tALL_PROVIDERS,\n\tCUSTOM_PROVIDERS,\n\tCUSTOM_PROVIDERS_ARRAY,\n\tVALIDATED_PROVIDERS\n} from '../../src/constants/providers.js';\nimport { findConfigPath } from '../../src/utils/path-utils.js';\nimport { findProjectRoot, isEmpty, log, resolveEnvVariable } from './utils.js';\n\n// Calculate __dirname in ESM\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = path.dirname(__filename);\n\n// Load supported models from JSON file using the calculated __dirname\nlet MODEL_MAP;\ntry {\n\tconst supportedModelsRaw = fs.readFileSync(\n\t\tpath.join(__dirname, 'supported-models.json'),\n\t\t'utf-8'\n\t);\n\tMODEL_MAP = JSON.parse(supportedModelsRaw);\n} catch (error) {\n\tconsole.error(\n\t\tchalk.red(\n\t\t\t'FATAL ERROR: Could not load supported-models.json. 
Please ensure the file exists and is valid JSON.'\n\t\t),\n\t\terror\n\t);\n\tMODEL_MAP = {}; // Default to empty map on error to avoid crashing, though functionality will be limited\n\tprocess.exit(1); // Exit if models can't be loaded\n}\n\n// Default configuration values (used if config file is missing or incomplete)\nconst DEFAULTS = {\n\tmodels: {\n\t\tmain: {\n\t\t\tprovider: 'anthropic',\n\t\t\tmodelId: 'claude-3-7-sonnet-20250219',\n\t\t\tmaxTokens: 64000,\n\t\t\ttemperature: 0.2\n\t\t},\n\t\tresearch: {\n\t\t\tprovider: 'perplexity',\n\t\t\tmodelId: 'sonar-pro',\n\t\t\tmaxTokens: 8700,\n\t\t\ttemperature: 0.1\n\t\t},\n\t\tfallback: {\n\t\t\t// No default fallback provider/model initially\n\t\t\tprovider: 'anthropic',\n\t\t\tmodelId: 'claude-3-5-sonnet',\n\t\t\tmaxTokens: 8192, // Default parameters if fallback IS configured\n\t\t\ttemperature: 0.2\n\t\t}\n\t},\n\tglobal: {\n\t\tlogLevel: 'info',\n\t\tdebug: false,\n\t\tdefaultNumTasks: 10,\n\t\tdefaultSubtasks: 5,\n\t\tdefaultPriority: 'medium',\n\t\tprojectName: 'Task Master',\n\t\tollamaBaseURL: 'http://localhost:11434/api',\n\t\tbedrockBaseURL: 'https://bedrock.us-east-1.amazonaws.com',\n\t\tresponseLanguage: 'English'\n\t},\n\tclaudeCode: {}\n};\n\n// --- Internal Config Loading ---\nlet loadedConfig = null;\nlet loadedConfigRoot = null; // Track which root loaded the config\n\n// Custom Error for configuration issues\nclass ConfigurationError extends Error {\n\tconstructor(message) {\n\t\tsuper(message);\n\t\tthis.name = 'ConfigurationError';\n\t}\n}\n\nfunction _loadAndValidateConfig(explicitRoot = null) {\n\tconst defaults = DEFAULTS; // Use the defined defaults\n\tlet rootToUse = explicitRoot;\n\tlet configSource = explicitRoot\n\t\t? 
`explicit root (${explicitRoot})`\n\t\t: 'defaults (no root provided yet)';\n\n\t// ---> If no explicit root, TRY to find it <---\n\tif (!rootToUse) {\n\t\trootToUse = findProjectRoot();\n\t\tif (rootToUse) {\n\t\t\tconfigSource = `found root (${rootToUse})`;\n\t\t} else {\n\t\t\t// No root found, use current working directory as fallback\n\t\t\t// This prevents infinite loops during initialization\n\t\t\trootToUse = process.cwd();\n\t\t\tconfigSource = `current directory (${rootToUse}) - no project markers found`;\n\t\t}\n\t}\n\t// ---> End find project root logic <---\n\n\t// --- Find configuration file ---\n\tlet configPath = null;\n\tlet config = { ...defaults }; // Start with a deep copy of defaults\n\tlet configExists = false;\n\n\t// During initialization (no project markers), skip config file search entirely\n\tconst hasProjectMarkers =\n\t\tfs.existsSync(path.join(rootToUse, TASKMASTER_DIR)) ||\n\t\tfs.existsSync(path.join(rootToUse, LEGACY_CONFIG_FILE));\n\n\tif (hasProjectMarkers) {\n\t\t// Only try to find config if we have project markers\n\t\t// This prevents the repeated warnings during init\n\t\tconfigPath = findConfigPath(null, { projectRoot: rootToUse });\n\t}\n\n\tif (configPath) {\n\t\tconfigExists = true;\n\t\tconst isLegacy = configPath.endsWith(LEGACY_CONFIG_FILE);\n\n\t\ttry {\n\t\t\tconst rawData = fs.readFileSync(configPath, 'utf-8');\n\t\t\tconst parsedConfig = JSON.parse(rawData);\n\n\t\t\t// Deep merge parsed config onto defaults\n\t\t\tconfig = {\n\t\t\t\tmodels: {\n\t\t\t\t\tmain: { ...defaults.models.main, ...parsedConfig?.models?.main },\n\t\t\t\t\tresearch: {\n\t\t\t\t\t\t...defaults.models.research,\n\t\t\t\t\t\t...parsedConfig?.models?.research\n\t\t\t\t\t},\n\t\t\t\t\tfallback:\n\t\t\t\t\t\tparsedConfig?.models?.fallback?.provider &&\n\t\t\t\t\t\tparsedConfig?.models?.fallback?.modelId\n\t\t\t\t\t\t\t? 
{ ...defaults.models.fallback, ...parsedConfig.models.fallback }\n\t\t\t\t\t\t\t: { ...defaults.models.fallback }\n\t\t\t\t},\n\t\t\t\tglobal: { ...defaults.global, ...parsedConfig?.global },\n\t\t\t\tclaudeCode: { ...defaults.claudeCode, ...parsedConfig?.claudeCode }\n\t\t\t};\n\t\t\tconfigSource = `file (${configPath})`; // Update source info\n\n\t\t\t// Issue deprecation warning if using legacy config file\n\t\t\tif (isLegacy) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`⚠️ DEPRECATION WARNING: Found configuration in legacy location '${configPath}'. Please migrate to .taskmaster/config.json. Run 'task-master migrate' to automatically migrate your project.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// --- Validation (Warn if file content is invalid) ---\n\t\t\t// Use log.warn for consistency\n\t\t\tif (!validateProvider(config.models.main.provider)) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`Warning: Invalid main provider \"${config.models.main.provider}\" in ${configPath}. Falling back to default.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconfig.models.main = { ...defaults.models.main };\n\t\t\t}\n\t\t\tif (!validateProvider(config.models.research.provider)) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`Warning: Invalid research provider \"${config.models.research.provider}\" in ${configPath}. Falling back to default.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconfig.models.research = { ...defaults.models.research };\n\t\t\t}\n\t\t\tif (\n\t\t\t\tconfig.models.fallback?.provider &&\n\t\t\t\t!validateProvider(config.models.fallback.provider)\n\t\t\t) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`Warning: Invalid fallback provider \"${config.models.fallback.provider}\" in ${configPath}. 
Fallback model configuration will be ignored.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconfig.models.fallback.provider = undefined;\n\t\t\t\tconfig.models.fallback.modelId = undefined;\n\t\t\t}\n\t\t\tif (config.claudeCode && !isEmpty(config.claudeCode)) {\n\t\t\t\tconfig.claudeCode = validateClaudeCodeSettings(config.claudeCode);\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Use console.error for actual errors during parsing\n\t\t\tconsole.error(\n\t\t\t\tchalk.red(\n\t\t\t\t\t`Error reading or parsing ${configPath}: ${error.message}. Using default configuration.`\n\t\t\t\t)\n\t\t\t);\n\t\t\tconfig = { ...defaults }; // Reset to defaults on parse error\n\t\t\tconfigSource = `defaults (parse error at ${configPath})`;\n\t\t}\n\t} else {\n\t\t// Config file doesn't exist at the determined rootToUse.\n\t\tif (explicitRoot) {\n\t\t\t// Only warn if an explicit root was *expected*.\n\t\t\tconsole.warn(\n\t\t\t\tchalk.yellow(\n\t\t\t\t\t`Warning: Configuration file not found at provided project root (${explicitRoot}). Using default configuration. Run 'task-master models --setup' to configure.`\n\t\t\t\t)\n\t\t\t);\n\t\t} else {\n\t\t\t// Don't warn about missing config during initialization\n\t\t\t// Only warn if this looks like an existing project (has .taskmaster dir or legacy config marker)\n\t\t\tconst hasTaskmasterDir = fs.existsSync(\n\t\t\t\tpath.join(rootToUse, TASKMASTER_DIR)\n\t\t\t);\n\t\t\tconst hasLegacyMarker = fs.existsSync(\n\t\t\t\tpath.join(rootToUse, LEGACY_CONFIG_FILE)\n\t\t\t);\n\n\t\t\tif (hasTaskmasterDir || hasLegacyMarker) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`Warning: Configuration file not found at derived root (${rootToUse}). 
Using defaults.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t\t// Keep config as defaults\n\t\tconfig = { ...defaults };\n\t\tconfigSource = `defaults (no config file found at ${rootToUse})`;\n\t}\n\n\treturn config;\n}\n\n/**\n * Gets the current configuration, loading it if necessary.\n * Handles MCP initialization context gracefully.\n * @param {string|null} explicitRoot - Optional explicit path to the project root.\n * @param {boolean} forceReload - Force reloading the config file.\n * @returns {object} The loaded configuration object.\n */\nfunction getConfig(explicitRoot = null, forceReload = false) {\n\t// Determine if a reload is necessary\n\tconst needsLoad =\n\t\t!loadedConfig ||\n\t\tforceReload ||\n\t\t(explicitRoot && explicitRoot !== loadedConfigRoot);\n\n\tif (needsLoad) {\n\t\tconst newConfig = _loadAndValidateConfig(explicitRoot); // _load handles null explicitRoot\n\n\t\t// Only update the global cache if loading was forced or if an explicit root\n\t\t// was provided (meaning we attempted to load a specific project's config).\n\t\t// We avoid caching the initial default load triggered without an explicitRoot.\n\t\tif (forceReload || explicitRoot) {\n\t\t\tloadedConfig = newConfig;\n\t\t\tloadedConfigRoot = explicitRoot; // Store the root used for this loaded config\n\t\t}\n\t\treturn newConfig; // Return the newly loaded/default config\n\t}\n\n\t// If no load was needed, return the cached config\n\treturn loadedConfig;\n}\n\n/**\n * Validates if a provider name is supported.\n * Custom providers (azure, vertex, bedrock, openrouter, ollama) are always allowed.\n * Validated providers must exist in the MODEL_MAP from supported-models.json.\n * @param {string} providerName The name of the provider.\n * @returns {boolean} True if the provider is valid, false otherwise.\n */\nfunction validateProvider(providerName) {\n\t// Custom providers are always allowed\n\tif (CUSTOM_PROVIDERS_ARRAY.includes(providerName)) {\n\t\treturn true;\n\t}\n\n\t// 
Validated providers must exist in MODEL_MAP\n\tif (VALIDATED_PROVIDERS.includes(providerName)) {\n\t\treturn !!(MODEL_MAP && MODEL_MAP[providerName]);\n\t}\n\n\t// Unknown providers are not allowed\n\treturn false;\n}\n\n/**\n * Optional: Validates if a modelId is known for a given provider based on MODEL_MAP.\n * This is a non-strict validation; an unknown model might still be valid.\n * @param {string} providerName The name of the provider.\n * @param {string} modelId The model ID.\n * @returns {boolean} True if the modelId is in the map for the provider, false otherwise.\n */\nfunction validateProviderModelCombination(providerName, modelId) {\n\t// If provider isn't even in our map, we can't validate the model\n\tif (!MODEL_MAP[providerName]) {\n\t\treturn true; // Allow unknown providers or those without specific model lists\n\t}\n\t// If the provider is known, check if the model is in its list OR if the list is empty (meaning accept any)\n\treturn (\n\t\tMODEL_MAP[providerName].length === 0 ||\n\t\t// Use .some() to check the 'id' property of objects in the array\n\t\tMODEL_MAP[providerName].some((modelObj) => modelObj.id === modelId)\n\t);\n}\n\n/**\n * Validates Claude Code AI provider custom settings\n * @param {object} settings The settings to validate\n * @returns {object} The validated settings\n */\nfunction validateClaudeCodeSettings(settings) {\n\t// Define the base settings schema without commandSpecific first\n\tconst BaseSettingsSchema = z.object({\n\t\tmaxTurns: z.number().int().positive().optional(),\n\t\tcustomSystemPrompt: z.string().optional(),\n\t\tappendSystemPrompt: z.string().optional(),\n\t\tpermissionMode: z\n\t\t\t.enum(['default', 'acceptEdits', 'plan', 'bypassPermissions'])\n\t\t\t.optional(),\n\t\tallowedTools: z.array(z.string()).optional(),\n\t\tdisallowedTools: z.array(z.string()).optional(),\n\t\tmcpServers: z\n\t\t\t.record(\n\t\t\t\tz.string(),\n\t\t\t\tz.object({\n\t\t\t\t\ttype: z.enum(['stdio', 
'sse']).optional(),\n\t\t\t\t\tcommand: z.string(),\n\t\t\t\t\targs: z.array(z.string()).optional(),\n\t\t\t\t\tenv: z.record(z.string()).optional(),\n\t\t\t\t\turl: z.string().url().optional(),\n\t\t\t\t\theaders: z.record(z.string()).optional()\n\t\t\t\t})\n\t\t\t)\n\t\t\t.optional()\n\t});\n\n\t// Define CommandSpecificSchema using the base schema\n\tconst CommandSpecificSchema = z.record(\n\t\tz.enum(AI_COMMAND_NAMES),\n\t\tBaseSettingsSchema\n\t);\n\n\t// Define the full settings schema with commandSpecific\n\tconst SettingsSchema = BaseSettingsSchema.extend({\n\t\tcommandSpecific: CommandSpecificSchema.optional()\n\t});\n\n\tlet validatedSettings = {};\n\n\ttry {\n\t\tvalidatedSettings = SettingsSchema.parse(settings);\n\t} catch (error) {\n\t\tconsole.warn(\n\t\t\tchalk.yellow(\n\t\t\t\t`Warning: Invalid Claude Code settings in config: ${error.message}. Falling back to default.`\n\t\t\t)\n\t\t);\n\n\t\tvalidatedSettings = {};\n\t}\n\n\treturn validatedSettings;\n}\n\n// --- Claude Code Settings Getters ---\n\nfunction getClaudeCodeSettings(explicitRoot = null, forceReload = false) {\n\tconst config = getConfig(explicitRoot, forceReload);\n\t// Ensure Claude Code defaults are applied if Claude Code section is missing\n\treturn { ...DEFAULTS.claudeCode, ...(config?.claudeCode || {}) };\n}\n\nfunction getClaudeCodeSettingsForCommand(\n\tcommandName,\n\texplicitRoot = null,\n\tforceReload = false\n) {\n\tconst settings = getClaudeCodeSettings(explicitRoot, forceReload);\n\tconst commandSpecific = settings?.commandSpecific || {};\n\treturn { ...settings, ...commandSpecific[commandName] };\n}\n\n// --- Role-Specific Getters ---\n\nfunction getModelConfigForRole(role, explicitRoot = null) {\n\tconst config = getConfig(explicitRoot);\n\tconst roleConfig = config?.models?.[role];\n\tif (!roleConfig) {\n\t\tlog(\n\t\t\t'warn',\n\t\t\t`No model configuration found for role: ${role}. 
Returning default.`\n\t\t);\n\t\treturn DEFAULTS.models[role] || {};\n\t}\n\treturn roleConfig;\n}\n\nfunction getMainProvider(explicitRoot = null) {\n\treturn getModelConfigForRole('main', explicitRoot).provider;\n}\n\nfunction getMainModelId(explicitRoot = null) {\n\treturn getModelConfigForRole('main', explicitRoot).modelId;\n}\n\nfunction getMainMaxTokens(explicitRoot = null) {\n\t// Directly return value from config (which includes defaults)\n\treturn getModelConfigForRole('main', explicitRoot).maxTokens;\n}\n\nfunction getMainTemperature(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getModelConfigForRole('main', explicitRoot).temperature;\n}\n\nfunction getResearchProvider(explicitRoot = null) {\n\treturn getModelConfigForRole('research', explicitRoot).provider;\n}\n\nfunction getResearchModelId(explicitRoot = null) {\n\treturn getModelConfigForRole('research', explicitRoot).modelId;\n}\n\nfunction getResearchMaxTokens(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getModelConfigForRole('research', explicitRoot).maxTokens;\n}\n\nfunction getResearchTemperature(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getModelConfigForRole('research', explicitRoot).temperature;\n}\n\nfunction getFallbackProvider(explicitRoot = null) {\n\t// Directly return value from config (will be undefined if not set)\n\treturn getModelConfigForRole('fallback', explicitRoot).provider;\n}\n\nfunction getFallbackModelId(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getModelConfigForRole('fallback', explicitRoot).modelId;\n}\n\nfunction getFallbackMaxTokens(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getModelConfigForRole('fallback', explicitRoot).maxTokens;\n}\n\nfunction getFallbackTemperature(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getModelConfigForRole('fallback', explicitRoot).temperature;\n}\n\n// --- Global Settings 
Getters ---\n\nfunction getGlobalConfig(explicitRoot = null) {\n\tconst config = getConfig(explicitRoot);\n\t// Ensure global defaults are applied if global section is missing\n\treturn { ...DEFAULTS.global, ...(config?.global || {}) };\n}\n\nfunction getLogLevel(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).logLevel.toLowerCase();\n}\n\nfunction getDebugFlag(explicitRoot = null) {\n\t// Directly return value from config, ensure boolean\n\treturn getGlobalConfig(explicitRoot).debug === true;\n}\n\nfunction getDefaultSubtasks(explicitRoot = null) {\n\t// Directly return value from config, ensure integer\n\tconst val = getGlobalConfig(explicitRoot).defaultSubtasks;\n\tconst parsedVal = parseInt(val, 10);\n\treturn Number.isNaN(parsedVal) ? DEFAULTS.global.defaultSubtasks : parsedVal;\n}\n\nfunction getDefaultNumTasks(explicitRoot = null) {\n\tconst val = getGlobalConfig(explicitRoot).defaultNumTasks;\n\tconst parsedVal = parseInt(val, 10);\n\treturn Number.isNaN(parsedVal) ? 
DEFAULTS.global.defaultNumTasks : parsedVal;\n}\n\nfunction getDefaultPriority(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).defaultPriority;\n}\n\nfunction getProjectName(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).projectName;\n}\n\nfunction getOllamaBaseURL(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).ollamaBaseURL;\n}\n\nfunction getAzureBaseURL(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).azureBaseURL;\n}\n\nfunction getBedrockBaseURL(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).bedrockBaseURL;\n}\n\n/**\n * Gets the Google Cloud project ID for Vertex AI from configuration\n * @param {string|null} explicitRoot - Optional explicit path to the project root.\n * @returns {string|null} The project ID or null if not configured\n */\nfunction getVertexProjectId(explicitRoot = null) {\n\t// Return value from config\n\treturn getGlobalConfig(explicitRoot).vertexProjectId;\n}\n\n/**\n * Gets the Google Cloud location for Vertex AI from configuration\n * @param {string|null} explicitRoot - Optional explicit path to the project root.\n * @returns {string} The location or default value of \"us-central1\"\n */\nfunction getVertexLocation(explicitRoot = null) {\n\t// Return value from config or default\n\treturn getGlobalConfig(explicitRoot).vertexLocation || 'us-central1';\n}\n\nfunction getResponseLanguage(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).responseLanguage;\n}\n\n/**\n * Gets model parameters (maxTokens, temperature) for a specific role,\n * considering model-specific overrides from supported-models.json.\n * @param {string} role - The role ('main', 'research', 'fallback').\n * @param {string|null} explicitRoot - 
Optional explicit path to the project root.\n * @returns {{maxTokens: number, temperature: number}}\n */\nfunction getParametersForRole(role, explicitRoot = null) {\n\tconst roleConfig = getModelConfigForRole(role, explicitRoot);\n\tconst roleMaxTokens = roleConfig.maxTokens;\n\tconst roleTemperature = roleConfig.temperature;\n\tconst modelId = roleConfig.modelId;\n\tconst providerName = roleConfig.provider;\n\n\tlet effectiveMaxTokens = roleMaxTokens; // Start with the role's default\n\n\ttry {\n\t\t// Find the model definition in MODEL_MAP\n\t\tconst providerModels = MODEL_MAP[providerName];\n\t\tif (providerModels && Array.isArray(providerModels)) {\n\t\t\tconst modelDefinition = providerModels.find((m) => m.id === modelId);\n\n\t\t\t// Check if a model-specific max_tokens is defined and valid\n\t\t\tif (\n\t\t\t\tmodelDefinition &&\n\t\t\t\ttypeof modelDefinition.max_tokens === 'number' &&\n\t\t\t\tmodelDefinition.max_tokens > 0\n\t\t\t) {\n\t\t\t\tconst modelSpecificMaxTokens = modelDefinition.max_tokens;\n\t\t\t\t// Use the minimum of the role default and the model specific limit\n\t\t\t\teffectiveMaxTokens = Math.min(roleMaxTokens, modelSpecificMaxTokens);\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`Applying model-specific max_tokens (${modelSpecificMaxTokens}) for ${modelId}. Effective limit: ${effectiveMaxTokens}`\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`No valid model-specific max_tokens override found for ${modelId}. Using role default: ${roleMaxTokens}`\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\t// Special handling for custom OpenRouter models\n\t\t\tif (providerName === CUSTOM_PROVIDERS.OPENROUTER) {\n\t\t\t\t// Use a conservative default for OpenRouter models not in our list\n\t\t\t\tconst openrouterDefault = 32768;\n\t\t\t\teffectiveMaxTokens = Math.min(roleMaxTokens, openrouterDefault);\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`Custom OpenRouter model ${modelId} detected. 
Using conservative max_tokens: ${effectiveMaxTokens}`\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t} catch (lookupError) {\n\t\tlog(\n\t\t\t'warn',\n\t\t\t`Error looking up model-specific max_tokens for ${modelId}: ${lookupError.message}. Using role default: ${roleMaxTokens}`\n\t\t);\n\t\t// Fallback to role default on error\n\t\teffectiveMaxTokens = roleMaxTokens;\n\t}\n\n\treturn {\n\t\tmaxTokens: effectiveMaxTokens,\n\t\ttemperature: roleTemperature\n\t};\n}\n\n/**\n * Checks if the API key for a given provider is set in the environment.\n * Checks process.env first, then session.env if session is provided, then .env file if projectRoot provided.\n * @param {string} providerName - The name of the provider (e.g., 'openai', 'anthropic').\n * @param {object|null} [session=null] - The MCP session object (optional).\n * @param {string|null} [projectRoot=null] - The project root directory (optional, for .env file check).\n * @returns {boolean} True if the API key is set, false otherwise.\n */\nfunction isApiKeySet(providerName, session = null, projectRoot = null) {\n\t// Define the expected environment variable name for each provider\n\n\t// Providers that don't require API keys for authentication\n\tconst providersWithoutApiKeys = [\n\t\tCUSTOM_PROVIDERS.OLLAMA,\n\t\tCUSTOM_PROVIDERS.BEDROCK,\n\t\tCUSTOM_PROVIDERS.MCP,\n\t\tCUSTOM_PROVIDERS.GEMINI_CLI\n\t];\n\n\tif (providersWithoutApiKeys.includes(providerName?.toLowerCase())) {\n\t\treturn true; // Indicate key status is effectively \"OK\"\n\t}\n\n\t// Claude Code doesn't require an API key\n\tif (providerName?.toLowerCase() === 'claude-code') {\n\t\treturn true; // No API key needed\n\t}\n\n\tconst keyMap = {\n\t\topenai: 'OPENAI_API_KEY',\n\t\tanthropic: 'ANTHROPIC_API_KEY',\n\t\tgoogle: 'GOOGLE_API_KEY',\n\t\tperplexity: 
'PERPLEXITY_API_KEY',\n\t\tmistral: 'MISTRAL_API_KEY',\n\t\tazure: 'AZURE_OPENAI_API_KEY',\n\t\topenrouter: 'OPENROUTER_API_KEY',\n\t\txai: 'XAI_API_KEY',\n\t\tgroq: 'GROQ_API_KEY',\n\t\tvertex: 'GOOGLE_API_KEY', // Vertex uses the same key as Google\n\t\t'claude-code': 'CLAUDE_CODE_API_KEY', // Not actually used, but included for consistency\n\t\tbedrock: 'AWS_ACCESS_KEY_ID' // Bedrock uses AWS credentials\n\t\t// Add other providers as needed\n\t};\n\n\tconst providerKey = providerName?.toLowerCase();\n\tif (!providerKey || !keyMap[providerKey]) {\n\t\tlog('warn', `Unknown provider name: ${providerName} in isApiKeySet check.`);\n\t\treturn false;\n\t}\n\n\tconst envVarName = keyMap[providerKey];\n\tconst apiKeyValue = resolveEnvVariable(envVarName, session, projectRoot);\n\n\t// Check if the key exists, is not empty, and is not a placeholder\n\treturn (\n\t\tapiKeyValue &&\n\t\tapiKeyValue.trim() !== '' &&\n\t\t!/YOUR_.*_API_KEY_HERE/.test(apiKeyValue) && // General placeholder check\n\t\t!apiKeyValue.includes('KEY_HERE')\n\t); // Another common placeholder pattern\n}\n\n/**\n * Checks the API key status within .cursor/mcp.json for a given provider.\n * Reads the mcp.json file, finds the taskmaster-ai server config, and checks the relevant env var.\n * @param {string} providerName The name of the provider.\n * @param {string|null} projectRoot - Optional explicit path to the project root.\n * @returns {boolean} True if the key exists and is not a placeholder, false otherwise.\n */\nfunction getMcpApiKeyStatus(providerName, projectRoot = null) {\n\tconst rootDir = projectRoot || findProjectRoot(); // Use existing root finding\n\tif (!rootDir) {\n\t\tconsole.warn(\n\t\t\tchalk.yellow('Warning: Could not find project root to check mcp.json.')\n\t\t);\n\t\treturn false; // Cannot check without root\n\t}\n\tconst mcpConfigPath = path.join(rootDir, '.cursor', 'mcp.json');\n\n\tif (!fs.existsSync(mcpConfigPath)) {\n\t\t// console.warn(chalk.yellow('Warning: 
.cursor/mcp.json not found.'));\n\t\treturn false; // File doesn't exist\n\t}\n\n\ttry {\n\t\tconst mcpConfigRaw = fs.readFileSync(mcpConfigPath, 'utf-8');\n\t\tconst mcpConfig = JSON.parse(mcpConfigRaw);\n\n\t\tconst mcpEnv =\n\t\t\tmcpConfig?.mcpServers?.['task-master-ai']?.env ||\n\t\t\tmcpConfig?.mcpServers?.['taskmaster-ai']?.env;\n\t\tif (!mcpEnv) {\n\t\t\treturn false;\n\t\t}\n\n\t\tlet apiKeyToCheck = null;\n\t\tlet placeholderValue = null;\n\n\t\tswitch (providerName) {\n\t\t\tcase 'anthropic':\n\t\t\t\tapiKeyToCheck = mcpEnv.ANTHROPIC_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_ANTHROPIC_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'openai':\n\t\t\t\tapiKeyToCheck = mcpEnv.OPENAI_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_OPENAI_API_KEY_HERE'; // Assuming placeholder matches OPENAI\n\t\t\t\tbreak;\n\t\t\tcase 'openrouter':\n\t\t\t\tapiKeyToCheck = mcpEnv.OPENROUTER_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_OPENROUTER_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'google':\n\t\t\t\tapiKeyToCheck = mcpEnv.GOOGLE_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_GOOGLE_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'perplexity':\n\t\t\t\tapiKeyToCheck = mcpEnv.PERPLEXITY_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_PERPLEXITY_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'xai':\n\t\t\t\tapiKeyToCheck = mcpEnv.XAI_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_XAI_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'groq':\n\t\t\t\tapiKeyToCheck = mcpEnv.GROQ_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_GROQ_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'ollama':\n\t\t\t\treturn true; // No key needed\n\t\t\tcase 'claude-code':\n\t\t\t\treturn true; // No key needed\n\t\t\tcase 'mistral':\n\t\t\t\tapiKeyToCheck = mcpEnv.MISTRAL_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_MISTRAL_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'azure':\n\t\t\t\tapiKeyToCheck = mcpEnv.AZURE_OPENAI_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_AZURE_OPENAI_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 
'vertex':\n\t\t\t\tapiKeyToCheck = mcpEnv.GOOGLE_API_KEY; // Vertex uses Google API key\n\t\t\t\tplaceholderValue = 'YOUR_GOOGLE_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'bedrock':\n\t\t\t\tapiKeyToCheck = mcpEnv.AWS_ACCESS_KEY_ID; // Bedrock uses AWS credentials\n\t\t\t\tplaceholderValue = 'YOUR_AWS_ACCESS_KEY_ID_HERE';\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\treturn false; // Unknown provider\n\t\t}\n\n\t\treturn !!apiKeyToCheck && !/KEY_HERE$/.test(apiKeyToCheck);\n\t} catch (error) {\n\t\tconsole.error(\n\t\t\tchalk.red(`Error reading or parsing .cursor/mcp.json: ${error.message}`)\n\t\t);\n\t\treturn false;\n\t}\n}\n\n/**\n * Gets a list of available models based on the MODEL_MAP.\n * @returns {Array<{id: string, name: string, provider: string, swe_score: number|null, cost_per_1m_tokens: {input: number|null, output: number|null}|null, allowed_roles: string[]}>}\n */\nfunction getAvailableModels() {\n\tconst available = [];\n\tfor (const [provider, models] of Object.entries(MODEL_MAP)) {\n\t\tif (models.length > 0) {\n\t\t\tmodels\n\t\t\t\t.filter((modelObj) => Boolean(modelObj.supported))\n\t\t\t\t.forEach((modelObj) => {\n\t\t\t\t\t// Basic name generation - can be improved\n\t\t\t\t\tconst modelId = modelObj.id;\n\t\t\t\t\tconst sweScore = modelObj.swe_score;\n\t\t\t\t\tconst cost = modelObj.cost_per_1m_tokens;\n\t\t\t\t\tconst allowedRoles = modelObj.allowed_roles || ['main', 'fallback'];\n\t\t\t\t\tconst nameParts = modelId\n\t\t\t\t\t\t.split('-')\n\t\t\t\t\t\t.map((p) => p.charAt(0).toUpperCase() + p.slice(1));\n\t\t\t\t\t// Handle specific known names better if needed\n\t\t\t\t\tlet name = nameParts.join(' ');\n\t\t\t\t\tif (modelId === 'claude-3.5-sonnet-20240620')\n\t\t\t\t\t\tname = 'Claude 3.5 Sonnet';\n\t\t\t\t\tif (modelId === 'claude-3-7-sonnet-20250219')\n\t\t\t\t\t\tname = 'Claude 3.7 Sonnet';\n\t\t\t\t\tif (modelId === 'gpt-4o') name = 'GPT-4o';\n\t\t\t\t\tif (modelId === 'gpt-4-turbo') name = 'GPT-4 Turbo';\n\t\t\t\t\tif (modelId === 
'sonar-pro') name = 'Perplexity Sonar Pro';\n\t\t\t\t\tif (modelId === 'sonar-mini') name = 'Perplexity Sonar Mini';\n\n\t\t\t\t\tavailable.push({\n\t\t\t\t\t\tid: modelId,\n\t\t\t\t\t\tname: name,\n\t\t\t\t\t\tprovider: provider,\n\t\t\t\t\t\tswe_score: sweScore,\n\t\t\t\t\t\tcost_per_1m_tokens: cost,\n\t\t\t\t\t\tallowed_roles: allowedRoles,\n\t\t\t\t\t\tmax_tokens: modelObj.max_tokens\n\t\t\t\t\t});\n\t\t\t\t});\n\t\t} else {\n\t\t\t// For providers with empty lists (like ollama), maybe add a placeholder or skip\n\t\t\tavailable.push({\n\t\t\t\tid: `[${provider}-any]`,\n\t\t\t\tname: `Any (${provider})`,\n\t\t\t\tprovider: provider\n\t\t\t});\n\t\t}\n\t}\n\treturn available;\n}\n\n/**\n * Writes the configuration object to the file.\n * @param {Object} config The configuration object to write.\n * @param {string|null} explicitRoot - Optional explicit path to the project root.\n * @returns {boolean} True if successful, false otherwise.\n */\nfunction writeConfig(config, explicitRoot = null) {\n\t// ---> Determine root path reliably <---\n\tlet rootPath = explicitRoot;\n\tif (explicitRoot === null || explicitRoot === undefined) {\n\t\t// Logic matching _loadAndValidateConfig\n\t\tconst foundRoot = findProjectRoot(); // *** Explicitly call findProjectRoot ***\n\t\tif (!foundRoot) {\n\t\t\tconsole.error(\n\t\t\t\tchalk.red(\n\t\t\t\t\t'Error: Could not determine project root. 
Configuration not saved.'\n\t\t\t\t)\n\t\t\t);\n\t\t\treturn false;\n\t\t}\n\t\trootPath = foundRoot;\n\t}\n\t// ---> End determine root path logic <---\n\n\t// Use new config location: .taskmaster/config.json\n\tconst taskmasterDir = path.join(rootPath, '.taskmaster');\n\tconst configPath = path.join(taskmasterDir, 'config.json');\n\n\ttry {\n\t\t// Ensure .taskmaster directory exists\n\t\tif (!fs.existsSync(taskmasterDir)) {\n\t\t\tfs.mkdirSync(taskmasterDir, { recursive: true });\n\t\t}\n\n\t\tfs.writeFileSync(configPath, JSON.stringify(config, null, 2));\n\t\tloadedConfig = config; // Update the cache after successful write\n\t\treturn true;\n\t} catch (error) {\n\t\tconsole.error(\n\t\t\tchalk.red(\n\t\t\t\t`Error writing configuration to ${configPath}: ${error.message}`\n\t\t\t)\n\t\t);\n\t\treturn false;\n\t}\n}\n\n/**\n * Checks if a configuration file exists at the project root (new or legacy location)\n * @param {string|null} explicitRoot - Optional explicit path to the project root\n * @returns {boolean} True if the file exists, false otherwise\n */\nfunction isConfigFilePresent(explicitRoot = null) {\n\treturn findConfigPath(null, { projectRoot: explicitRoot }) !== null;\n}\n\n/**\n * Gets the user ID from the configuration.\n * @param {string|null} explicitRoot - Optional explicit path to the project root.\n * @returns {string|null} The user ID or null if not found.\n */\nfunction getUserId(explicitRoot = null) {\n\tconst config = getConfig(explicitRoot);\n\tif (!config.global) {\n\t\tconfig.global = {}; // Ensure global object exists\n\t}\n\tif (!config.global.userId) {\n\t\tconfig.global.userId = '1234567890';\n\t\t// Attempt to write the updated config.\n\t\t// It's important that writeConfig correctly resolves the path\n\t\t// using explicitRoot, similar to how getConfig does.\n\t\tconst success = writeConfig(config, explicitRoot);\n\t\tif (!success) {\n\t\t\t// Log an error or handle the failure to write,\n\t\t\t// though for now, we'll proceed 
with the in-memory default.\n\t\t\tlog(\n\t\t\t\t'warning',\n\t\t\t\t'Failed to write updated configuration with new userId. Please let the developers know.'\n\t\t\t);\n\t\t}\n\t}\n\treturn config.global.userId;\n}\n\n/**\n * Gets a list of all known provider names (both validated and custom).\n * @returns {string[]} An array of all provider names.\n */\nfunction getAllProviders() {\n\treturn ALL_PROVIDERS;\n}\n\nfunction getBaseUrlForRole(role, explicitRoot = null) {\n\tconst roleConfig = getModelConfigForRole(role, explicitRoot);\n\tif (roleConfig && typeof roleConfig.baseURL === 'string') {\n\t\treturn roleConfig.baseURL;\n\t}\n\tconst provider = roleConfig?.provider;\n\tif (provider) {\n\t\tconst envVarName = `${provider.toUpperCase()}_BASE_URL`;\n\t\treturn resolveEnvVariable(envVarName, null, explicitRoot);\n\t}\n\treturn undefined;\n}\n\n// Export the providers without API keys array for use in other modules\nexport const providersWithoutApiKeys = [\n\tCUSTOM_PROVIDERS.OLLAMA,\n\tCUSTOM_PROVIDERS.BEDROCK,\n\tCUSTOM_PROVIDERS.GEMINI_CLI,\n\tCUSTOM_PROVIDERS.MCP\n];\n\nexport {\n\t// Core config access\n\tgetConfig,\n\twriteConfig,\n\tConfigurationError,\n\tisConfigFilePresent,\n\t// Claude Code settings\n\tgetClaudeCodeSettings,\n\tgetClaudeCodeSettingsForCommand,\n\t// Validation\n\tvalidateProvider,\n\tvalidateProviderModelCombination,\n\tvalidateClaudeCodeSettings,\n\tVALIDATED_PROVIDERS,\n\tCUSTOM_PROVIDERS,\n\tALL_PROVIDERS,\n\tMODEL_MAP,\n\tgetAvailableModels,\n\t// Role-specific getters (No env var overrides)\n\tgetMainProvider,\n\tgetMainModelId,\n\tgetMainMaxTokens,\n\tgetMainTemperature,\n\tgetResearchProvider,\n\tgetResearchModelId,\n\tgetResearchMaxTokens,\n\tgetResearchTemperature,\n\tgetFallbackProvider,\n\tgetFallbackModelId,\n\tgetFallbackMaxTokens,\n\tgetFallbackTemperature,\n\tgetBaseUrlForRole,\n\t// Global setting getters (No env var 
overrides)\n\tgetLogLevel,\n\tgetDebugFlag,\n\tgetDefaultNumTasks,\n\tgetDefaultSubtasks,\n\tgetDefaultPriority,\n\tgetProjectName,\n\tgetOllamaBaseURL,\n\tgetAzureBaseURL,\n\tgetBedrockBaseURL,\n\tgetResponseLanguage,\n\tgetParametersForRole,\n\tgetUserId,\n\t// API Key Checkers (still relevant)\n\tisApiKeySet,\n\tgetMcpApiKeyStatus,\n\t// ADD: Function to get all provider names\n\tgetAllProviders,\n\tgetVertexProjectId,\n\tgetVertexLocation\n};\n"], ["/claude-task-master/mcp-server/src/tools/set-task-status.js", "/**\n * tools/setTaskStatus.js\n * Tool to set the status of a task\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport {\n\tsetTaskStatusDirect,\n\tnextTaskDirect\n} from '../core/task-master-core.js';\nimport {\n\tfindTasksPath,\n\tfindComplexityReportPath\n} from '../core/utils/path-utils.js';\nimport { TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the setTaskStatus tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerSetTaskStatusTool(server) {\n\tserver.addTool({\n\t\tname: 'set_task_status',\n\t\tdescription: 'Set the status of one or more tasks or subtasks.',\n\t\tparameters: z.object({\n\t\t\tid: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t\"Task ID or subtask ID (e.g., '15', '15.2'). 
Can be comma-separated to update multiple tasks/subtasks at once.\"\n\t\t\t\t),\n\t\t\tstatus: z\n\t\t\t\t.enum(TASK_STATUS_OPTIONS)\n\t\t\t\t.describe(\n\t\t\t\t\t\"New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'.\"\n\t\t\t\t),\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tcomplexityReport: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the complexity report file (relative to project root or absolute)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Optional tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Setting status of task(s) ${args.id} to: ${args.status} ${\n\t\t\t\t\t\targs.tag ? `in tag: ${args.tag}` : 'in current tag'\n\t\t\t\t\t}`\n\t\t\t\t);\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tlet complexityReportPath;\n\t\t\t\ttry {\n\t\t\t\t\tcomplexityReportPath = findComplexityReportPath(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\t\tcomplexityReport: args.complexityReport,\n\t\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t\t},\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding 
complexity report: ${error.message}`);\n\t\t\t\t}\n\n\t\t\t\tconst result = await setTaskStatusDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tstatus: args.status,\n\t\t\t\t\t\tcomplexityReportPath,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(\n\t\t\t\t\t\t`Successfully updated status for task(s) ${args.id} to \"${args.status}\": ${result.data.message}`\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(\n\t\t\t\t\t\t`Failed to update task status: ${result.error?.message || 'Unknown error'}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error setting task status',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in setTaskStatus tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Error setting task status: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/add-task.js", "/**\n * add-task.js\n * Direct function implementation for adding a new task\n */\n\nimport { addTask } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for adding a new task with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} [args.prompt] - Description of the task to add (required if not using manual fields)\n * @param {string} [args.title] - Task title (for manual task creation)\n * @param {string} [args.description] - Task description (for manual task creation)\n * @param {string} [args.details] - Implementation details (for manual task creation)\n * 
@param {string} [args.testStrategy] - Test strategy (for manual task creation)\n * @param {string} [args.dependencies] - Comma-separated list of task IDs this task depends on\n * @param {string} [args.priority='medium'] - Task priority (high, medium, low)\n * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {boolean} [args.research=false] - Whether to use research capabilities for task creation\n * @param {string} [args.projectRoot] - Project root path\n * @param {string} [args.tag] - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function addTaskDirect(args, log, context = {}) {\n\t// Destructure expected args (including research and projectRoot)\n\tconst {\n\t\ttasksJsonPath,\n\t\tprompt,\n\t\tdependencies,\n\t\tpriority,\n\t\tresearch,\n\t\tprojectRoot,\n\t\ttag\n\t} = args;\n\tconst { session } = context; // Destructure session from context\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('addTaskDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode(); // Disable before returning\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Check if this is manual task creation or AI-driven task creation\n\t\tconst isManualCreation = args.title && args.description;\n\n\t\t// Check required parameters\n\t\tif (!args.prompt && !isManualCreation) 
{\n\t\t\tlog.error(\n\t\t\t\t'Missing required parameters: either prompt or title+description must be provided'\n\t\t\t);\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage:\n\t\t\t\t\t\t'Either the prompt parameter or both title and description parameters are required for adding a task'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Extract and prepare parameters\n\t\tconst taskDependencies = Array.isArray(dependencies)\n\t\t\t? dependencies // Already an array if passed directly\n\t\t\t: dependencies // Check if dependencies exist and are a string\n\t\t\t\t? String(dependencies)\n\t\t\t\t\t\t.split(',')\n\t\t\t\t\t\t.map((id) => parseInt(id.trim(), 10)) // Split, trim, and parse\n\t\t\t\t: []; // Default to empty array if null/undefined\n\t\tconst taskPriority = priority || 'medium'; // Default priority\n\n\t\tlet manualTaskData = null;\n\t\tlet newTaskId;\n\t\tlet telemetryData;\n\t\tlet tagInfo;\n\n\t\tif (isManualCreation) {\n\t\t\t// Create manual task data object\n\t\t\tmanualTaskData = {\n\t\t\t\ttitle: args.title,\n\t\t\t\tdescription: args.description,\n\t\t\t\tdetails: args.details || '',\n\t\t\t\ttestStrategy: args.testStrategy || ''\n\t\t\t};\n\n\t\t\tlog.info(\n\t\t\t\t`Adding new task manually with title: \"${args.title}\", dependencies: [${taskDependencies.join(', ')}], priority: ${priority}`\n\t\t\t);\n\n\t\t\t// Call the addTask function with manual task data\n\t\t\tconst result = await addTask(\n\t\t\t\ttasksPath,\n\t\t\t\tnull, // prompt is null for manual creation\n\t\t\t\ttaskDependencies,\n\t\t\t\ttaskPriority,\n\t\t\t\t{\n\t\t\t\t\tsession,\n\t\t\t\t\tmcpLog,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tcommandName: 'add-task',\n\t\t\t\t\toutputType: 'mcp',\n\t\t\t\t\ttag\n\t\t\t\t},\n\t\t\t\t'json', // outputFormat\n\t\t\t\tmanualTaskData, // Pass the manual task data\n\t\t\t\tfalse // research flag is false for manual creation\n\t\t\t);\n\t\t\tnewTaskId = 
result.newTaskId;\n\t\t\ttelemetryData = result.telemetryData;\n\t\t\ttagInfo = result.tagInfo;\n\t\t} else {\n\t\t\t// AI-driven task creation\n\t\t\tlog.info(\n\t\t\t\t`Adding new task with prompt: \"${prompt}\", dependencies: [${taskDependencies.join(', ')}], priority: ${taskPriority}, research: ${research}`\n\t\t\t);\n\n\t\t\t// Call the addTask function, passing the research flag\n\t\t\tconst result = await addTask(\n\t\t\t\ttasksPath,\n\t\t\t\tprompt, // Use the prompt for AI creation\n\t\t\t\ttaskDependencies,\n\t\t\t\ttaskPriority,\n\t\t\t\t{\n\t\t\t\t\tsession,\n\t\t\t\t\tmcpLog,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tcommandName: 'add-task',\n\t\t\t\t\toutputType: 'mcp',\n\t\t\t\t\ttag\n\t\t\t\t},\n\t\t\t\t'json', // outputFormat\n\t\t\t\tnull, // manualTaskData is null for AI creation\n\t\t\t\tresearch // Pass the research flag\n\t\t\t);\n\t\t\tnewTaskId = result.newTaskId;\n\t\t\ttelemetryData = result.telemetryData;\n\t\t\ttagInfo = result.tagInfo;\n\t\t}\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\ttaskId: newTaskId,\n\t\t\t\tmessage: `Successfully added new task #${newTaskId}`,\n\t\t\t\ttelemetryData: telemetryData,\n\t\t\t\ttagInfo: tagInfo\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in addTaskDirect: ${error.message}`);\n\t\t// Add specific error code checks if needed\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'ADD_TASK_ERROR', // Use error code if available\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/update-subtask-by-id.js", "/**\n * update-subtask-by-id.js\n * Direct function implementation for appending information to a specific subtask\n */\n\nimport { updateSubtaskById } from '../../../../scripts/modules/task-manager.js';\nimport 
{\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for updateSubtaskById with error handling.\n *\n * @param {Object} args - Command arguments containing id, prompt, useResearch, tasksJsonPath, and projectRoot.\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.id - Subtask ID in format \"parent.sub\".\n * @param {string} args.prompt - Information to append to the subtask.\n * @param {boolean} [args.research] - Whether to use research role.\n * @param {string} [args.projectRoot] - Project root path.\n * @param {string} [args.tag] - Tag for the task (optional)\n * @param {Object} log - Logger object.\n * @param {Object} context - Context object containing session data.\n * @returns {Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function updateSubtaskByIdDirect(args, log, context = {}) {\n\tconst { session } = context;\n\t// Destructure expected args, including projectRoot\n\tconst { tasksJsonPath, id, prompt, research, projectRoot, tag } = args;\n\n\tconst logWrapper = createLogWrapper(log);\n\n\ttry {\n\t\tlogWrapper.info(\n\t\t\t`Updating subtask by ID via direct function. ID: ${id}, ProjectRoot: ${projectRoot}`\n\t\t);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tconst errorMessage = 'tasksJsonPath is required but was not provided.';\n\t\t\tlogWrapper.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Basic validation for ID format (e.g., '5.2')\n\t\tif (!id || typeof id !== 'string' || !id.includes('.')) {\n\t\t\tconst errorMessage =\n\t\t\t\t'Invalid subtask ID format. 
Must be in format \"parentId.subtaskId\" (e.g., \"5.2\").';\n\t\t\tlogWrapper.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'INVALID_SUBTASK_ID', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\tif (!prompt) {\n\t\t\tconst errorMessage =\n\t\t\t\t'No prompt specified. Please provide the information to append.';\n\t\t\tlogWrapper.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_PROMPT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Validate subtask ID format\n\t\tconst subtaskId = id;\n\t\tif (typeof subtaskId !== 'string' && typeof subtaskId !== 'number') {\n\t\t\tconst errorMessage = `Invalid subtask ID type: ${typeof subtaskId}. Subtask ID must be a string or number.`;\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'INVALID_SUBTASK_ID_TYPE', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\tconst subtaskIdStr = String(subtaskId);\n\t\tif (!subtaskIdStr.includes('.')) {\n\t\t\tconst errorMessage = `Invalid subtask ID format: ${subtaskIdStr}. 
Subtask ID must be in format \"parentId.subtaskId\" (e.g., \"5.2\").`;\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'INVALID_SUBTASK_ID_FORMAT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Use the provided path\n\t\tconst tasksPath = tasksJsonPath;\n\t\tconst useResearch = research === true;\n\n\t\tlog.info(\n\t\t\t`Updating subtask with ID ${subtaskIdStr} with prompt \"${prompt}\" and research: ${useResearch}`\n\t\t);\n\n\t\tconst wasSilent = isSilentMode();\n\t\tif (!wasSilent) {\n\t\t\tenableSilentMode();\n\t\t}\n\n\t\ttry {\n\t\t\t// Execute core updateSubtaskById function\n\t\t\tconst coreResult = await updateSubtaskById(\n\t\t\t\ttasksPath,\n\t\t\t\tsubtaskIdStr,\n\t\t\t\tprompt,\n\t\t\t\tuseResearch,\n\t\t\t\t{\n\t\t\t\t\tmcpLog: logWrapper,\n\t\t\t\t\tsession,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\ttag,\n\t\t\t\t\tcommandName: 'update-subtask',\n\t\t\t\t\toutputType: 'mcp'\n\t\t\t\t},\n\t\t\t\t'json'\n\t\t\t);\n\n\t\t\tif (!coreResult || coreResult.updatedSubtask === null) {\n\t\t\t\tconst message = `Subtask ${id} or its parent task not found.`;\n\t\t\t\tlogWrapper.error(message);\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: { code: 'SUBTASK_NOT_FOUND', message: message }\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Subtask updated successfully\n\t\t\tconst successMessage = `Successfully updated subtask with ID ${subtaskIdStr}`;\n\t\t\tlogWrapper.success(successMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Successfully updated subtask with ID ${subtaskIdStr}`,\n\t\t\t\t\tsubtaskId: subtaskIdStr,\n\t\t\t\t\tparentId: subtaskIdStr.split('.')[0],\n\t\t\t\t\tsubtask: coreResult.updatedSubtask,\n\t\t\t\t\ttasksPath,\n\t\t\t\t\tuseResearch,\n\t\t\t\t\ttelemetryData: coreResult.telemetryData,\n\t\t\t\t\ttagInfo: coreResult.tagInfo\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tlogWrapper.error(`Error updating subtask by ID: ${error.message}`);\n\t\t\treturn 
{\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'UPDATE_SUBTASK_CORE_ERROR',\n\t\t\t\t\tmessage: error.message || 'Unknown error updating subtask'\n\t\t\t\t}\n\t\t\t};\n\t\t} finally {\n\t\t\tif (!wasSilent && isSilentMode()) {\n\t\t\t\tdisableSilentMode();\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tlogWrapper.error(\n\t\t\t`Setup error in updateSubtaskByIdDirect: ${error.message}`\n\t\t);\n\t\tif (isSilentMode()) disableSilentMode();\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'DIRECT_FUNCTION_SETUP_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown setup error'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/remove-task.js", "/**\n * remove-task.js\n * Direct function implementation for removing a task\n */\n\nimport {\n\tremoveTask,\n\ttaskExists\n} from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode,\n\treadJSON\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for removeTask with error handling.\n * Supports removing multiple tasks at once with comma-separated IDs.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.id - The ID(s) of the task(s) or subtask(s) to remove (comma-separated for multiple).\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @returns {Promise<Object>} - Remove task result { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function removeTaskDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, id, projectRoot, tag } = args;\n\tconst { session } = context;\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) 
{\n\t\t\tlog.error('removeTaskDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Validate task ID parameter\n\t\tif (!id) {\n\t\t\tlog.error('Task ID is required');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: 'Task ID is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Split task IDs if comma-separated\n\t\tconst taskIdArray = id.split(',').map((taskId) => taskId.trim());\n\n\t\tlog.info(\n\t\t\t`Removing ${taskIdArray.length} task(s) with ID(s): ${taskIdArray.join(', ')} from ${tasksJsonPath}${tag ? ` in tag '${tag}'` : ''}`\n\t\t);\n\n\t\t// Validate all task IDs exist before proceeding\n\t\tconst data = readJSON(tasksJsonPath, projectRoot, tag);\n\t\tif (!data || !data.tasks) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_TASKS_FILE',\n\t\t\t\t\tmessage: `No valid tasks found in ${tasksJsonPath}${tag ? ` for tag '${tag}'` : ''}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tconst invalidTasks = taskIdArray.filter(\n\t\t\t(taskId) => !taskExists(data.tasks, taskId)\n\t\t);\n\n\t\tif (invalidTasks.length > 0) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_TASK_ID',\n\t\t\t\t\tmessage: `The following tasks were not found${tag ? 
` in tag '${tag}'` : ''}: ${invalidTasks.join(', ')}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\ttry {\n\t\t\t// Call removeTask with proper context including tag\n\t\t\tconst result = await removeTask(tasksJsonPath, id, {\n\t\t\t\tprojectRoot,\n\t\t\t\ttag\n\t\t\t});\n\n\t\t\tif (!result.success) {\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'REMOVE_TASK_ERROR',\n\t\t\t\t\t\tmessage: result.error || 'Failed to remove tasks'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tlog.info(`Successfully removed ${result.removedTasks.length} task(s)`);\n\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\ttotalTasks: taskIdArray.length,\n\t\t\t\t\tsuccessful: result.removedTasks.length,\n\t\t\t\t\tfailed: taskIdArray.length - result.removedTasks.length,\n\t\t\t\t\tremovedTasks: result.removedTasks,\n\t\t\t\t\tmessage: result.message,\n\t\t\t\t\ttasksPath: tasksJsonPath,\n\t\t\t\t\ttag\n\t\t\t\t}\n\t\t\t};\n\t\t} finally {\n\t\t\t// Restore normal logging\n\t\t\tdisableSilentMode();\n\t\t}\n\t} catch (error) {\n\t\t// Ensure silent mode is disabled even if an outer error occurs\n\t\tdisableSilentMode();\n\n\t\t// Catch any unexpected errors\n\t\tlog.error(`Unexpected error in removeTaskDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'UNEXPECTED_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/analyze-task-complexity.js", "/**\n * Direct function wrapper for analyzeTaskComplexity\n */\n\nimport analyzeTaskComplexity from '../../../../scripts/modules/task-manager/analyze-task-complexity.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport fs from 'fs';\nimport { createLogWrapper } from '../../tools/utils.js'; // Import the 
new utility\n\n/**\n * Analyze task complexity and generate recommendations\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.outputPath - Explicit absolute path to save the report.\n * @param {string|number} [args.threshold] - Minimum complexity score to recommend expansion (1-10)\n * @param {boolean} [args.research] - Use Perplexity AI for research-backed complexity analysis\n * @param {string} [args.ids] - Comma-separated list of task IDs to analyze\n * @param {number} [args.from] - Starting task ID in a range to analyze\n * @param {number} [args.to] - Ending task ID in a range to analyze\n * @param {string} [args.projectRoot] - Project root path.\n * @param {string} [args.tag] - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @param {Object} [context={}] - Context object containing session data\n * @param {Object} [context.session] - MCP session object\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function analyzeTaskComplexityDirect(args, log, context = {}) {\n\tconst { session } = context;\n\tconst {\n\t\ttasksJsonPath,\n\t\toutputPath,\n\t\tthreshold,\n\t\tresearch,\n\t\tprojectRoot,\n\t\tids,\n\t\tfrom,\n\t\tto,\n\t\ttag\n\t} = args;\n\n\tconst logWrapper = createLogWrapper(log);\n\n\t// --- Initial Checks (remain the same) ---\n\ttry {\n\t\tlog.info(`Analyzing task complexity with args: ${JSON.stringify(args)}`);\n\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('analyzeTaskComplexityDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t\tif (!outputPath) {\n\t\t\tlog.error('analyzeTaskComplexityDirect called without outputPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 
'MISSING_ARGUMENT', message: 'outputPath is required' }\n\t\t\t};\n\t\t}\n\n\t\tconst tasksPath = tasksJsonPath;\n\t\tconst resolvedOutputPath = outputPath;\n\n\t\tlog.info(`Analyzing task complexity from: ${tasksPath}`);\n\t\tlog.info(`Output report will be saved to: ${resolvedOutputPath}`);\n\n\t\tif (ids) {\n\t\t\tlog.info(`Analyzing specific task IDs: ${ids}`);\n\t\t} else if (from || to) {\n\t\t\tconst fromStr = from !== undefined ? from : 'first';\n\t\t\tconst toStr = to !== undefined ? to : 'last';\n\t\t\tlog.info(`Analyzing tasks in range: ${fromStr} to ${toStr}`);\n\t\t}\n\n\t\tif (research) {\n\t\t\tlog.info('Using research role for complexity analysis');\n\t\t}\n\n\t\t// Prepare options for the core function - REMOVED mcpLog and session here\n\t\tconst coreOptions = {\n\t\t\tfile: tasksJsonPath,\n\t\t\toutput: outputPath,\n\t\t\tthreshold: threshold,\n\t\t\tresearch: research === true, // Ensure boolean\n\t\t\tprojectRoot: projectRoot, // Pass projectRoot here\n\t\t\tid: ids, // Pass the ids parameter to the core function as 'id'\n\t\t\tfrom: from, // Pass from parameter\n\t\t\tto: to, // Pass to parameter\n\t\t\ttag // forward tag\n\t\t};\n\t\t// --- End Initial Checks ---\n\n\t\t// --- Silent Mode and Logger Wrapper ---\n\t\tconst wasSilent = isSilentMode();\n\t\tif (!wasSilent) {\n\t\t\tenableSilentMode(); // Still enable silent mode as a backup\n\t\t}\n\n\t\tlet report;\n\t\tlet coreResult;\n\n\t\ttry {\n\t\t\t// --- Call Core Function (Pass context separately) ---\n\t\t\t// Pass coreOptions as the first argument\n\t\t\t// Pass context object { session, mcpLog } as the second argument\n\t\t\tcoreResult = await analyzeTaskComplexity(coreOptions, {\n\t\t\t\tsession,\n\t\t\t\tmcpLog: logWrapper,\n\t\t\t\tcommandName: 'analyze-complexity',\n\t\t\t\toutputType: 'mcp',\n\t\t\t\tprojectRoot,\n\t\t\t\ttag\n\t\t\t});\n\t\t\treport = coreResult.report;\n\t\t} catch (error) {\n\t\t\tlog.error(\n\t\t\t\t`Error in analyzeTaskComplexity core function: 
${error.message}`\n\t\t\t);\n\t\t\t// Restore logging if we changed it\n\t\t\tif (!wasSilent && isSilentMode()) {\n\t\t\t\tdisableSilentMode();\n\t\t\t}\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'ANALYZE_CORE_ERROR',\n\t\t\t\t\tmessage: `Error running core complexity analysis: ${error.message}`\n\t\t\t\t}\n\t\t\t};\n\t\t} finally {\n\t\t\t// Always restore normal logging in finally block if we enabled silent mode\n\t\t\tif (!wasSilent && isSilentMode()) {\n\t\t\t\tdisableSilentMode();\n\t\t\t}\n\t\t}\n\n\t\t// --- Result Handling (remains largely the same) ---\n\t\t// Verify the report file was created (core function writes it)\n\t\tif (!fs.existsSync(resolvedOutputPath)) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'ANALYZE_REPORT_MISSING', // Specific code\n\t\t\t\t\tmessage:\n\t\t\t\t\t\t'Analysis completed but no report file was created at the expected path.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (\n\t\t\t!coreResult ||\n\t\t\t!coreResult.report ||\n\t\t\ttypeof coreResult.report !== 'object'\n\t\t) {\n\t\t\tlog.error(\n\t\t\t\t'Core analysis function returned an invalid or undefined response.'\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_CORE_RESPONSE',\n\t\t\t\t\tmessage: 'Core analysis function returned an invalid response.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\ttry {\n\t\t\t// Ensure complexityAnalysis exists and is an array\n\t\t\tconst analysisArray = Array.isArray(coreResult.report.complexityAnalysis)\n\t\t\t\t? 
coreResult.report.complexityAnalysis\n\t\t\t\t: [];\n\n\t\t\t// Count tasks by complexity (remains the same)\n\t\t\tconst highComplexityTasks = analysisArray.filter(\n\t\t\t\t(t) => t.complexityScore >= 8\n\t\t\t).length;\n\t\t\tconst mediumComplexityTasks = analysisArray.filter(\n\t\t\t\t(t) => t.complexityScore >= 5 && t.complexityScore < 8\n\t\t\t).length;\n\t\t\tconst lowComplexityTasks = analysisArray.filter(\n\t\t\t\t(t) => t.complexityScore < 5\n\t\t\t).length;\n\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Task complexity analysis complete. Report saved to ${outputPath}`,\n\t\t\t\t\treportPath: outputPath,\n\t\t\t\t\treportSummary: {\n\t\t\t\t\t\ttaskCount: analysisArray.length,\n\t\t\t\t\t\thighComplexityTasks,\n\t\t\t\t\t\tmediumComplexityTasks,\n\t\t\t\t\t\tlowComplexityTasks\n\t\t\t\t\t},\n\t\t\t\t\tfullReport: coreResult.report,\n\t\t\t\t\ttelemetryData: coreResult.telemetryData,\n\t\t\t\t\ttagInfo: coreResult.tagInfo\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (parseError) {\n\t\t\t// Should not happen if core function returns object, but good safety check\n\t\t\tlog.error(`Internal error processing report data: ${parseError.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'REPORT_PROCESS_ERROR',\n\t\t\t\t\tmessage: `Internal error processing complexity report: ${parseError.message}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t\t// --- End Result Handling ---\n\t} catch (error) {\n\t\t// Catch errors from initial checks or path resolution\n\t\t// Make sure to restore normal logging if silent mode was enabled\n\t\tif (isSilentMode()) {\n\t\t\tdisableSilentMode();\n\t\t}\n\t\tlog.error(`Error in analyzeTaskComplexityDirect setup: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'DIRECT_FUNCTION_SETUP_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/task-manager/find-next-task.js", "import { log } from 
'../utils.js';\nimport { addComplexityToTask } from '../utils.js';\n\n/**\n * Return the next work item:\n * • Prefer an eligible SUBTASK that belongs to any parent task\n * whose own status is `in-progress`.\n * • If no such subtask exists, fall back to the best top-level task\n * (previous behaviour).\n *\n * The function still exports the same name (`findNextTask`) so callers\n * don't need to change. It now always returns an object with\n * ─ id → number (task) or \"parentId.subId\" (subtask)\n * ─ title → string\n * ─ status → string\n * ─ priority → string (\"high\" | \"medium\" | \"low\")\n * ─ dependencies → array (all IDs expressed in the same dotted form)\n * ─ parentId → number (present only when it's a subtask)\n *\n * @param {Object[]} tasks – full array of top-level tasks, each may contain .subtasks[]\n * @param {Object} [complexityReport=null] - Optional complexity report object\n * @returns {Object|null} – next work item or null if nothing is eligible\n */\nfunction findNextTask(tasks, complexityReport = null) {\n\t// ---------- helpers ----------------------------------------------------\n\tconst priorityValues = { high: 3, medium: 2, low: 1 };\n\n\tconst toFullSubId = (parentId, maybeDotId) => {\n\t\t// \"12.3\" -> \"12.3\"\n\t\t// 4 -> \"12.4\" (numeric / short form)\n\t\tif (typeof maybeDotId === 'string' && maybeDotId.includes('.')) {\n\t\t\treturn maybeDotId;\n\t\t}\n\t\treturn `${parentId}.${maybeDotId}`;\n\t};\n\n\t// ---------- build completed-ID set (tasks *and* subtasks) --------------\n\tconst completedIds = new Set();\n\ttasks.forEach((t) => {\n\t\tif (t.status === 'done' || t.status === 'completed') {\n\t\t\tcompletedIds.add(String(t.id));\n\t\t}\n\t\tif (Array.isArray(t.subtasks)) {\n\t\t\tt.subtasks.forEach((st) => {\n\t\t\t\tif (st.status === 'done' || st.status === 'completed') {\n\t\t\t\t\tcompletedIds.add(`${t.id}.${st.id}`);\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\n\t// ---------- 1) look for eligible subtasks 
------------------------------\n\tconst candidateSubtasks = [];\n\n\ttasks\n\t\t.filter((t) => t.status === 'in-progress' && Array.isArray(t.subtasks))\n\t\t.forEach((parent) => {\n\t\t\tparent.subtasks.forEach((st) => {\n\t\t\t\tconst stStatus = (st.status || 'pending').toLowerCase();\n\t\t\t\tif (stStatus !== 'pending' && stStatus !== 'in-progress') return;\n\n\t\t\t\tconst fullDeps =\n\t\t\t\t\tst.dependencies?.map((d) => toFullSubId(parent.id, d)) ?? [];\n\n\t\t\t\tconst depsSatisfied =\n\t\t\t\t\tfullDeps.length === 0 ||\n\t\t\t\t\tfullDeps.every((depId) => completedIds.has(String(depId)));\n\n\t\t\t\tif (depsSatisfied) {\n\t\t\t\t\tcandidateSubtasks.push({\n\t\t\t\t\t\tid: `${parent.id}.${st.id}`,\n\t\t\t\t\t\ttitle: st.title || `Subtask ${st.id}`,\n\t\t\t\t\t\tstatus: st.status || 'pending',\n\t\t\t\t\t\tpriority: st.priority || parent.priority || 'medium',\n\t\t\t\t\t\tdependencies: fullDeps,\n\t\t\t\t\t\tparentId: parent.id\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t});\n\t\t});\n\n\tif (candidateSubtasks.length > 0) {\n\t\t// sort by priority → dep-count → parent-id → sub-id\n\t\tcandidateSubtasks.sort((a, b) => {\n\t\t\tconst pa = priorityValues[a.priority] ?? 2;\n\t\t\tconst pb = priorityValues[b.priority] ?? 
2;\n\t\t\tif (pb !== pa) return pb - pa;\n\n\t\t\tif (a.dependencies.length !== b.dependencies.length)\n\t\t\t\treturn a.dependencies.length - b.dependencies.length;\n\n\t\t\t// compare parent then sub-id numerically\n\t\t\tconst [aPar, aSub] = a.id.split('.').map(Number);\n\t\t\tconst [bPar, bSub] = b.id.split('.').map(Number);\n\t\t\tif (aPar !== bPar) return aPar - bPar;\n\t\t\treturn aSub - bSub;\n\t\t});\n\t\tconst nextTask = candidateSubtasks[0];\n\n\t\t// Add complexity to the task before returning\n\t\tif (nextTask && complexityReport) {\n\t\t\taddComplexityToTask(nextTask, complexityReport);\n\t\t}\n\n\t\treturn nextTask;\n\t}\n\n\t// ---------- 2) fall back to top-level tasks (original logic) ------------\n\tconst eligibleTasks = tasks.filter((task) => {\n\t\tconst status = (task.status || 'pending').toLowerCase();\n\t\tif (status !== 'pending' && status !== 'in-progress') return false;\n\t\tconst deps = task.dependencies ?? [];\n\t\treturn deps.every((depId) => completedIds.has(String(depId)));\n\t});\n\n\tif (eligibleTasks.length === 0) return null;\n\n\tconst nextTask = eligibleTasks.sort((a, b) => {\n\t\tconst pa = priorityValues[a.priority || 'medium'] ?? 2;\n\t\tconst pb = priorityValues[b.priority || 'medium'] ?? 2;\n\t\tif (pb !== pa) return pb - pa;\n\n\t\tconst da = (a.dependencies ?? []).length;\n\t\tconst db = (b.dependencies ?? 
[]).length;\n\t\tif (da !== db) return da - db;\n\n\t\treturn a.id - b.id;\n\t})[0];\n\n\t// Add complexity to the task before returning\n\tif (nextTask && complexityReport) {\n\t\taddComplexityToTask(nextTask, complexityReport);\n\t}\n\n\treturn nextTask;\n}\n\nexport default findNextTask;\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/add-subtask.js", "/**\n * Direct function wrapper for addSubtask\n */\n\nimport { addSubtask } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Add a subtask to an existing task\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.id - Parent task ID\n * @param {string} [args.taskId] - Existing task ID to convert to subtask (optional)\n * @param {string} [args.title] - Title for new subtask (when creating a new subtask)\n * @param {string} [args.description] - Description for new subtask\n * @param {string} [args.details] - Implementation details for new subtask\n * @param {string} [args.status] - Status for new subtask (default: 'pending')\n * @param {string} [args.dependencies] - Comma-separated list of dependency IDs\n * @param {boolean} [args.skipGenerate] - Skip regenerating task files\n * @param {string} [args.projectRoot] - Project root directory\n * @param {string} [args.tag] - Tag for the task\n * @param {Object} log - Logger object\n * @returns {Promise<{success: boolean, data?: Object, error?: string}>}\n */\nexport async function addSubtaskDirect(args, log) {\n\t// Destructure expected args\n\tconst {\n\t\ttasksJsonPath,\n\t\tid,\n\t\ttaskId,\n\t\ttitle,\n\t\tdescription,\n\t\tdetails,\n\t\tstatus,\n\t\tdependencies: dependenciesStr,\n\t\tskipGenerate,\n\t\tprojectRoot,\n\t\ttag\n\t} = args;\n\ttry {\n\t\tlog.info(`Adding subtask with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if 
tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('addSubtaskDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (!id) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: 'Parent task ID is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Either taskId or title must be provided\n\t\tif (!taskId && !title) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: 'Either taskId or title must be provided'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Parse dependencies if provided\n\t\tlet dependencies = [];\n\t\tif (dependenciesStr) {\n\t\t\tdependencies = dependenciesStr.split(',').map((depId) => {\n\t\t\t\t// Handle both regular IDs and dot notation\n\t\t\t\treturn depId.includes('.') ? depId.trim() : parseInt(depId.trim(), 10);\n\t\t\t});\n\t\t}\n\n\t\t// Convert existingTaskId to a number if provided\n\t\tconst existingTaskId = taskId ? 
parseInt(taskId, 10) : null;\n\n\t\t// Convert parent ID to a number\n\t\tconst parentId = parseInt(id, 10);\n\n\t\t// Determine if we should generate files\n\t\tconst generateFiles = !skipGenerate;\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\tconst context = { projectRoot, tag };\n\n\t\t// Case 1: Convert existing task to subtask\n\t\tif (existingTaskId) {\n\t\t\tlog.info(`Converting task ${existingTaskId} to a subtask of ${parentId}`);\n\t\t\tconst result = await addSubtask(\n\t\t\t\ttasksPath,\n\t\t\t\tparentId,\n\t\t\t\texistingTaskId,\n\t\t\t\tnull,\n\t\t\t\tgenerateFiles,\n\t\t\t\tcontext\n\t\t\t);\n\n\t\t\t// Restore normal logging\n\t\t\tdisableSilentMode();\n\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Task ${existingTaskId} successfully converted to a subtask of task ${parentId}`,\n\t\t\t\t\tsubtask: result\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t\t// Case 2: Create new subtask\n\t\telse {\n\t\t\tlog.info(`Creating new subtask for parent task ${parentId}`);\n\n\t\t\tconst newSubtaskData = {\n\t\t\t\ttitle: title,\n\t\t\t\tdescription: description || '',\n\t\t\t\tdetails: details || '',\n\t\t\t\tstatus: status || 'pending',\n\t\t\t\tdependencies: dependencies\n\t\t\t};\n\n\t\t\tconst result = await addSubtask(\n\t\t\t\ttasksPath,\n\t\t\t\tparentId,\n\t\t\t\tnull,\n\t\t\t\tnewSubtaskData,\n\t\t\t\tgenerateFiles,\n\t\t\t\tcontext\n\t\t\t);\n\n\t\t\t// Restore normal logging\n\t\t\tdisableSilentMode();\n\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `New subtask ${parentId}.${result.id} successfully created`,\n\t\t\t\t\tsubtask: result\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in addSubtaskDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 
'CORE_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/next-task.js", "/**\n * next-task.js\n * Direct function implementation for finding the next task to work on\n */\n\nimport { findNextTask } from '../../../../scripts/modules/task-manager.js';\nimport {\n\treadJSON,\n\treadComplexityReport\n} from '../../../../scripts/modules/utils.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for finding the next task to work on with error handling and caching.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.reportPath - Path to the report file.\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @returns {Promise<Object>} - Next task result { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function nextTaskDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, reportPath, projectRoot, tag } = args;\n\tconst { session } = context;\n\n\tif (!tasksJsonPath) {\n\t\tlog.error('nextTaskDirect called without tasksJsonPath');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t}\n\t\t};\n\t}\n\n\t// Define the action function to be executed on cache miss\n\tconst coreNextTaskAction = async () => {\n\t\ttry {\n\t\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\t\tenableSilentMode();\n\n\t\t\tlog.info(`Finding next task from ${tasksJsonPath}`);\n\n\t\t\t// Read tasks data using the provided path\n\t\t\tconst data = readJSON(tasksJsonPath, projectRoot, tag);\n\t\t\tif 
(!data || !data.tasks) {\n\t\t\t\tdisableSilentMode(); // Disable before return\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'INVALID_TASKS_FILE',\n\t\t\t\t\t\tmessage: `No valid tasks found in ${tasksJsonPath}`\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Read the complexity report\n\t\t\tconst complexityReport = readComplexityReport(reportPath);\n\n\t\t\t// Find the next task\n\t\t\tconst nextTask = findNextTask(data.tasks, complexityReport);\n\n\t\t\tif (!nextTask) {\n\t\t\t\tlog.info(\n\t\t\t\t\t'No eligible next task found. All tasks are either completed or have unsatisfied dependencies'\n\t\t\t\t);\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: true,\n\t\t\t\t\tdata: {\n\t\t\t\t\t\tmessage:\n\t\t\t\t\t\t\t'No eligible next task found. All tasks are either completed or have unsatisfied dependencies',\n\t\t\t\t\t\tnextTask: null\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Check if it's a subtask\n\t\t\tconst isSubtask =\n\t\t\t\ttypeof nextTask.id === 'string' && nextTask.id.includes('.');\n\n\t\t\tconst taskOrSubtask = isSubtask ? 'subtask' : 'task';\n\n\t\t\tconst additionalAdvice = isSubtask\n\t\t\t\t? 'Subtasks can be updated with timestamped details as you implement them. This is useful for tracking progress, marking milestones and insights (of successful or successive falures in attempting to implement the subtask). Research can be used when updating the subtask to collect up-to-date information, and can be helpful to solve a repeating problem the agent is unable to solve. It is a good idea to get-task the parent task to collect the overall context of the task, and to get-task the subtask to collect the specific details of the subtask.'\n\t\t\t\t: 'Tasks can be updated to reflect a change in the direction of the task, or to reformulate the task per your prompt. Research can be used when updating the task to collect up-to-date information. 
It is best to update subtasks as you work on them, and to update the task for more high-level changes that may affect pending subtasks or the general direction of the task.';\n\n\t\t\t// Restore normal logging\n\t\t\tdisableSilentMode();\n\n\t\t\t// Return the next task data with the full tasks array for reference\n\t\t\tlog.info(\n\t\t\t\t`Successfully found next task ${nextTask.id}: ${nextTask.title}. Is subtask: ${isSubtask}`\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tnextTask,\n\t\t\t\t\tisSubtask,\n\t\t\t\t\tnextSteps: `When ready to work on the ${taskOrSubtask}, use set-status to set the status to \"in progress\" ${additionalAdvice}`\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\t// Make sure to restore normal logging even if there's an error\n\t\t\tdisableSilentMode();\n\n\t\t\tlog.error(`Error finding next task: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\t\tmessage: error.message || 'Failed to find next task'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t};\n\n\t// Use the caching utility\n\ttry {\n\t\tconst result = await coreNextTaskAction();\n\t\tlog.info('nextTaskDirect completed.');\n\t\treturn result;\n\t} catch (error) {\n\t\tlog.error(`Unexpected error during nextTask: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'UNEXPECTED_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/update-tasks.js", "/**\n * update-tasks.js\n * Direct function implementation for updating tasks based on new context\n */\n\nimport path from 'path';\nimport { updateTasks } from '../../../../scripts/modules/task-manager.js';\nimport { createLogWrapper } from '../../tools/utils.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for updating tasks based on new 
context.\n *\n * @param {Object} args - Command arguments containing projectRoot, from, prompt, research options.\n * @param {string} args.from - The ID of the task to update.\n * @param {string} args.prompt - The prompt to update the task with.\n * @param {boolean} args.research - Whether to use research mode.\n * @param {string} args.tasksJsonPath - Path to the tasks.json file.\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object.\n * @param {Object} context - Context object containing session data.\n * @returns {Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function updateTasksDirect(args, log, context = {}) {\n\tconst { session } = context;\n\tconst { from, prompt, research, tasksJsonPath, projectRoot, tag } = args;\n\n\t// Create the standard logger wrapper\n\tconst logWrapper = createLogWrapper(log);\n\n\t// --- Input Validation ---\n\tif (!projectRoot) {\n\t\tlogWrapper.error('updateTasksDirect requires a projectRoot argument.');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'projectRoot is required.'\n\t\t\t}\n\t\t};\n\t}\n\n\tif (!from) {\n\t\tlogWrapper.error('updateTasksDirect called without from ID');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'Starting task ID (from) is required'\n\t\t\t}\n\t\t};\n\t}\n\n\tif (!prompt) {\n\t\tlogWrapper.error('updateTasksDirect called without prompt');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'Update prompt is required'\n\t\t\t}\n\t\t};\n\t}\n\n\tlogWrapper.info(\n\t\t`Updating tasks via direct function. 
From: ${from}, Research: ${research}, File: ${tasksJsonPath}, ProjectRoot: ${projectRoot}`\n\t);\n\n\tenableSilentMode(); // Enable silent mode\n\ttry {\n\t\t// Call the core updateTasks function\n\t\tconst result = await updateTasks(\n\t\t\ttasksJsonPath,\n\t\t\tfrom,\n\t\t\tprompt,\n\t\t\tresearch,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog: logWrapper,\n\t\t\t\tprojectRoot,\n\t\t\t\ttag\n\t\t\t},\n\t\t\t'json'\n\t\t);\n\n\t\tif (result && result.success && Array.isArray(result.updatedTasks)) {\n\t\t\tlogWrapper.success(\n\t\t\t\t`Successfully updated ${result.updatedTasks.length} tasks.`\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Successfully updated ${result.updatedTasks.length} tasks.`,\n\t\t\t\t\ttasksPath: tasksJsonPath,\n\t\t\t\t\tupdatedCount: result.updatedTasks.length,\n\t\t\t\t\ttelemetryData: result.telemetryData,\n\t\t\t\t\ttagInfo: result.tagInfo\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// Handle case where core function didn't return expected success structure\n\t\t\tlogWrapper.error(\n\t\t\t\t'Core updateTasks function did not return a successful structure.'\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\t\tmessage:\n\t\t\t\t\t\tresult?.message ||\n\t\t\t\t\t\t'Core function failed to update tasks or returned unexpected result.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\tlogWrapper.error(`Error executing core updateTasks: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'UPDATE_TASKS_CORE_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown error updating tasks'\n\t\t\t}\n\t\t};\n\t} finally {\n\t\tdisableSilentMode(); // Ensure silent mode is disabled\n\t}\n}\n"], ["/claude-task-master/bin/task-master.js", "#!/usr/bin/env node\n\n/**\n * Task Master\n * Copyright (c) 2025 Eyal Toledano, Ralph Khreish\n *\n * This software is licensed under the MIT License with Commons Clause.\n * You 
may use this software for any purpose, including commercial applications,\n * and modify and redistribute it freely, subject to the following restrictions:\n *\n * 1. You may not sell this software or offer it as a service.\n * 2. The origin of this software must not be misrepresented.\n * 3. Altered source versions must be plainly marked as such.\n *\n * For the full license text, see the LICENSE file in the root directory.\n */\n\n/**\n * Claude Task Master CLI\n * Main entry point for globally installed package\n */\n\nimport { fileURLToPath } from 'url';\nimport { dirname, resolve } from 'path';\nimport { createRequire } from 'module';\nimport { spawn } from 'child_process';\nimport { Command } from 'commander';\nimport { displayHelp, displayBanner } from '../scripts/modules/ui.js';\nimport { registerCommands } from '../scripts/modules/commands.js';\nimport { detectCamelCaseFlags } from '../scripts/modules/utils.js';\nimport chalk from 'chalk';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = dirname(__filename);\nconst require = createRequire(import.meta.url);\n\n// Get package information\nconst packageJson = require('../package.json');\nconst version = packageJson.version;\n\n// Get paths to script files\nconst devScriptPath = resolve(__dirname, '../scripts/dev.js');\nconst initScriptPath = resolve(__dirname, '../scripts/init.js');\n\n// Helper function to run dev.js with arguments\nfunction runDevScript(args) {\n\t// Debug: Show the transformed arguments when DEBUG=1 is set\n\tif (process.env.DEBUG === '1') {\n\t\tconsole.error('\\nDEBUG - CLI Wrapper Analysis:');\n\t\tconsole.error('- Original command: ' + process.argv.join(' '));\n\t\tconsole.error('- Transformed args: ' + args.join(' '));\n\t\tconsole.error(\n\t\t\t'- dev.js will receive: node ' +\n\t\t\t\tdevScriptPath +\n\t\t\t\t' ' +\n\t\t\t\targs.join(' ') +\n\t\t\t\t'\\n'\n\t\t);\n\t}\n\n\t// For testing: If TEST_MODE is set, just print args and exit\n\tif 
(process.env.TEST_MODE === '1') {\n\t\tconsole.log('Would execute:');\n\t\tconsole.log(`node ${devScriptPath} ${args.join(' ')}`);\n\t\tprocess.exit(0);\n\t\treturn;\n\t}\n\n\tconst child = spawn('node', [devScriptPath, ...args], {\n\t\tstdio: 'inherit',\n\t\tcwd: process.cwd()\n\t});\n\n\tchild.on('close', (code) => {\n\t\tprocess.exit(code);\n\t});\n}\n\n// Helper function to detect camelCase and convert to kebab-case\nconst toKebabCase = (str) => str.replace(/([A-Z])/g, '-$1').toLowerCase();\n\n/**\n * Create a wrapper action that passes the command to dev.js\n * @param {string} commandName - The name of the command\n * @returns {Function} Wrapper action function\n */\nfunction createDevScriptAction(commandName) {\n\treturn (options, cmd) => {\n\t\t// Check for camelCase flags and error out with helpful message\n\t\tconst camelCaseFlags = detectCamelCaseFlags(process.argv);\n\n\t\t// If camelCase flags were found, show error and exit\n\t\tif (camelCaseFlags.length > 0) {\n\t\t\tconsole.error('\\nError: Please use kebab-case for CLI flags:');\n\t\t\tcamelCaseFlags.forEach((flag) => {\n\t\t\t\tconsole.error(` Instead of: --${flag.original}`);\n\t\t\t\tconsole.error(` Use: --${flag.kebabCase}`);\n\t\t\t});\n\t\t\tconsole.error(\n\t\t\t\t'\\nExample: task-master parse-prd --num-tasks=5 instead of --numTasks=5\\n'\n\t\t\t);\n\t\t\tprocess.exit(1);\n\t\t}\n\n\t\t// Since we've ensured no camelCase flags, we can now just:\n\t\t// 1. Start with the command name\n\t\tconst args = [commandName];\n\n\t\t// 3. 
Get positional arguments and explicit flags from the command line\n\t\tconst commandArgs = [];\n\t\tconst positionals = new Set(); // Track positional args we've seen\n\n\t\t// Find the command in raw process.argv to extract args\n\t\tconst commandIndex = process.argv.indexOf(commandName);\n\t\tif (commandIndex !== -1) {\n\t\t\t// Process all args after the command name\n\t\t\tfor (let i = commandIndex + 1; i < process.argv.length; i++) {\n\t\t\t\tconst arg = process.argv[i];\n\n\t\t\t\tif (arg.startsWith('--')) {\n\t\t\t\t\t// It's a flag - pass through as is\n\t\t\t\t\tcommandArgs.push(arg);\n\t\t\t\t\t// Skip the next arg if this is a flag with a value (not --flag=value format)\n\t\t\t\t\tif (\n\t\t\t\t\t\t!arg.includes('=') &&\n\t\t\t\t\t\ti + 1 < process.argv.length &&\n\t\t\t\t\t\t!process.argv[i + 1].startsWith('--')\n\t\t\t\t\t) {\n\t\t\t\t\t\tcommandArgs.push(process.argv[++i]);\n\t\t\t\t\t}\n\t\t\t\t} else if (!positionals.has(arg)) {\n\t\t\t\t\t// It's a positional argument we haven't seen\n\t\t\t\t\tcommandArgs.push(arg);\n\t\t\t\t\tpositionals.add(arg);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Add all command line args we collected\n\t\targs.push(...commandArgs);\n\n\t\t// 4. 
Add default options from Commander if not specified on command line\n\t\t// Track which options we've seen on the command line\n\t\tconst userOptions = new Set();\n\t\tfor (const arg of commandArgs) {\n\t\t\tif (arg.startsWith('--')) {\n\t\t\t\t// Extract option name (without -- and value)\n\t\t\t\tconst name = arg.split('=')[0].slice(2);\n\t\t\t\tuserOptions.add(name);\n\n\t\t\t\t// Add the kebab-case version too, to prevent duplicates\n\t\t\t\tconst kebabName = name.replace(/([A-Z])/g, '-$1').toLowerCase();\n\t\t\t\tuserOptions.add(kebabName);\n\n\t\t\t\t// Add the camelCase version as well\n\t\t\t\tconst camelName = kebabName.replace(/-([a-z])/g, (_, letter) =>\n\t\t\t\t\tletter.toUpperCase()\n\t\t\t\t);\n\t\t\t\tuserOptions.add(camelName);\n\t\t\t}\n\t\t}\n\n\t\t// Add Commander-provided defaults for options not specified by user\n\t\tObject.entries(options).forEach(([key, value]) => {\n\t\t\t// Debug output to see what keys we're getting\n\t\t\tif (process.env.DEBUG === '1') {\n\t\t\t\tconsole.error(`DEBUG - Processing option: ${key} = ${value}`);\n\t\t\t}\n\n\t\t\t// Special case for numTasks > num-tasks (a known problem case)\n\t\t\tif (key === 'numTasks') {\n\t\t\t\tif (process.env.DEBUG === '1') {\n\t\t\t\t\tconsole.error('DEBUG - Converting numTasks to num-tasks');\n\t\t\t\t}\n\t\t\t\tif (!userOptions.has('num-tasks') && !userOptions.has('numTasks')) {\n\t\t\t\t\targs.push(`--num-tasks=${value}`);\n\t\t\t\t}\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Skip built-in Commander properties and options the user provided\n\t\t\tif (\n\t\t\t\t['parent', 'commands', 'options', 'rawArgs'].includes(key) ||\n\t\t\t\tuserOptions.has(key)\n\t\t\t) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Also check the kebab-case version of this key\n\t\t\tconst kebabKey = key.replace(/([A-Z])/g, '-$1').toLowerCase();\n\t\t\tif (userOptions.has(kebabKey)) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Add default values, using kebab-case for the parameter name\n\t\t\tif (value !== undefined) 
{\n\t\t\t\tif (typeof value === 'boolean') {\n\t\t\t\t\tif (value === true) {\n\t\t\t\t\t\targs.push(`--${kebabKey}`);\n\t\t\t\t\t} else if (value === false && key === 'generate') {\n\t\t\t\t\t\targs.push('--skip-generate');\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Always use kebab-case for option names\n\t\t\t\t\targs.push(`--${kebabKey}=${value}`);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\n\t\t// Special handling for parent parameter (uses -p)\n\t\tif (options.parent && !args.includes('-p') && !userOptions.has('parent')) {\n\t\t\targs.push('-p', options.parent);\n\t\t}\n\n\t\t// Debug output for troubleshooting\n\t\tif (process.env.DEBUG === '1') {\n\t\t\tconsole.error('DEBUG - Command args:', commandArgs);\n\t\t\tconsole.error('DEBUG - User options:', Array.from(userOptions));\n\t\t\tconsole.error('DEBUG - Commander options:', options);\n\t\t\tconsole.error('DEBUG - Final args:', args);\n\t\t}\n\n\t\t// Run the script with our processed args\n\t\trunDevScript(args);\n\t};\n}\n\n// // Special case for the 'init' command which uses a different script\n// function registerInitCommand(program) {\n// \tprogram\n// \t\t.command('init')\n// \t\t.description('Initialize a new project')\n// \t\t.option('-y, --yes', 'Skip prompts and use default values')\n// \t\t.option('-n, --name <name>', 'Project name')\n// \t\t.option('-d, --description <description>', 'Project description')\n// \t\t.option('-v, --version <version>', 'Project version')\n// \t\t.option('-a, --author <author>', 'Author name')\n// \t\t.option('--skip-install', 'Skip installing dependencies')\n// \t\t.option('--dry-run', 'Show what would be done without making changes')\n// \t\t.action((options) => {\n// \t\t\t// Pass through any options to the init script\n// \t\t\tconst args = [\n// \t\t\t\t'--yes',\n// \t\t\t\t'name',\n// \t\t\t\t'description',\n// \t\t\t\t'version',\n// \t\t\t\t'author',\n// \t\t\t\t'skip-install',\n// \t\t\t\t'dry-run'\n// \t\t\t]\n// \t\t\t\t.filter((opt) => options[opt])\n// 
\t\t\t\t.map((opt) => {\n// \t\t\t\t\tif (opt === 'yes' || opt === 'skip-install' || opt === 'dry-run') {\n// \t\t\t\t\t\treturn `--${opt}`;\n// \t\t\t\t\t}\n// \t\t\t\t\treturn `--${opt}=${options[opt]}`;\n// \t\t\t\t});\n\n// \t\t\tconst child = spawn('node', [initScriptPath, ...args], {\n// \t\t\t\tstdio: 'inherit',\n// \t\t\t\tcwd: process.cwd()\n// \t\t\t});\n\n// \t\t\tchild.on('close', (code) => {\n// \t\t\t\tprocess.exit(code);\n// \t\t\t});\n// \t\t});\n// }\n\n// Set up the command-line interface\nconst program = new Command();\n\nprogram\n\t.name('task-master')\n\t.description('Claude Task Master CLI')\n\t.version(version)\n\t.addHelpText('afterAll', () => {\n\t\t// Use the same help display function as dev.js for consistency\n\t\tdisplayHelp();\n\t\treturn ''; // Return empty string to prevent commander's default help\n\t});\n\n// Add custom help option to directly call our help display\nprogram.helpOption('-h, --help', 'Display help information');\nprogram.on('--help', () => {\n\tdisplayHelp();\n});\n\n// // Add special case commands\n// registerInitCommand(program);\n\nprogram\n\t.command('dev')\n\t.description('Run the dev.js script')\n\t.action(() => {\n\t\tconst args = process.argv.slice(process.argv.indexOf('dev') + 1);\n\t\trunDevScript(args);\n\t});\n\n// Use a temporary Command instance to get all command definitions\nconst tempProgram = new Command();\nregisterCommands(tempProgram);\n\n// For each command in the temp instance, add a modified version to our actual program\ntempProgram.commands.forEach((cmd) => {\n\tif (['dev'].includes(cmd.name())) {\n\t\t// Skip commands we've already defined specially\n\t\treturn;\n\t}\n\n\t// Create a new command with the same name and description\n\tconst newCmd = program.command(cmd.name()).description(cmd.description());\n\n\t// Copy all options\n\tcmd.options.forEach((opt) => {\n\t\tnewCmd.option(opt.flags, opt.description, opt.defaultValue);\n\t});\n\n\t// Set the action to proxy to 
dev.js\n\tnewCmd.action(createDevScriptAction(cmd.name()));\n});\n\n// Parse the command line arguments\nprogram.parse(process.argv);\n\n// Add global error handling for unknown commands and options\nprocess.on('uncaughtException', (err) => {\n\t// Check if this is a commander.js unknown option error\n\tif (err.code === 'commander.unknownOption') {\n\t\tconst option = err.message.match(/'([^']+)'/)?.[1];\n\t\tconst commandArg = process.argv.find(\n\t\t\t(arg) =>\n\t\t\t\t!arg.startsWith('-') &&\n\t\t\t\targ !== 'task-master' &&\n\t\t\t\t!arg.includes('/') &&\n\t\t\t\targ !== 'node'\n\t\t);\n\t\tconst command = commandArg || 'unknown';\n\n\t\tconsole.error(chalk.red(`Error: Unknown option '${option}'`));\n\t\tconsole.error(\n\t\t\tchalk.yellow(\n\t\t\t\t`Run 'task-master ${command} --help' to see available options for this command`\n\t\t\t)\n\t\t);\n\t\tprocess.exit(1);\n\t}\n\n\t// Check if this is a commander.js unknown command error\n\tif (err.code === 'commander.unknownCommand') {\n\t\tconst command = err.message.match(/'([^']+)'/)?.[1];\n\n\t\tconsole.error(chalk.red(`Error: Unknown command '${command}'`));\n\t\tconsole.error(\n\t\t\tchalk.yellow(`Run 'task-master --help' to see available commands`)\n\t\t);\n\t\tprocess.exit(1);\n\t}\n\n\t// Handle other uncaught exceptions\n\tconsole.error(chalk.red(`Error: ${err.message}`));\n\tif (process.env.DEBUG === '1') {\n\t\tconsole.error(err);\n\t}\n\tprocess.exit(1);\n});\n\n// Show help if no command was provided (just 'task-master' with no args)\nif (process.argv.length <= 2) {\n\tdisplayBanner();\n\tdisplayHelp();\n\tprocess.exit(0);\n}\n\n// Add exports at the end of the file\nexport { detectCamelCaseFlags };\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/parse-prd.js", "/**\n * parse-prd.js\n * Direct function implementation for parsing PRD documents\n */\n\nimport path from 'path';\nimport fs from 'fs';\nimport { parsePRD } from '../../../../scripts/modules/task-manager.js';\nimport 
{\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\nimport { getDefaultNumTasks } from '../../../../scripts/modules/config-manager.js';\nimport { resolvePrdPath, resolveProjectPath } from '../utils/path-utils.js';\nimport { TASKMASTER_TASKS_FILE } from '../../../../src/constants/paths.js';\n\n/**\n * Direct function wrapper for parsing PRD documents and generating tasks.\n *\n * @param {Object} args - Command arguments containing projectRoot, input, output, numTasks options.\n * @param {string} args.input - Path to the input PRD file.\n * @param {string} args.output - Path to the output directory.\n * @param {string} args.numTasks - Number of tasks to generate.\n * @param {boolean} args.force - Whether to force parsing.\n * @param {boolean} args.append - Whether to append to the output file.\n * @param {boolean} args.research - Whether to use research mode.\n * @param {string} args.tag - Tag context for organizing tasks into separate task lists.\n * @param {Object} log - Logger object.\n * @param {Object} context - Context object containing session data.\n * @returns {Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function parsePRDDirect(args, log, context = {}) {\n\tconst { session } = context;\n\t// Extract projectRoot from args\n\tconst {\n\t\tinput: inputArg,\n\t\toutput: outputArg,\n\t\tnumTasks: numTasksArg,\n\t\tforce,\n\t\tappend,\n\t\tresearch,\n\t\tprojectRoot,\n\t\ttag\n\t} = args;\n\n\t// Create the standard logger wrapper\n\tconst logWrapper = createLogWrapper(log);\n\n\t// --- Input Validation and Path Resolution ---\n\tif (!projectRoot) {\n\t\tlogWrapper.error('parsePRDDirect requires a projectRoot argument.');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'projectRoot is required.'\n\t\t\t}\n\t\t};\n\t}\n\n\t// Resolve input 
path using path utilities\n\tlet inputPath;\n\tif (inputArg) {\n\t\ttry {\n\t\t\tinputPath = resolvePrdPath({ input: inputArg, projectRoot }, session);\n\t\t} catch (error) {\n\t\t\tlogWrapper.error(`Error resolving PRD path: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'FILE_NOT_FOUND', message: error.message }\n\t\t\t};\n\t\t}\n\t} else {\n\t\tlogWrapper.error('parsePRDDirect called without input path');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: { code: 'MISSING_ARGUMENT', message: 'Input path is required' }\n\t\t};\n\t}\n\n\t// Resolve output path - use new path utilities for default\n\tconst outputPath = outputArg\n\t\t? path.isAbsolute(outputArg)\n\t\t\t? outputArg\n\t\t\t: path.resolve(projectRoot, outputArg)\n\t\t: resolveProjectPath(TASKMASTER_TASKS_FILE, args) ||\n\t\t\tpath.resolve(projectRoot, TASKMASTER_TASKS_FILE);\n\n\t// Check if input file exists\n\tif (!fs.existsSync(inputPath)) {\n\t\tconst errorMsg = `Input PRD file not found at resolved path: ${inputPath}`;\n\t\tlogWrapper.error(errorMsg);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: { code: 'FILE_NOT_FOUND', message: errorMsg }\n\t\t};\n\t}\n\n\tconst outputDir = path.dirname(outputPath);\n\ttry {\n\t\tif (!fs.existsSync(outputDir)) {\n\t\t\tlogWrapper.info(`Creating output directory: ${outputDir}`);\n\t\t\tfs.mkdirSync(outputDir, { recursive: true });\n\t\t}\n\t} catch (error) {\n\t\tconst errorMsg = `Failed to create output directory ${outputDir}: ${error.message}`;\n\t\tlogWrapper.error(errorMsg);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: { code: 'DIRECTORY_CREATE_FAILED', message: errorMsg }\n\t\t};\n\t}\n\n\tlet numTasks = getDefaultNumTasks(projectRoot);\n\tif (numTasksArg) {\n\t\tnumTasks =\n\t\t\ttypeof numTasksArg === 'string' ? 
parseInt(numTasksArg, 10) : numTasksArg;\n\t\tif (Number.isNaN(numTasks) || numTasks < 0) {\n\t\t\t// Ensure positive number\n\t\t\tnumTasks = getDefaultNumTasks(projectRoot); // Fallback to default if parsing fails or invalid\n\t\t\tlogWrapper.warn(\n\t\t\t\t`Invalid numTasks value: ${numTasksArg}. Using default: ${numTasks}`\n\t\t\t);\n\t\t}\n\t}\n\n\tif (append) {\n\t\tlogWrapper.info('Append mode enabled.');\n\t\tif (force) {\n\t\t\tlogWrapper.warn(\n\t\t\t\t'Both --force and --append flags were provided. --force takes precedence; append mode will be ignored.'\n\t\t\t);\n\t\t}\n\t}\n\n\tif (research) {\n\t\tlogWrapper.info(\n\t\t\t'Research mode enabled. Using Perplexity AI for enhanced PRD analysis.'\n\t\t);\n\t}\n\n\tlogWrapper.info(\n\t\t`Parsing PRD via direct function. Input: ${inputPath}, Output: ${outputPath}, NumTasks: ${numTasks}, Force: ${force}, Append: ${append}, Research: ${research}, ProjectRoot: ${projectRoot}`\n\t);\n\n\tconst wasSilent = isSilentMode();\n\tif (!wasSilent) {\n\t\tenableSilentMode();\n\t}\n\n\ttry {\n\t\t// Call the core parsePRD function\n\t\tconst result = await parsePRD(\n\t\t\tinputPath,\n\t\t\toutputPath,\n\t\t\tnumTasks,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog: logWrapper,\n\t\t\t\tprojectRoot,\n\t\t\t\ttag,\n\t\t\t\tforce,\n\t\t\t\tappend,\n\t\t\t\tresearch,\n\t\t\t\tcommandName: 'parse-prd',\n\t\t\t\toutputType: 'mcp'\n\t\t\t},\n\t\t\t'json'\n\t\t);\n\n\t\t// Adjust check for the new return structure\n\t\tif (result && result.success) {\n\t\t\tconst successMsg = `Successfully parsed PRD and generated tasks in ${result.tasksPath}`;\n\t\t\tlogWrapper.success(successMsg);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: successMsg,\n\t\t\t\t\toutputPath: result.tasksPath,\n\t\t\t\t\ttelemetryData: result.telemetryData,\n\t\t\t\t\ttagInfo: result.tagInfo\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// Handle case where core function didn't return expected success 
structure\n\t\t\tlogWrapper.error(\n\t\t\t\t'Core parsePRD function did not return a successful structure.'\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\t\tmessage:\n\t\t\t\t\t\tresult?.message ||\n\t\t\t\t\t\t'Core function failed to parse PRD or returned unexpected result.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\tlogWrapper.error(`Error executing core parsePRD: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'PARSE_PRD_CORE_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown error parsing PRD'\n\t\t\t}\n\t\t};\n\t} finally {\n\t\tif (!wasSilent && isSilentMode()) {\n\t\t\tdisableSilentMode();\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/task-master.js", "/**\n * task-master.js\n * This module provides a centralized path management system for the Task Master application.\n * It exports the TaskMaster class and the initTaskMaster factory function to create a single,\n * authoritative source for all critical file and directory paths, resolving circular dependencies.\n */\n\nimport path from 'path';\nimport fs from 'fs';\nimport {\n\tTASKMASTER_DIR,\n\tTASKMASTER_TASKS_FILE,\n\tLEGACY_TASKS_FILE,\n\tTASKMASTER_DOCS_DIR,\n\tTASKMASTER_REPORTS_DIR,\n\tTASKMASTER_CONFIG_FILE,\n\tLEGACY_CONFIG_FILE,\n\tCOMPLEXITY_REPORT_FILE\n} from './constants/paths.js';\nimport { findProjectRoot } from './utils/path-utils.js';\n\n/**\n * TaskMaster class manages all the paths for the application.\n * An instance of this class is created by the initTaskMaster function.\n */\nexport class TaskMaster {\n\t#paths;\n\t#tag;\n\n\t/**\n\t * The constructor is intended to be used only by the initTaskMaster factory function.\n\t * @param {object} paths - A pre-resolved object of all application paths.\n\t * @param {string|undefined} tag - The current tag.\n\t */\n\tconstructor(paths, tag) {\n\t\tthis.#paths = Object.freeze({ ...paths });\n\t\tthis.#tag = 
tag;\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the project root.\n\t */\n\tgetProjectRoot() {\n\t\treturn this.#paths.projectRoot;\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the .taskmaster directory.\n\t */\n\tgetTaskMasterDir() {\n\t\treturn this.#paths.taskMasterDir;\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the tasks.json file.\n\t */\n\tgetTasksPath() {\n\t\treturn this.#paths.tasksPath;\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the PRD file.\n\t */\n\tgetPrdPath() {\n\t\treturn this.#paths.prdPath;\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the complexity report.\n\t */\n\tgetComplexityReportPath() {\n\t\tif (this.#paths.complexityReportPath) {\n\t\t\treturn this.#paths.complexityReportPath;\n\t\t}\n\n\t\tconst complexityReportFile =\n\t\t\tthis.getCurrentTag() !== 'master'\n\t\t\t\t? COMPLEXITY_REPORT_FILE.replace(\n\t\t\t\t\t\t'.json',\n\t\t\t\t\t\t`_${this.getCurrentTag()}.json`\n\t\t\t\t\t)\n\t\t\t\t: COMPLEXITY_REPORT_FILE;\n\n\t\treturn path.join(this.#paths.projectRoot, complexityReportFile);\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the config.json file.\n\t */\n\tgetConfigPath() {\n\t\treturn this.#paths.configPath;\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the state.json file.\n\t */\n\tgetStatePath() {\n\t\treturn this.#paths.statePath;\n\t}\n\n\t/**\n\t * @returns {object} A frozen object containing all resolved paths.\n\t */\n\tgetAllPaths() {\n\t\treturn this.#paths;\n\t}\n\n\t/**\n\t * Gets the current tag from state.json or falls back to defaultTag from config\n\t * @returns {string} The current tag name\n\t */\n\tgetCurrentTag() {\n\t\tif (this.#tag) {\n\t\t\treturn this.#tag;\n\t\t}\n\n\t\ttry {\n\t\t\t// Try to read current tag from state.json using fs directly\n\t\t\tif (fs.existsSync(this.#paths.statePath)) {\n\t\t\t\tconst rawState = fs.readFileSync(this.#paths.statePath, 
'utf8');\n\t\t\t\tconst stateData = JSON.parse(rawState);\n\t\t\t\tif (stateData && stateData.currentTag) {\n\t\t\t\t\treturn stateData.currentTag;\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Ignore errors, fall back to default\n\t\t}\n\n\t\t// Fall back to defaultTag from config using fs directly\n\t\ttry {\n\t\t\tif (fs.existsSync(this.#paths.configPath)) {\n\t\t\t\tconst rawConfig = fs.readFileSync(this.#paths.configPath, 'utf8');\n\t\t\t\tconst configData = JSON.parse(rawConfig);\n\t\t\t\tif (configData && configData.global && configData.global.defaultTag) {\n\t\t\t\t\treturn configData.global.defaultTag;\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Ignore errors, use hardcoded default\n\t\t}\n\n\t\t// Final fallback\n\t\treturn 'master';\n\t}\n}\n\n/**\n * Initializes a TaskMaster instance with resolved paths.\n * This function centralizes path resolution logic.\n *\n * @param {object} [overrides={}] - An object with possible path overrides.\n * @param {string} [overrides.projectRoot]\n * @param {string} [overrides.tasksPath]\n * @param {string} [overrides.prdPath]\n * @param {string} [overrides.complexityReportPath]\n * @param {string} [overrides.configPath]\n * @param {string} [overrides.statePath]\n * @param {string} [overrides.tag]\n * @returns {TaskMaster} An initialized TaskMaster instance.\n */\nexport function initTaskMaster(overrides = {}) {\n\tconst resolvePath = (\n\t\tpathType,\n\t\toverride,\n\t\tdefaultPaths = [],\n\t\tbasePath = null,\n\t\tcreateParentDirs = false\n\t) => {\n\t\tif (typeof override === 'string') {\n\t\t\tconst resolvedPath = path.isAbsolute(override)\n\t\t\t\t? 
override\n\t\t\t\t: path.resolve(basePath || process.cwd(), override);\n\n\t\t\tif (createParentDirs) {\n\t\t\t\t// For output paths, create parent directory if it doesn't exist\n\t\t\t\tconst parentDir = path.dirname(resolvedPath);\n\t\t\t\tif (!fs.existsSync(parentDir)) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tfs.mkdirSync(parentDir, { recursive: true });\n\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t`Could not create directory for ${pathType}: ${parentDir}. Error: ${error.message}`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Original validation logic\n\t\t\t\tif (!fs.existsSync(resolvedPath)) {\n\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t`${pathType} override path does not exist: ${resolvedPath}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn resolvedPath;\n\t\t}\n\n\t\tif (override === true) {\n\t\t\t// Required path - search defaults and fail if not found\n\t\t\tfor (const defaultPath of defaultPaths) {\n\t\t\t\tconst fullPath = path.isAbsolute(defaultPath)\n\t\t\t\t\t? defaultPath\n\t\t\t\t\t: path.join(basePath || process.cwd(), defaultPath);\n\t\t\t\tif (fs.existsSync(fullPath)) {\n\t\t\t\t\treturn fullPath;\n\t\t\t\t}\n\t\t\t}\n\t\t\tthrow new Error(\n\t\t\t\t`Required ${pathType} not found. Searched: ${defaultPaths.join(', ')}`\n\t\t\t);\n\t\t}\n\n\t\t// Optional path (override === false/undefined) - search defaults, return null if not found\n\t\tfor (const defaultPath of defaultPaths) {\n\t\t\tconst fullPath = path.isAbsolute(defaultPath)\n\t\t\t\t? 
defaultPath\n\t\t\t\t: path.join(basePath || process.cwd(), defaultPath);\n\t\t\tif (fs.existsSync(fullPath)) {\n\t\t\t\treturn fullPath;\n\t\t\t}\n\t\t}\n\n\t\treturn null;\n\t};\n\n\tconst paths = {};\n\n\t// Project Root\n\tif (overrides.projectRoot) {\n\t\tconst resolvedOverride = path.resolve(overrides.projectRoot);\n\t\tif (!fs.existsSync(resolvedOverride)) {\n\t\t\tthrow new Error(\n\t\t\t\t`Project root override path does not exist: ${resolvedOverride}`\n\t\t\t);\n\t\t}\n\n\t\tconst hasTaskmasterDir = fs.existsSync(\n\t\t\tpath.join(resolvedOverride, TASKMASTER_DIR)\n\t\t);\n\t\tconst hasLegacyConfig = fs.existsSync(\n\t\t\tpath.join(resolvedOverride, LEGACY_CONFIG_FILE)\n\t\t);\n\n\t\tif (!hasTaskmasterDir && !hasLegacyConfig) {\n\t\t\tthrow new Error(\n\t\t\t\t`Project root override is not a valid taskmaster project: ${resolvedOverride}`\n\t\t\t);\n\t\t}\n\n\t\tpaths.projectRoot = resolvedOverride;\n\t} else {\n\t\t// findProjectRoot now always returns a value (fallback to cwd)\n\t\tpaths.projectRoot = findProjectRoot();\n\t}\n\n\t// TaskMaster Directory\n\tif ('taskMasterDir' in overrides) {\n\t\tpaths.taskMasterDir = resolvePath(\n\t\t\t'taskmaster directory',\n\t\t\toverrides.taskMasterDir,\n\t\t\t[TASKMASTER_DIR],\n\t\t\tpaths.projectRoot\n\t\t);\n\t} else {\n\t\tpaths.taskMasterDir = resolvePath(\n\t\t\t'taskmaster directory',\n\t\t\tfalse,\n\t\t\t[TASKMASTER_DIR],\n\t\t\tpaths.projectRoot\n\t\t);\n\t}\n\n\t// Always set default paths first\n\t// These can be overridden below if needed\n\tpaths.configPath = path.join(paths.projectRoot, TASKMASTER_CONFIG_FILE);\n\tpaths.statePath = path.join(\n\t\tpaths.taskMasterDir || path.join(paths.projectRoot, TASKMASTER_DIR),\n\t\t'state.json'\n\t);\n\tpaths.tasksPath = path.join(paths.projectRoot, TASKMASTER_TASKS_FILE);\n\n\t// Handle overrides - only validate/resolve if explicitly provided\n\tif ('configPath' in overrides) {\n\t\tpaths.configPath = resolvePath(\n\t\t\t'config 
file',\n\t\t\toverrides.configPath,\n\t\t\t[TASKMASTER_CONFIG_FILE, LEGACY_CONFIG_FILE],\n\t\t\tpaths.projectRoot\n\t\t);\n\t}\n\n\tif ('statePath' in overrides) {\n\t\tpaths.statePath = resolvePath(\n\t\t\t'state file',\n\t\t\toverrides.statePath,\n\t\t\t['state.json'],\n\t\t\tpaths.taskMasterDir\n\t\t);\n\t}\n\n\tif ('tasksPath' in overrides) {\n\t\tpaths.tasksPath = resolvePath(\n\t\t\t'tasks file',\n\t\t\toverrides.tasksPath,\n\t\t\t[TASKMASTER_TASKS_FILE, LEGACY_TASKS_FILE],\n\t\t\tpaths.projectRoot\n\t\t);\n\t}\n\n\tif ('prdPath' in overrides) {\n\t\tpaths.prdPath = resolvePath(\n\t\t\t'PRD file',\n\t\t\toverrides.prdPath,\n\t\t\t[\n\t\t\t\tpath.join(TASKMASTER_DOCS_DIR, 'PRD.md'),\n\t\t\t\tpath.join(TASKMASTER_DOCS_DIR, 'prd.md'),\n\t\t\t\tpath.join(TASKMASTER_DOCS_DIR, 'PRD.txt'),\n\t\t\t\tpath.join(TASKMASTER_DOCS_DIR, 'prd.txt'),\n\t\t\t\tpath.join('scripts', 'PRD.md'),\n\t\t\t\tpath.join('scripts', 'prd.md'),\n\t\t\t\tpath.join('scripts', 'PRD.txt'),\n\t\t\t\tpath.join('scripts', 'prd.txt'),\n\t\t\t\t'PRD.md',\n\t\t\t\t'prd.md',\n\t\t\t\t'PRD.txt',\n\t\t\t\t'prd.txt'\n\t\t\t],\n\t\t\tpaths.projectRoot\n\t\t);\n\t}\n\n\tif ('complexityReportPath' in overrides) {\n\t\tpaths.complexityReportPath = resolvePath(\n\t\t\t'complexity report',\n\t\t\toverrides.complexityReportPath,\n\t\t\t[\n\t\t\t\tpath.join(TASKMASTER_REPORTS_DIR, 'task-complexity-report.json'),\n\t\t\t\tpath.join(TASKMASTER_REPORTS_DIR, 'complexity-report.json'),\n\t\t\t\tpath.join('scripts', 'task-complexity-report.json'),\n\t\t\t\tpath.join('scripts', 'complexity-report.json'),\n\t\t\t\t'task-complexity-report.json',\n\t\t\t\t'complexity-report.json'\n\t\t\t],\n\t\t\tpaths.projectRoot,\n\t\t\ttrue // Enable parent directory creation for output paths\n\t\t);\n\t}\n\n\treturn new TaskMaster(paths, overrides.tag);\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/expand-all-tasks.js", "/**\n * Direct function wrapper for expandAllTasks\n */\n\nimport { expandAllTasks } from 
'../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Expand all pending tasks with subtasks (Direct Function Wrapper)\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {number|string} [args.num] - Number of subtasks to generate\n * @param {boolean} [args.research] - Enable research-backed subtask generation\n * @param {string} [args.prompt] - Additional context to guide subtask generation\n * @param {boolean} [args.force] - Force regeneration of subtasks for tasks that already have them\n * @param {string} [args.projectRoot] - Project root path.\n * @param {string} [args.tag] - Tag for the task (optional)\n * @param {Object} log - Logger object from FastMCP\n * @param {Object} context - Context object containing session\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function expandAllTasksDirect(args, log, context = {}) {\n\tconst { session } = context; // Extract session\n\t// Destructure expected args, including projectRoot\n\tconst { tasksJsonPath, num, research, prompt, force, projectRoot, tag } =\n\t\targs;\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\tif (!tasksJsonPath) {\n\t\tlog.error('expandAllTasksDirect called without tasksJsonPath');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t}\n\t\t};\n\t}\n\n\tenableSilentMode(); // Enable silent mode for the core function call\n\ttry {\n\t\tlog.info(\n\t\t\t`Calling core expandAllTasks with args: ${JSON.stringify({ num, research, prompt, force, projectRoot, tag })}`\n\t\t);\n\n\t\t// Parse parameters (ensure correct types)\n\t\tconst numSubtasks = num ? 
parseInt(num, 10) : undefined;\n\t\tconst useResearch = research === true;\n\t\tconst additionalContext = prompt || '';\n\t\tconst forceFlag = force === true;\n\n\t\t// Call the core function, passing options and the context object { session, mcpLog, projectRoot }\n\t\tconst result = await expandAllTasks(\n\t\t\ttasksJsonPath,\n\t\t\tnumSubtasks,\n\t\t\tuseResearch,\n\t\t\tadditionalContext,\n\t\t\tforceFlag,\n\t\t\t{ session, mcpLog, projectRoot, tag },\n\t\t\t'json'\n\t\t);\n\n\t\t// Core function now returns a summary object including the *aggregated* telemetryData\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: `Expand all operation completed. Expanded: ${result.expandedCount}, Failed: ${result.failedCount}, Skipped: ${result.skippedCount}`,\n\t\t\t\tdetails: {\n\t\t\t\t\texpandedCount: result.expandedCount,\n\t\t\t\t\tfailedCount: result.failedCount,\n\t\t\t\t\tskippedCount: result.skippedCount,\n\t\t\t\t\ttasksToExpand: result.tasksToExpand\n\t\t\t\t},\n\t\t\t\ttelemetryData: result.telemetryData // Pass the aggregated object\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Log the error using the MCP logger\n\t\tlog.error(`Error during core expandAllTasks execution: ${error.message}`);\n\t\t// Optionally log stack trace if available and debug enabled\n\t\t// if (error.stack && log.debug) { log.debug(error.stack); }\n\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CORE_FUNCTION_ERROR', // Or a more specific code if possible\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t} finally {\n\t\tdisableSilentMode(); // IMPORTANT: Ensure silent mode is always disabled\n\t}\n}\n"], ["/claude-task-master/scripts/modules/task-manager/migrate.js", "import fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport { fileURLToPath } from 'url';\nimport { createLogWrapper } from '../../../mcp-server/src/tools/utils.js';\nimport { findProjectRoot } from '../utils.js';\nimport 
{\n\tLEGACY_CONFIG_FILE,\n\tTASKMASTER_CONFIG_FILE\n} from '../../../src/constants/paths.js';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = path.dirname(__filename);\n\n// Create a simple log wrapper for CLI use\nconst log = createLogWrapper({\n\tinfo: (msg) => console.log(chalk.blue('ℹ'), msg),\n\twarn: (msg) => console.log(chalk.yellow('⚠'), msg),\n\terror: (msg) => console.error(chalk.red('✗'), msg),\n\tsuccess: (msg) => console.log(chalk.green('✓'), msg)\n});\n\n/**\n * Main migration function\n * @param {Object} options - Migration options\n */\nexport async function migrateProject(options = {}) {\n\tconst projectRoot = findProjectRoot() || process.cwd();\n\n\tlog.info(`Starting migration in: ${projectRoot}`);\n\n\t// Check if .taskmaster directory already exists\n\tconst taskmasterDir = path.join(projectRoot, '.taskmaster');\n\tif (fs.existsSync(taskmasterDir) && !options.force) {\n\t\tlog.warn(\n\t\t\t'.taskmaster directory already exists. Use --force to overwrite or skip migration.'\n\t\t);\n\t\treturn;\n\t}\n\n\t// Analyze what needs to be migrated\n\tconst migrationPlan = analyzeMigrationNeeds(projectRoot);\n\n\tif (migrationPlan.length === 0) {\n\t\tlog.info(\n\t\t\t'No files to migrate. Project may already be using the new structure.'\n\t\t);\n\t\treturn;\n\t}\n\n\t// Show migration plan\n\tlog.info('Migration plan:');\n\tfor (const item of migrationPlan) {\n\t\tconst action = options.dryRun ? 'Would move' : 'Will move';\n\t\tlog.info(` ${action}: ${item.from} → ${item.to}`);\n\t}\n\n\tif (options.dryRun) {\n\t\tlog.info(\n\t\t\t'Dry run complete. Use --dry-run=false to perform actual migration.'\n\t\t);\n\t\treturn;\n\t}\n\n\t// Confirm migration\n\tif (!options.yes) {\n\t\tconst readline = await import('readline');\n\t\tconst rl = readline.createInterface({\n\t\t\tinput: process.stdin,\n\t\t\toutput: process.stdout\n\t\t});\n\n\t\tconst answer = await new Promise((resolve) => {\n\t\t\trl.question('Proceed with migration? 
(y/N): ', resolve);\n\t\t});\n\t\trl.close();\n\n\t\tif (answer.toLowerCase() !== 'y' && answer.toLowerCase() !== 'yes') {\n\t\t\tlog.info('Migration cancelled.');\n\t\t\treturn;\n\t\t}\n\t}\n\n\t// Perform migration\n\ttry {\n\t\tawait performMigration(projectRoot, migrationPlan, options);\n\t\tlog.success('Migration completed successfully!');\n\t\tlog.info('You can now use the new .taskmaster directory structure.');\n\t\tif (!options.cleanup) {\n\t\t\tlog.info(\n\t\t\t\t'Old files were preserved. Use --cleanup to remove them after verification.'\n\t\t\t);\n\t\t}\n\t} catch (error) {\n\t\tlog.error(`Migration failed: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Analyze what files need to be migrated\n * @param {string} projectRoot - Project root directory\n * @returns {Array} Migration plan items\n */\nfunction analyzeMigrationNeeds(projectRoot) {\n\tconst migrationPlan = [];\n\n\t// Check for tasks directory\n\tconst tasksDir = path.join(projectRoot, 'tasks');\n\tif (fs.existsSync(tasksDir)) {\n\t\tconst tasksFiles = fs.readdirSync(tasksDir);\n\t\tfor (const file of tasksFiles) {\n\t\t\tmigrationPlan.push({\n\t\t\t\tfrom: path.join('tasks', file),\n\t\t\t\tto: path.join('.taskmaster', 'tasks', file),\n\t\t\t\ttype: 'task'\n\t\t\t});\n\t\t}\n\t}\n\n\t// Check for scripts directory files\n\tconst scriptsDir = path.join(projectRoot, 'scripts');\n\tif (fs.existsSync(scriptsDir)) {\n\t\tconst scriptsFiles = fs.readdirSync(scriptsDir);\n\t\tfor (const file of scriptsFiles) {\n\t\t\tconst filePath = path.join(scriptsDir, file);\n\t\t\tif (fs.statSync(filePath).isFile()) {\n\t\t\t\t// Categorize files more intelligently\n\t\t\t\tlet destination;\n\t\t\t\tconst lowerFile = file.toLowerCase();\n\n\t\t\t\tif (\n\t\t\t\t\tlowerFile.includes('example') ||\n\t\t\t\t\tlowerFile.includes('template') ||\n\t\t\t\t\tlowerFile.includes('boilerplate') ||\n\t\t\t\t\tlowerFile.includes('sample')\n\t\t\t\t) {\n\t\t\t\t\t// Template/example files go to templates (including 
example_prd.txt)\n\t\t\t\t\tdestination = path.join('.taskmaster', 'templates', file);\n\t\t\t\t} else if (\n\t\t\t\t\tlowerFile.includes('complexity') &&\n\t\t\t\t\tlowerFile.includes('report') &&\n\t\t\t\t\tlowerFile.endsWith('.json')\n\t\t\t\t) {\n\t\t\t\t\t// Only actual complexity reports go to reports\n\t\t\t\t\tdestination = path.join('.taskmaster', 'reports', file);\n\t\t\t\t} else if (\n\t\t\t\t\tlowerFile.includes('prd') ||\n\t\t\t\t\tlowerFile.endsWith('.md') ||\n\t\t\t\t\tlowerFile.endsWith('.txt')\n\t\t\t\t) {\n\t\t\t\t\t// Documentation files go to docs (but not examples or reports)\n\t\t\t\t\tdestination = path.join('.taskmaster', 'docs', file);\n\t\t\t\t} else {\n\t\t\t\t\t// Other files stay in scripts or get skipped - don't force everything into templates\n\t\t\t\t\tlog.warn(\n\t\t\t\t\t\t`Skipping migration of '${file}' - uncertain categorization. You may need to move this manually.`\n\t\t\t\t\t);\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tmigrationPlan.push({\n\t\t\t\t\tfrom: path.join('scripts', file),\n\t\t\t\t\tto: destination,\n\t\t\t\t\ttype: 'script'\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check for .taskmasterconfig\n\tconst oldConfig = path.join(projectRoot, LEGACY_CONFIG_FILE);\n\tif (fs.existsSync(oldConfig)) {\n\t\tmigrationPlan.push({\n\t\t\tfrom: LEGACY_CONFIG_FILE,\n\t\t\tto: TASKMASTER_CONFIG_FILE,\n\t\t\ttype: 'config'\n\t\t});\n\t}\n\n\treturn migrationPlan;\n}\n\n/**\n * Perform the actual migration\n * @param {string} projectRoot - Project root directory\n * @param {Array} migrationPlan - List of files to migrate\n * @param {Object} options - Migration options\n */\nasync function performMigration(projectRoot, migrationPlan, options) {\n\t// Create .taskmaster directory\n\tconst taskmasterDir = path.join(projectRoot, '.taskmaster');\n\tif (!fs.existsSync(taskmasterDir)) {\n\t\tfs.mkdirSync(taskmasterDir, { recursive: true });\n\t}\n\n\t// Group migration items by destination directory to create only needed subdirs\n\tconst 
neededDirs = new Set();\n\tfor (const item of migrationPlan) {\n\t\tconst destDir = path.dirname(item.to);\n\t\tneededDirs.add(destDir);\n\t}\n\n\t// Create only the directories we actually need\n\tfor (const dir of neededDirs) {\n\t\tconst fullDirPath = path.join(projectRoot, dir);\n\t\tif (!fs.existsSync(fullDirPath)) {\n\t\t\tfs.mkdirSync(fullDirPath, { recursive: true });\n\t\t\tlog.info(`Created directory: ${dir}`);\n\t\t}\n\t}\n\n\t// Create backup if requested\n\tif (options.backup) {\n\t\tconst backupDir = path.join(projectRoot, '.taskmaster-migration-backup');\n\t\tlog.info(`Creating backup in: ${backupDir}`);\n\t\tif (fs.existsSync(backupDir)) {\n\t\t\tfs.rmSync(backupDir, { recursive: true, force: true });\n\t\t}\n\t\tfs.mkdirSync(backupDir, { recursive: true });\n\t}\n\n\t// Migrate files\n\tfor (const item of migrationPlan) {\n\t\tconst fromPath = path.join(projectRoot, item.from);\n\t\tconst toPath = path.join(projectRoot, item.to);\n\n\t\tif (!fs.existsSync(fromPath)) {\n\t\t\tlog.warn(`Source file not found: ${item.from}`);\n\t\t\tcontinue;\n\t\t}\n\n\t\t// Create backup if requested\n\t\tif (options.backup) {\n\t\t\tconst backupPath = path.join(\n\t\t\t\tprojectRoot,\n\t\t\t\t'.taskmaster-migration-backup',\n\t\t\t\titem.from\n\t\t\t);\n\t\t\tconst backupDir = path.dirname(backupPath);\n\t\t\tif (!fs.existsSync(backupDir)) {\n\t\t\t\tfs.mkdirSync(backupDir, { recursive: true });\n\t\t\t}\n\t\t\tfs.copyFileSync(fromPath, backupPath);\n\t\t}\n\n\t\t// Ensure destination directory exists\n\t\tconst toDir = path.dirname(toPath);\n\t\tif (!fs.existsSync(toDir)) {\n\t\t\tfs.mkdirSync(toDir, { recursive: true });\n\t\t}\n\n\t\t// Copy file\n\t\tfs.copyFileSync(fromPath, toPath);\n\t\tlog.info(`Migrated: ${item.from} → ${item.to}`);\n\n\t\t// Remove original if cleanup is requested\n\t\tif (options.cleanup) {\n\t\t\tfs.unlinkSync(fromPath);\n\t\t}\n\t}\n\n\t// Clean up empty directories if cleanup is requested\n\tif (options.cleanup) {\n\t\tconst 
// Lazily-loaded factory from the optional 'ai-sdk-provider-gemini-cli' package.
let createGeminiProvider;

/**
 * Lazily loads the optional 'ai-sdk-provider-gemini-cli' package and caches
 * its `createGeminiProvider` factory in the module-level variable above.
 *
 * The dynamic import keeps the dependency optional: users who never select
 * the gemini-cli provider do not need the package installed.
 *
 * @returns {Promise<void>} Resolves once the factory is cached.
 * @throws {Error} If the package is not installed; the underlying import
 *   failure is preserved on the error's `cause` for debugging.
 */
async function loadGeminiCliModule() {
	if (!createGeminiProvider) {
		try {
			const mod = await import('ai-sdk-provider-gemini-cli');
			createGeminiProvider = mod.createGeminiProvider;
		} catch (err) {
			// Don't swallow the real import error — attach it as `cause` so the
			// actual failure (missing package vs. broken install) is diagnosable.
			throw new Error(
				"Gemini CLI SDK is not installed. Please install 'ai-sdk-provider-gemini-cli' to use the gemini-cli provider.",
				{ cause: err }
			);
		}
	}
}
	/**
	 * Extracts system messages from the messages array and returns them separately.
	 * This is needed because ai-sdk-provider-gemini-cli expects system prompts as a separate parameter.
	 * @param {Array} messages - Array of message objects
	 * @param {Object} options - Options for system prompt enhancement
	 * @param {boolean} options.enforceJsonOutput - Whether to add JSON enforcement to system prompt
	 * @returns {Object} - {systemPrompt: string|undefined, messages: Array}
	 */
	_extractSystemMessage(messages, options = {}) {
		// Defensive: a missing or non-array input yields no system prompt and a
		// safe empty (or passthrough) message list.
		if (!messages || !Array.isArray(messages)) {
			return { systemPrompt: undefined, messages: messages || [] };
		}

		const systemMessages = messages.filter((msg) => msg.role === 'system');
		const nonSystemMessages = messages.filter((msg) => msg.role !== 'system');

		// Combine multiple system messages if present
		let systemPrompt =
			systemMessages.length > 0
				? systemMessages.map((msg) => msg.content).join('\n\n')
				: undefined;

		// Add Gemini CLI specific JSON enforcement if requested.
		// Appended after any user-supplied system text so the strict-JSON
		// instruction is the last directive the model sees.
		if (options.enforceJsonOutput) {
			const jsonEnforcement = this._getJsonEnforcementPrompt();
			systemPrompt = systemPrompt
				? `${systemPrompt}\n\n${jsonEnforcement}`
				: jsonEnforcement;
		}

		return { systemPrompt, messages: nonSystemMessages };
	}
	/**
	 * Checks if a string is valid JSON
	 * @param {string} text - Text to validate
	 * @returns {boolean} True if valid JSON; false for non-strings, empty
	 *   values, or text that JSON.parse rejects
	 */
	_isValidJson(text) {
		if (!text || typeof text !== 'string') {
			return false;
		}

		try {
			// Trim first so surrounding whitespace alone doesn't fail the parse.
			JSON.parse(text.trim());
			return true;
		} catch {
			return false;
		}
	}

	/**
	 * Detects if the user prompt is requesting JSON output
	 * @param {Array} messages - Array of message objects
	 * @returns {boolean} True if JSON output is likely expected
	 */
	_detectJsonRequest(messages) {
		// Only user messages are scanned; system messages are handled separately
		// by _extractSystemMessage.
		const userMessages = messages.filter((msg) => msg.role === 'user');
		const combinedText = userMessages
			.map((msg) => msg.content)
			.join(' ')
			.toLowerCase();

		// Look for indicators that JSON output is expected.
		// NOTE(review): broad terms like 'array'/'object' will also match prose
		// prompts, so this heuristic leans toward triggering JSON enforcement —
		// presumably intentional; confirm before narrowing the list.
		const jsonIndicators = [
			'json',
			'respond only with',
			'return only',
			'output only',
			'format:',
			'structure:',
			'schema:',
			'{"',
			'[{',
			'subtasks',
			'array',
			'object'
		];

		return jsonIndicators.some((indicator) => combinedText.includes(indicator));
	}
Generate exactly'\n\t\t\t);\n\n\t\tif (!isExpandTask) {\n\t\t\treturn messages; // Not an expand task, return unchanged\n\t\t}\n\n\t\t// Extract subtask count from system message\n\t\tconst subtaskCountMatch = systemMsg.content.match(\n\t\t\t/Generate exactly (\\d+) subtasks/\n\t\t);\n\t\tconst subtaskCount = subtaskCountMatch ? subtaskCountMatch[1] : '10';\n\n\t\tlog(\n\t\t\t'debug',\n\t\t\t`${this.name} detected expand-task operation, simplifying for ${subtaskCount} subtasks`\n\t\t);\n\n\t\treturn messages.map((msg) => {\n\t\t\tif (msg.role !== 'user') {\n\t\t\t\treturn msg;\n\t\t\t}\n\n\t\t\t// For expand-task user messages, create a much simpler, more direct prompt\n\t\t\t// that doesn't depend on specific task content\n\t\t\tconst simplifiedPrompt = `Generate exactly ${subtaskCount} subtasks in the following JSON format.\n\nCRITICAL INSTRUCTION: You must respond with ONLY valid JSON. No explanatory text, no \"Here is\", no \"Of course\", no markdown - just the JSON object.\n\nRequired JSON structure:\n{\n \"subtasks\": [\n {\n \"id\": 1,\n \"title\": \"Specific actionable task title\",\n \"description\": \"Clear task description\",\n \"dependencies\": [],\n \"details\": \"Implementation details and guidance\",\n \"testStrategy\": \"Testing approach\"\n }\n ]\n}\n\nGenerate ${subtaskCount} subtasks based on the original task context. Return ONLY the JSON object.`;\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`${this.name} simplified user prompt for better JSON compliance`\n\t\t\t);\n\t\t\treturn { ...msg, content: simplifiedPrompt };\n\t\t});\n\t}\n\n\t/**\n\t * Extract JSON from Gemini's response using a tolerant parser.\n\t *\n\t * Optimized approach that progressively tries different parsing strategies:\n\t * 1. Direct parsing after cleanup\n\t * 2. Smart boundary detection with single-pass analysis\n\t * 3. 
	/**
	 * Extract JSON from Gemini's response using a tolerant parser.
	 *
	 * Optimized approach that progressively tries different parsing strategies:
	 * 1. Direct parsing after cleanup
	 * 2. Smart boundary detection with single-pass analysis
	 * 3. Limited character-by-character fallback for edge cases
	 *
	 * @param {string} text - Raw text which may contain JSON
	 * @returns {string} A valid JSON string if extraction succeeds, otherwise the original text
	 */
	extractJson(text) {
		if (!text || typeof text !== 'string') {
			return text;
		}

		let content = text.trim();

		// Early exit for very short content
		if (content.length < 2) {
			return text;
		}

		// Strip common wrappers in a single pass
		content = content
			// Remove markdown fences
			.replace(/^.*?```(?:json)?\s*([\s\S]*?)\s*```.*$/i, '$1')
			// Remove variable declarations (e.g. `const x = {...};`)
			.replace(/^\s*(?:const|let|var)\s+\w+\s*=\s*([\s\S]*?)(?:;|\s*)$/i, '$1')
			// Remove common conversational prefixes ("Here's the JSON: ...")
			.replace(/^(?:Here's|The)\s+(?:the\s+)?JSON.*?[:]\s*/i, '')
			.trim();

		// Find the first JSON-like structure
		const firstObj = content.indexOf('{');
		const firstArr = content.indexOf('[');

		if (firstObj === -1 && firstArr === -1) {
			return text;
		}

		// Start at whichever of '{' / '[' appears first.
		const start =
			firstArr === -1
				? firstObj
				: firstObj === -1
					? firstArr
					: Math.min(firstObj, firstArr);
		content = content.slice(start);

		// Optimized parsing function with error collection.
		// Returns a re-serialized (pretty-printed) JSON string on success,
		// undefined on any parse error — callers fall through to the next strategy.
		const tryParse = (value) => {
			if (!value || value.length < 2) return undefined;

			const errors = [];
			try {
				const result = parse(value, errors, {
					allowTrailingComma: true,
					allowEmptyContent: false
				});
				if (errors.length === 0 && result !== undefined) {
					return JSON.stringify(result, null, 2);
				}
			} catch {
				// Parsing failed completely
			}
			return undefined;
		};

		// Try parsing the full content first
		const fullParse = tryParse(content);
		if (fullParse !== undefined) {
			return fullParse;
		}

		// Smart boundary detection - single pass with optimizations.
		// Depth only tracks the outermost bracket type (openChar/closeChar);
		// string literals and escapes are skipped so braces inside strings
		// don't affect the depth count.
		const openChar = content[0];
		const closeChar = openChar === '{' ? '}' : ']';

		let depth = 0;
		let inString = false;
		let escapeNext = false;
		let lastValidEnd = -1;

		// Single-pass boundary detection with early termination
		for (let i = 0; i < content.length && i < 10000; i++) {
			// Limit scan for performance
			const char = content[i];

			if (escapeNext) {
				escapeNext = false;
				continue;
			}

			if (char === '\\') {
				escapeNext = true;
				continue;
			}

			if (char === '"') {
				inString = !inString;
				continue;
			}

			if (inString) continue;

			if (char === openChar) {
				depth++;
			} else if (char === closeChar) {
				depth--;
				if (depth === 0) {
					lastValidEnd = i + 1;
					// Try parsing immediately on first valid boundary
					const candidate = content.slice(0, lastValidEnd);
					const parsed = tryParse(candidate);
					if (parsed !== undefined) {
						return parsed;
					}
				}
			}
		}

		// If we found valid boundaries but parsing failed, try limited fallback:
		// retry on progressively shorter prefixes (steps of 50 chars, never below
		// 80% of the detected boundary), capped at 5 attempts.
		if (lastValidEnd > 0) {
			const maxAttempts = Math.min(5, Math.floor(lastValidEnd / 100)); // Limit attempts
			for (let i = 0; i < maxAttempts; i++) {
				const testEnd = Math.max(
					lastValidEnd - i * 50,
					Math.floor(lastValidEnd * 0.8)
				);
				const candidate = content.slice(0, testEnd);
				const parsed = tryParse(candidate);
				if (parsed !== undefined) {
					return parsed;
				}
			}
		}

		// All strategies failed — return the caller's original text unchanged.
		return text;
	}
msg.content.substring(0, 200) + '...'\n\t\t\t\t\t\t: 'empty'\n\t\t\t\t}))\n\t\t\t});\n\n\t\t\tif (enforceJsonOutput) {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`${this.name} detected JSON request - applying strict JSON enforcement system prompt`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// For gemini-cli, simplify complex prompts before processing\n\t\t\tlet processedMessages = params.messages;\n\t\t\tif (enforceJsonOutput) {\n\t\t\t\tprocessedMessages = this._simplifyJsonPrompts(params.messages);\n\t\t\t}\n\n\t\t\t// Extract system messages for separate handling with optional JSON enforcement\n\t\t\tconst { systemPrompt, messages } = this._extractSystemMessage(\n\t\t\t\tprocessedMessages,\n\t\t\t\t{ enforceJsonOutput }\n\t\t\t);\n\n\t\t\t// Debug the final system prompt being sent\n\t\t\tlog('debug', `${this.name} final system prompt:`, {\n\t\t\t\tsystemPromptLength: systemPrompt ? systemPrompt.length : 0,\n\t\t\t\tsystemPromptPreview: systemPrompt\n\t\t\t\t\t? systemPrompt.substring(0, 300) + '...'\n\t\t\t\t\t: 'none',\n\t\t\t\tfinalMessageCount: messages.length\n\t\t\t});\n\n\t\t\tconst client = await this.getClient(params);\n\t\t\tconst result = await generateText({\n\t\t\t\tmodel: client(params.modelId),\n\t\t\t\tsystem: systemPrompt,\n\t\t\t\tmessages: messages,\n\t\t\t\tmaxTokens: params.maxTokens,\n\t\t\t\ttemperature: params.temperature\n\t\t\t});\n\n\t\t\t// If we detected a JSON request and gemini-cli returned conversational text,\n\t\t\t// attempt to extract JSON from the response\n\t\t\tlet finalText = result.text;\n\t\t\tif (enforceJsonOutput && result.text && !this._isValidJson(result.text)) {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`${this.name} response appears conversational, attempting JSON extraction`\n\t\t\t\t);\n\n\t\t\t\t// Log first 1000 chars of the response to see what Gemini actually returned\n\t\t\t\tlog('debug', `${this.name} raw response preview:`, {\n\t\t\t\t\tresponseLength: result.text.length,\n\t\t\t\t\tresponseStart: 
result.text.substring(0, 1000)\n\t\t\t\t});\n\n\t\t\t\tconst extractedJson = this.extractJson(result.text);\n\t\t\t\tif (this._isValidJson(extractedJson)) {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'debug',\n\t\t\t\t\t\t`${this.name} successfully extracted JSON from conversational response`\n\t\t\t\t\t);\n\t\t\t\t\tfinalText = extractedJson;\n\t\t\t\t} else {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'debug',\n\t\t\t\t\t\t`${this.name} JSON extraction failed, returning original response`\n\t\t\t\t\t);\n\n\t\t\t\t\t// Log what extraction returned to debug why it failed\n\t\t\t\t\tlog('debug', `${this.name} extraction result preview:`, {\n\t\t\t\t\t\textractedLength: extractedJson ? extractedJson.length : 0,\n\t\t\t\t\t\textractedStart: extractedJson\n\t\t\t\t\t\t\t? extractedJson.substring(0, 500)\n\t\t\t\t\t\t\t: 'null'\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`${this.name} generateText completed successfully for model: ${params.modelId}`\n\t\t\t);\n\n\t\t\treturn {\n\t\t\t\ttext: finalText,\n\t\t\t\tusage: {\n\t\t\t\t\tinputTokens: result.usage?.promptTokens,\n\t\t\t\t\toutputTokens: result.usage?.completionTokens,\n\t\t\t\t\ttotalTokens: result.usage?.totalTokens\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tthis.handleError('text generation', error);\n\t\t}\n\t}\n\n\t/**\n\t * Streams text using Gemini CLI model\n\t * Overrides base implementation to properly handle system messages and enforce JSON output when needed\n\t */\n\tasync streamText(params) {\n\t\ttry {\n\t\t\tthis.validateParams(params);\n\t\t\tthis.validateMessages(params.messages);\n\n\t\t\tlog('debug', `Streaming ${this.name} text with model: ${params.modelId}`);\n\n\t\t\t// Detect if JSON output is expected and enforce it for better gemini-cli compatibility\n\t\t\tconst enforceJsonOutput = this._detectJsonRequest(params.messages);\n\n\t\t\t// Debug logging to understand what's happening\n\t\t\tlog('debug', `${this.name} JSON detection analysis:`, 
{\n\t\t\t\tenforceJsonOutput,\n\t\t\t\tmessageCount: params.messages.length,\n\t\t\t\tmessages: params.messages.map((msg) => ({\n\t\t\t\t\trole: msg.role,\n\t\t\t\t\tcontentPreview: msg.content\n\t\t\t\t\t\t? msg.content.substring(0, 200) + '...'\n\t\t\t\t\t\t: 'empty'\n\t\t\t\t}))\n\t\t\t});\n\n\t\t\tif (enforceJsonOutput) {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`${this.name} detected JSON request - applying strict JSON enforcement system prompt`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Extract system messages for separate handling with optional JSON enforcement\n\t\t\tconst { systemPrompt, messages } = this._extractSystemMessage(\n\t\t\t\tparams.messages,\n\t\t\t\t{ enforceJsonOutput }\n\t\t\t);\n\n\t\t\tconst client = await this.getClient(params);\n\t\t\tconst stream = await streamText({\n\t\t\t\tmodel: client(params.modelId),\n\t\t\t\tsystem: systemPrompt,\n\t\t\t\tmessages: messages,\n\t\t\t\tmaxTokens: params.maxTokens,\n\t\t\t\ttemperature: params.temperature\n\t\t\t});\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`${this.name} streamText initiated successfully for model: ${params.modelId}`\n\t\t\t);\n\n\t\t\t// Note: For streaming, we can't intercept and modify the response in real-time\n\t\t\t// The JSON extraction would need to happen on the consuming side\n\t\t\treturn stream;\n\t\t} catch (error) {\n\t\t\tthis.handleError('text streaming', error);\n\t\t}\n\t}\n\n\t/**\n\t * Generates a structured object using Gemini CLI model\n\t * Overrides base implementation to handle Gemini-specific JSON formatting issues and system messages\n\t */\n\tasync generateObject(params) {\n\t\ttry {\n\t\t\t// First try the standard generateObject from base class\n\t\t\treturn await super.generateObject(params);\n\t\t} catch (error) {\n\t\t\t// If it's a JSON parsing error, try to extract and parse JSON manually\n\t\t\tif (error.message?.includes('JSON') || error.message?.includes('parse')) {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`Gemini CLI generateObject failed with 
parsing error, attempting manual extraction`\n\t\t\t\t);\n\n\t\t\t\ttry {\n\t\t\t\t\t// Validate params first\n\t\t\t\t\tthis.validateParams(params);\n\t\t\t\t\tthis.validateMessages(params.messages);\n\n\t\t\t\t\tif (!params.schema) {\n\t\t\t\t\t\tthrow new Error('Schema is required for object generation');\n\t\t\t\t\t}\n\t\t\t\t\tif (!params.objectName) {\n\t\t\t\t\t\tthrow new Error('Object name is required for object generation');\n\t\t\t\t\t}\n\n\t\t\t\t\t// Extract system messages for separate handling with JSON enforcement\n\t\t\t\t\tconst { systemPrompt, messages } = this._extractSystemMessage(\n\t\t\t\t\t\tparams.messages,\n\t\t\t\t\t\t{ enforceJsonOutput: true }\n\t\t\t\t\t);\n\n\t\t\t\t\t// Call generateObject directly with our client\n\t\t\t\t\tconst client = await this.getClient(params);\n\t\t\t\t\tconst result = await generateObject({\n\t\t\t\t\t\tmodel: client(params.modelId),\n\t\t\t\t\t\tsystem: systemPrompt,\n\t\t\t\t\t\tmessages: messages,\n\t\t\t\t\t\tschema: params.schema,\n\t\t\t\t\t\tmode: 'json', // Use json mode instead of auto for Gemini\n\t\t\t\t\t\tmaxTokens: params.maxTokens,\n\t\t\t\t\t\ttemperature: params.temperature\n\t\t\t\t\t});\n\n\t\t\t\t\t// If we get rawResponse text, try to extract JSON from it\n\t\t\t\t\tif (result.rawResponse?.text && !result.object) {\n\t\t\t\t\t\tconst extractedJson = this.extractJson(result.rawResponse.text);\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tresult.object = JSON.parse(extractedJson);\n\t\t\t\t\t\t} catch (parseError) {\n\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t'error',\n\t\t\t\t\t\t\t\t`Failed to parse extracted JSON: ${parseError.message}`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t'debug',\n\t\t\t\t\t\t\t\t`Extracted JSON: ${extractedJson.substring(0, 500)}...`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t\t`Gemini CLI returned invalid JSON that could not be parsed: ${parseError.message}`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn 
/**
 * Normalize project root to ensure it doesn't end with .taskmaster
 * This prevents double .taskmaster paths when using constants that include .taskmaster
 * @param {string} projectRoot - The project root path to normalize
 * @returns {string} - Normalized project root path
 */
export function normalizeProjectRoot(projectRoot) {
	// Falsy inputs (null, undefined, '') are returned untouched.
	if (!projectRoot) return projectRoot;

	// Coerce non-string inputs, then split into path segments.
	const rootString = String(projectRoot);
	const parts = rootString.split(path.sep);
	const markerAt = parts.indexOf('.taskmaster');

	// No `.taskmaster` segment: the path is already a plain project root.
	if (markerAt === -1) {
		return rootString;
	}

	// Keep everything before the marker; an empty join means the marker sat
	// at the filesystem root, so fall back to the separator itself.
	return parts.slice(0, markerAt).join(path.sep) || path.sep;
}
/**
 * Find the tasks.json file path with fallback logic
 * @param {string|null} explicitPath - Explicit path provided by user (highest priority)
 * @param {Object|null} args - Args object from MCP args (optional)
 * @param {Object|null} log - Logger object (optional)
 * @returns {string|null} - Resolved tasks.json path or null if not found
 */
export function findTasksPath(explicitPath = null, args = null, log = null) {
	// Use the passed logger if available, otherwise use the default logger
	const logger = getLoggerOrDefault(log);

	// Resolve the project root first — every candidate path (including the
	// explicit one) is anchored against it.
	const rawProjectRoot = args?.projectRoot || findProjectRoot();
	if (!rawProjectRoot) {
		logger.warn?.('Could not determine project root directory');
		return null;
	}

	// Normalize so we never resolve paths inside an existing .taskmaster dir.
	const projectRoot = normalizeProjectRoot(rawProjectRoot);

	// An explicit path wins when it exists; otherwise fall through to the
	// standard locations below.
	if (explicitPath) {
		const resolvedPath = path.isAbsolute(explicitPath)
			? explicitPath
			: path.resolve(projectRoot, explicitPath);

		if (fs.existsSync(resolvedPath)) {
			logger.info?.(`Using explicit tasks path: ${resolvedPath}`);
			return resolvedPath;
		}
		logger.warn?.(
			`Explicit tasks path not found: ${resolvedPath}, trying fallbacks`
		);
	}

	// Candidate locations, new .taskmaster layout first.
	const candidates = [
		path.join(projectRoot, TASKMASTER_TASKS_FILE), // .taskmaster/tasks/tasks.json (NEW)
		path.join(projectRoot, LEGACY_TASKS_FILE) // tasks/tasks.json (LEGACY)
	];

	for (const tasksPath of candidates) {
		if (!fs.existsSync(tasksPath)) {
			continue;
		}
		logger.info?.(`Found tasks file at: ${tasksPath}`);

		// Nudge users on legacy layouts toward `task-master migrate`.
		if (
			tasksPath.includes('tasks/tasks.json') &&
			!tasksPath.includes('.taskmaster')
		) {
			logger.warn?.(
				`⚠️ DEPRECATION WARNING: Found tasks.json in legacy location '${tasksPath}'. Please migrate to the new .taskmaster directory structure. Run 'task-master migrate' to automatically migrate your project.`
			);
		} else if (
			tasksPath.endsWith('tasks.json') &&
			!tasksPath.includes('.taskmaster') &&
			!tasksPath.includes('tasks/')
		) {
			logger.warn?.(
				`⚠️ DEPRECATION WARNING: Found tasks.json in legacy root location '${tasksPath}'. Please migrate to the new .taskmaster directory structure. Run 'task-master migrate' to automatically migrate your project.`
			);
		}

		return tasksPath;
	}

	logger.warn?.(`No tasks.json found in project: ${projectRoot}`);
	return null;
}
/**
 * Find the complexity report file path with fallback logic
 * @param {string|null} explicitPath - Explicit path provided by user (highest priority)
 * @param {Object|null} args - Args object for MCP context (optional)
 * @param {Object|null} log - Logger object (optional)
 * @returns {string|null} - Resolved complexity report path or null if not found
 */
export function findComplexityReportPath(
	explicitPath = null,
	args = null,
	log = null
) {
	const logger = getLoggerOrDefault(log);

	// 1. If explicit path is provided, use it (highest priority)
	if (explicitPath) {
		const resolvedPath = path.isAbsolute(explicitPath)
			? explicitPath
			: path.resolve(process.cwd(), explicitPath);

		if (fs.existsSync(resolvedPath)) {
			logger.info?.(`Using explicit complexity report path: ${resolvedPath}`);
			return resolvedPath;
		} else {
			logger.warn?.(
				`Explicit complexity report path not found: ${resolvedPath}, trying fallbacks`
			);
		}
	}

	// 2. Try to get project root from args (MCP) or find it
	const rawProjectRoot = args?.projectRoot || findProjectRoot();

	if (!rawProjectRoot) {
		logger.warn?.('Could not determine project root directory');
		return null;
	}

	// 3. Normalize project root to prevent double .taskmaster paths
	const projectRoot = normalizeProjectRoot(rawProjectRoot);

	// 4. Check possible locations in order of preference
	const locations = [
		TASKMASTER_REPORTS_DIR, // .taskmaster/reports/ (NEW)
		'scripts/', // Legacy location
		'' // Project root
	];

	// FIX: include 'task-complexity-report' so the file written by
	// resolveComplexityReportOutputPath (task-complexity-report.json) is
	// actually discoverable; the older base names are kept for compatibility.
	const fileNames = [
		'task-complexity-report',
		'task-complexity',
		'complexity-report'
	].map((fileName) => {
		// Tag-aware filename: non-master tags get a `_<tag>` suffix.
		if (args?.tag && args?.tag !== 'master') {
			return `${fileName}_${args.tag}.json`;
		}
		return `${fileName}.json`;
	});

	for (const location of locations) {
		for (const fileName of fileNames) {
			const reportPath = path.join(projectRoot, location, fileName);
			if (fs.existsSync(reportPath)) {
				logger.info?.(`Found complexity report at: ${reportPath}`);

				// Issue deprecation warning for legacy paths
				if (location === 'scripts/' || location === '') {
					logger.warn?.(
						`⚠️ DEPRECATION WARNING: Found complexity report in legacy location '${reportPath}'. Please migrate to .taskmaster/reports/ directory. Run 'task-master migrate' to automatically migrate your project.`
					);
				}

				return reportPath;
			}
		}
	}

	logger.warn?.(`No complexity report found in project: ${projectRoot}`);
	return null;
}
/**
 * Resolve output path for tasks.json (create if needed)
 * @param {string|null} explicitPath - Explicit output path provided by user
 * @param {Object|null} args - Args object for MCP context (optional)
 * @param {Object|null} log - Logger object (optional)
 * @returns {string} - Resolved output path for tasks.json
 */
export function resolveTasksOutputPath(
	explicitPath = null,
	args = null,
	log = null
) {
	const logger = getLoggerOrDefault(log);

	// An explicit path always wins; relative paths resolve against cwd.
	if (explicitPath) {
		const resolved = path.isAbsolute(explicitPath)
			? explicitPath
			: path.resolve(process.cwd(), explicitPath);
		logger.info?.(`Using explicit output path: ${resolved}`);
		return resolved;
	}

	// Fall back to the project root (MCP args, then marker search, then cwd),
	// normalized so we never nest under an existing .taskmaster directory.
	const projectRoot = normalizeProjectRoot(
		args?.projectRoot || findProjectRoot() || process.cwd()
	);

	// Default to the new .taskmaster layout.
	const defaultPath = path.join(projectRoot, TASKMASTER_TASKS_FILE);
	logger.info?.(`Using default output path: ${defaultPath}`);

	// Make sure the parent directory exists before callers try to write to it.
	const outputDir = path.dirname(defaultPath);
	if (!fs.existsSync(outputDir)) {
		logger.info?.(`Creating tasks directory: ${outputDir}`);
		fs.mkdirSync(outputDir, { recursive: true });
	}

	return defaultPath;
}
Use new .taskmaster structure by default\n\tconst defaultPath = path.join(projectRoot, TASKMASTER_TASKS_FILE);\n\tlogger.info?.(`Using default output path: ${defaultPath}`);\n\n\t// Ensure the directory exists\n\tconst outputDir = path.dirname(defaultPath);\n\tif (!fs.existsSync(outputDir)) {\n\t\tlogger.info?.(`Creating tasks directory: ${outputDir}`);\n\t\tfs.mkdirSync(outputDir, { recursive: true });\n\t}\n\n\treturn defaultPath;\n}\n\n/**\n * Resolve output path for complexity report (create if needed)\n * @param {string|null} explicitPath - Explicit output path provided by user\n * @param {Object|null} args - Args object for MCP context (optional)\n * @param {Object|null} log - Logger object (optional)\n * @returns {string} - Resolved output path for complexity report\n */\nexport function resolveComplexityReportOutputPath(\n\texplicitPath = null,\n\targs = null,\n\tlog = null\n) {\n\tconst logger = getLoggerOrDefault(log);\n\tconst tag = args?.tag;\n\n\t// 1. If explicit path is provided, use it\n\tif (explicitPath) {\n\t\tconst resolvedPath = path.isAbsolute(explicitPath)\n\t\t\t? explicitPath\n\t\t\t: path.resolve(process.cwd(), explicitPath);\n\n\t\tlogger.info?.(\n\t\t\t`Using explicit complexity report output path: ${resolvedPath}`\n\t\t);\n\t\treturn resolvedPath;\n\t}\n\n\t// 2. Try to get project root from args (MCP) or find it\n\tconst rawProjectRoot =\n\t\targs?.projectRoot || findProjectRoot() || process.cwd();\n\tconst projectRoot = normalizeProjectRoot(rawProjectRoot);\n\n\t// 3. Use tag-aware filename\n\tlet filename = 'task-complexity-report.json';\n\tif (tag && tag !== 'master') {\n\t\tfilename = `task-complexity-report_${tag}.json`;\n\t}\n\n\t// 4. 
Use new .taskmaster structure by default\n\tconst defaultPath = path.join(projectRoot, '.taskmaster/reports', filename);\n\tlogger.info?.(\n\t\t`Using tag-aware complexity report output path: ${defaultPath}`\n\t);\n\n\t// Ensure the directory exists\n\tconst outputDir = path.dirname(defaultPath);\n\tif (!fs.existsSync(outputDir)) {\n\t\tlogger.info?.(`Creating reports directory: ${outputDir}`);\n\t\tfs.mkdirSync(outputDir, { recursive: true });\n\t}\n\n\treturn defaultPath;\n}\n\n/**\n * Find the configuration file path with fallback logic\n * @param {string|null} explicitPath - Explicit path provided by user (highest priority)\n * @param {Object|null} args - Args object for MCP context (optional)\n * @param {Object|null} log - Logger object (optional)\n * @returns {string|null} - Resolved config file path or null if not found\n */\nexport function findConfigPath(explicitPath = null, args = null, log = null) {\n\tconst logger = getLoggerOrDefault(log);\n\n\t// 1. If explicit path is provided, use it (highest priority)\n\tif (explicitPath) {\n\t\tconst resolvedPath = path.isAbsolute(explicitPath)\n\t\t\t? explicitPath\n\t\t\t: path.resolve(process.cwd(), explicitPath);\n\n\t\tif (fs.existsSync(resolvedPath)) {\n\t\t\tlogger.info?.(`Using explicit config path: ${resolvedPath}`);\n\t\t\treturn resolvedPath;\n\t\t} else {\n\t\t\tlogger.warn?.(\n\t\t\t\t`Explicit config path not found: ${resolvedPath}, trying fallbacks`\n\t\t\t);\n\t\t}\n\t}\n\n\t// 2. Try to get project root from args (MCP) or find it\n\tconst rawProjectRoot = args?.projectRoot || findProjectRoot();\n\n\tif (!rawProjectRoot) {\n\t\tlogger.warn?.('Could not determine project root directory');\n\t\treturn null;\n\t}\n\n\t// 3. Normalize project root to prevent double .taskmaster paths\n\tconst projectRoot = normalizeProjectRoot(rawProjectRoot);\n\n\t// 4. 
Check possible locations in order of preference\n\tconst possiblePaths = [\n\t\tpath.join(projectRoot, TASKMASTER_CONFIG_FILE), // NEW location\n\t\tpath.join(projectRoot, LEGACY_CONFIG_FILE) // LEGACY location\n\t];\n\n\tfor (const configPath of possiblePaths) {\n\t\tif (fs.existsSync(configPath)) {\n\t\t\t// Issue deprecation warning for legacy paths\n\t\t\tif (configPath?.endsWith(LEGACY_CONFIG_FILE)) {\n\t\t\t\tlogger.warn?.(\n\t\t\t\t\t`⚠️ DEPRECATION WARNING: Found configuration in legacy location '${configPath}'. Please migrate to .taskmaster/config.json. Run 'task-master migrate' to automatically migrate your project.`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\treturn configPath;\n\t\t}\n\t}\n\n\tlogger.warn?.(`No configuration file found in project: ${projectRoot}`);\n\treturn null;\n}\n"], ["/claude-task-master/scripts/modules/utils/fuzzyTaskSearch.js", "/**\n * fuzzyTaskSearch.js\n * Reusable fuzzy search utility for finding relevant tasks based on semantic similarity\n */\n\nimport Fuse from 'fuse.js';\n\n/**\n * Configuration for different search contexts\n */\nconst SEARCH_CONFIGS = {\n\tresearch: {\n\t\tthreshold: 0.5, // More lenient for research (broader context)\n\t\tlimit: 20,\n\t\tkeys: [\n\t\t\t{ name: 'title', weight: 2.0 },\n\t\t\t{ name: 'description', weight: 1.0 },\n\t\t\t{ name: 'details', weight: 0.5 },\n\t\t\t{ name: 'dependencyTitles', weight: 0.5 }\n\t\t]\n\t},\n\taddTask: {\n\t\tthreshold: 0.4, // Stricter for add-task (more precise context)\n\t\tlimit: 15,\n\t\tkeys: [\n\t\t\t{ name: 'title', weight: 2.0 },\n\t\t\t{ name: 'description', weight: 1.5 },\n\t\t\t{ name: 'details', weight: 0.8 },\n\t\t\t{ name: 'dependencyTitles', weight: 0.5 }\n\t\t]\n\t},\n\tdefault: {\n\t\tthreshold: 0.4,\n\t\tlimit: 15,\n\t\tkeys: [\n\t\t\t{ name: 'title', weight: 2.0 },\n\t\t\t{ name: 'description', weight: 1.5 },\n\t\t\t{ name: 'details', weight: 1.0 },\n\t\t\t{ name: 'dependencyTitles', weight: 0.5 }\n\t\t]\n\t}\n};\n\n/**\n * Purpose categories for 
pattern-based task matching\n */\nconst PURPOSE_CATEGORIES = [\n\t{ pattern: /(command|cli|flag)/i, label: 'CLI commands' },\n\t{ pattern: /(task|subtask|add)/i, label: 'Task management' },\n\t{ pattern: /(dependency|depend)/i, label: 'Dependency handling' },\n\t{ pattern: /(AI|model|prompt|research)/i, label: 'AI integration' },\n\t{ pattern: /(UI|display|show|interface)/i, label: 'User interface' },\n\t{ pattern: /(schedule|time|cron)/i, label: 'Scheduling' },\n\t{ pattern: /(config|setting|option)/i, label: 'Configuration' },\n\t{ pattern: /(test|testing|spec)/i, label: 'Testing' },\n\t{ pattern: /(auth|login|user)/i, label: 'Authentication' },\n\t{ pattern: /(database|db|data)/i, label: 'Data management' },\n\t{ pattern: /(api|endpoint|route)/i, label: 'API development' },\n\t{ pattern: /(deploy|build|release)/i, label: 'Deployment' },\n\t{ pattern: /(security|auth|login|user)/i, label: 'Security' },\n\t{ pattern: /.*/, label: 'Other' }\n];\n\n/**\n * Relevance score thresholds\n */\nconst RELEVANCE_THRESHOLDS = {\n\thigh: 0.25,\n\tmedium: 0.4,\n\tlow: 0.6\n};\n\n/**\n * Fuzzy search utility class for finding relevant tasks\n */\nexport class FuzzyTaskSearch {\n\tconstructor(tasks, searchType = 'default') {\n\t\tthis.tasks = tasks;\n\t\tthis.config = SEARCH_CONFIGS[searchType] || SEARCH_CONFIGS.default;\n\t\tthis.searchableTasks = this._prepareSearchableTasks(tasks);\n\t\tthis.fuse = new Fuse(this.searchableTasks, {\n\t\t\tincludeScore: true,\n\t\t\tthreshold: this.config.threshold,\n\t\t\tkeys: this.config.keys,\n\t\t\tshouldSort: true,\n\t\t\tuseExtendedSearch: true,\n\t\t\tlimit: this.config.limit\n\t\t});\n\t}\n\n\t/**\n\t * Prepare tasks for searching by expanding dependency titles\n\t * @param {Array} tasks - Array of task objects\n\t * @returns {Array} Tasks with expanded dependency information\n\t */\n\t_prepareSearchableTasks(tasks) {\n\t\treturn tasks.map((task) => {\n\t\t\t// Get titles of this task's dependencies if they exist\n\t\t\tconst 
dependencyTitles =\n\t\t\t\ttask.dependencies?.length > 0\n\t\t\t\t\t? task.dependencies\n\t\t\t\t\t\t\t.map((depId) => {\n\t\t\t\t\t\t\t\tconst depTask = tasks.find((t) => t.id === depId);\n\t\t\t\t\t\t\t\treturn depTask ? depTask.title : '';\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t.filter((title) => title)\n\t\t\t\t\t\t\t.join(' ')\n\t\t\t\t\t: '';\n\n\t\t\treturn {\n\t\t\t\t...task,\n\t\t\t\tdependencyTitles\n\t\t\t};\n\t\t});\n\t}\n\n\t/**\n\t * Extract significant words from a prompt\n\t * @param {string} prompt - The search prompt\n\t * @returns {Array<string>} Array of significant words\n\t */\n\t_extractPromptWords(prompt) {\n\t\treturn prompt\n\t\t\t.toLowerCase()\n\t\t\t.replace(/[^\\w\\s-]/g, ' ') // Replace non-alphanumeric chars with spaces\n\t\t\t.split(/\\s+/)\n\t\t\t.filter((word) => word.length > 3); // Words at least 4 chars\n\t}\n\n\t/**\n\t * Find tasks related to a prompt using fuzzy search\n\t * @param {string} prompt - The search prompt\n\t * @param {Object} options - Search options\n\t * @param {number} [options.maxResults=8] - Maximum number of results to return\n\t * @param {boolean} [options.includeRecent=true] - Include recent tasks in results\n\t * @param {boolean} [options.includeCategoryMatches=true] - Include category-based matches\n\t * @returns {Object} Search results with relevance breakdown\n\t */\n\tfindRelevantTasks(prompt, options = {}) {\n\t\tconst {\n\t\t\tmaxResults = 8,\n\t\t\tincludeRecent = true,\n\t\t\tincludeCategoryMatches = true\n\t\t} = options;\n\n\t\t// Extract significant words from prompt\n\t\tconst promptWords = this._extractPromptWords(prompt);\n\n\t\t// Perform fuzzy search with full prompt\n\t\tconst fuzzyResults = this.fuse.search(prompt);\n\n\t\t// Also search for each significant word to catch different aspects\n\t\tlet wordResults = [];\n\t\tfor (const word of promptWords) {\n\t\t\tif (word.length > 5) {\n\t\t\t\t// Only use significant words\n\t\t\t\tconst results = this.fuse.search(word);\n\t\t\t\tif 
(results.length > 0) {\n\t\t\t\t\twordResults.push(...results);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Merge and deduplicate results\n\t\tconst mergedResults = [...fuzzyResults];\n\n\t\t// Add word results that aren't already in fuzzyResults\n\t\tfor (const wordResult of wordResults) {\n\t\t\tif (!mergedResults.some((r) => r.item.id === wordResult.item.id)) {\n\t\t\t\tmergedResults.push(wordResult);\n\t\t\t}\n\t\t}\n\n\t\t// Group search results by relevance\n\t\tconst highRelevance = mergedResults\n\t\t\t.filter((result) => result.score < RELEVANCE_THRESHOLDS.high)\n\t\t\t.map((result) => ({ ...result.item, score: result.score }));\n\n\t\tconst mediumRelevance = mergedResults\n\t\t\t.filter(\n\t\t\t\t(result) =>\n\t\t\t\t\tresult.score >= RELEVANCE_THRESHOLDS.high &&\n\t\t\t\t\tresult.score < RELEVANCE_THRESHOLDS.medium\n\t\t\t)\n\t\t\t.map((result) => ({ ...result.item, score: result.score }));\n\n\t\tconst lowRelevance = mergedResults\n\t\t\t.filter(\n\t\t\t\t(result) =>\n\t\t\t\t\tresult.score >= RELEVANCE_THRESHOLDS.medium &&\n\t\t\t\t\tresult.score < RELEVANCE_THRESHOLDS.low\n\t\t\t)\n\t\t\t.map((result) => ({ ...result.item, score: result.score }));\n\n\t\t// Get recent tasks (newest first) if requested\n\t\tconst recentTasks = includeRecent\n\t\t\t? [...this.tasks].sort((a, b) => b.id - a.id).slice(0, 5)\n\t\t\t: [];\n\n\t\t// Find category-based matches if requested\n\t\tlet categoryTasks = [];\n\t\tlet promptCategory = null;\n\t\tif (includeCategoryMatches) {\n\t\t\tpromptCategory = PURPOSE_CATEGORIES.find((cat) =>\n\t\t\t\tcat.pattern.test(prompt)\n\t\t\t);\n\t\t\tcategoryTasks = promptCategory\n\t\t\t\t? 
this.tasks\n\t\t\t\t\t\t.filter(\n\t\t\t\t\t\t\t(t) =>\n\t\t\t\t\t\t\t\tpromptCategory.pattern.test(t.title) ||\n\t\t\t\t\t\t\t\tpromptCategory.pattern.test(t.description) ||\n\t\t\t\t\t\t\t\t(t.details && promptCategory.pattern.test(t.details))\n\t\t\t\t\t\t)\n\t\t\t\t\t\t.slice(0, 3)\n\t\t\t\t: [];\n\t\t}\n\n\t\t// Combine all relevant tasks, prioritizing by relevance\n\t\tconst allRelevantTasks = [...highRelevance];\n\n\t\t// Add medium relevance if not already included\n\t\tfor (const task of mediumRelevance) {\n\t\t\tif (!allRelevantTasks.some((t) => t.id === task.id)) {\n\t\t\t\tallRelevantTasks.push(task);\n\t\t\t}\n\t\t}\n\n\t\t// Add low relevance if not already included\n\t\tfor (const task of lowRelevance) {\n\t\t\tif (!allRelevantTasks.some((t) => t.id === task.id)) {\n\t\t\t\tallRelevantTasks.push(task);\n\t\t\t}\n\t\t}\n\n\t\t// Add category tasks if not already included\n\t\tfor (const task of categoryTasks) {\n\t\t\tif (!allRelevantTasks.some((t) => t.id === task.id)) {\n\t\t\t\tallRelevantTasks.push(task);\n\t\t\t}\n\t\t}\n\n\t\t// Add recent tasks if not already included\n\t\tfor (const task of recentTasks) {\n\t\t\tif (!allRelevantTasks.some((t) => t.id === task.id)) {\n\t\t\t\tallRelevantTasks.push(task);\n\t\t\t}\n\t\t}\n\n\t\t// Get top N results for final output\n\t\tconst finalResults = allRelevantTasks.slice(0, maxResults);\n\n\t\treturn {\n\t\t\tresults: finalResults,\n\t\t\tbreakdown: {\n\t\t\t\thighRelevance,\n\t\t\t\tmediumRelevance,\n\t\t\t\tlowRelevance,\n\t\t\t\tcategoryTasks,\n\t\t\t\trecentTasks,\n\t\t\t\tpromptCategory,\n\t\t\t\tpromptWords\n\t\t\t},\n\t\t\tmetadata: {\n\t\t\t\ttotalSearched: this.tasks.length,\n\t\t\t\tfuzzyMatches: fuzzyResults.length,\n\t\t\t\twordMatches: wordResults.length,\n\t\t\t\tfinalCount: finalResults.length\n\t\t\t}\n\t\t};\n\t}\n\n\t/**\n\t * Get task IDs from search results\n\t * @param {Object} searchResults - Results from findRelevantTasks\n\t * @returns {Array<string>} Array of task ID strings\n\t 
*/\n\tgetTaskIds(searchResults) {\n\t\treturn searchResults.results.map((task) => task.id.toString());\n\t}\n\n\t/**\n\t * Get task IDs including subtasks from search results\n\t * @param {Object} searchResults - Results from findRelevantTasks\n\t * @param {boolean} [includeSubtasks=false] - Whether to include subtask IDs\n\t * @returns {Array<string>} Array of task and subtask ID strings\n\t */\n\tgetTaskIdsWithSubtasks(searchResults, includeSubtasks = false) {\n\t\tconst taskIds = [];\n\n\t\tfor (const task of searchResults.results) {\n\t\t\ttaskIds.push(task.id.toString());\n\n\t\t\tif (includeSubtasks && task.subtasks && task.subtasks.length > 0) {\n\t\t\t\tfor (const subtask of task.subtasks) {\n\t\t\t\t\ttaskIds.push(`${task.id}.${subtask.id}`);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn taskIds;\n\t}\n\n\t/**\n\t * Format search results for display\n\t * @param {Object} searchResults - Results from findRelevantTasks\n\t * @param {Object} options - Formatting options\n\t * @returns {string} Formatted search results summary\n\t */\n\tformatSearchSummary(searchResults, options = {}) {\n\t\tconst { includeScores = false, includeBreakdown = false } = options;\n\t\tconst { results, breakdown, metadata } = searchResults;\n\n\t\tlet summary = `Found ${results.length} relevant tasks from ${metadata.totalSearched} total tasks`;\n\n\t\tif (includeBreakdown && breakdown) {\n\t\t\tconst parts = [];\n\t\t\tif (breakdown.highRelevance.length > 0)\n\t\t\t\tparts.push(`${breakdown.highRelevance.length} high relevance`);\n\t\t\tif (breakdown.mediumRelevance.length > 0)\n\t\t\t\tparts.push(`${breakdown.mediumRelevance.length} medium relevance`);\n\t\t\tif (breakdown.lowRelevance.length > 0)\n\t\t\t\tparts.push(`${breakdown.lowRelevance.length} low relevance`);\n\t\t\tif (breakdown.categoryTasks.length > 0)\n\t\t\t\tparts.push(`${breakdown.categoryTasks.length} category matches`);\n\n\t\t\tif (parts.length > 0) {\n\t\t\t\tsummary += ` (${parts.join(', ')})`;\n\t\t\t}\n\n\t\t\tif 
(breakdown.promptCategory) {\n\t\t\t\tsummary += `\\nCategory detected: ${breakdown.promptCategory.label}`;\n\t\t\t}\n\t\t}\n\n\t\treturn summary;\n\t}\n}\n\n/**\n * Factory function to create a fuzzy search instance\n * @param {Array} tasks - Array of task objects\n * @param {string} [searchType='default'] - Type of search configuration to use\n * @returns {FuzzyTaskSearch} Fuzzy search instance\n */\nexport function createFuzzyTaskSearch(tasks, searchType = 'default') {\n\treturn new FuzzyTaskSearch(tasks, searchType);\n}\n\n/**\n * Quick utility function to find relevant task IDs for a prompt\n * @param {Array} tasks - Array of task objects\n * @param {string} prompt - Search prompt\n * @param {Object} options - Search options\n * @returns {Array<string>} Array of relevant task ID strings\n */\nexport function findRelevantTaskIds(tasks, prompt, options = {}) {\n\tconst {\n\t\tsearchType = 'default',\n\t\tmaxResults = 8,\n\t\tincludeSubtasks = false\n\t} = options;\n\n\tconst fuzzySearch = new FuzzyTaskSearch(tasks, searchType);\n\tconst results = fuzzySearch.findRelevantTasks(prompt, { maxResults });\n\n\treturn includeSubtasks\n\t\t? 
fuzzySearch.getTaskIdsWithSubtasks(results, true)\n\t\t: fuzzySearch.getTaskIds(results);\n}\n\nexport default FuzzyTaskSearch;\n"], ["/claude-task-master/mcp-server/src/tools/get-task.js", "/**\n * tools/get-task.js\n * Tool to get task details by ID\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { showTaskDirect } from '../core/task-master-core.js';\nimport {\n\tfindTasksPath,\n\tfindComplexityReportPath\n} from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Custom processor function that removes allTasks from the response\n * @param {Object} data - The data returned from showTaskDirect\n * @returns {Object} - The processed data with allTasks removed\n */\nfunction processTaskResponse(data) {\n\tif (!data) return data;\n\n\t// If we have the expected structure with task and allTasks\n\tif (typeof data === 'object' && data !== null && data.id && data.title) {\n\t\t// If the data itself looks like the task object, return it\n\t\treturn data;\n\t} else if (data.task) {\n\t\treturn data.task;\n\t}\n\n\t// If structure is unexpected, return as is\n\treturn data;\n}\n\n/**\n * Register the get-task tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerShowTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'get_task',\n\t\tdescription: 'Get detailed information about a specific task',\n\t\tparameters: z.object({\n\t\t\tid: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'Task ID(s) to get (can be comma-separated for multiple tasks)'\n\t\t\t\t),\n\t\t\tstatus: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\"Filter subtasks by status (e.g., 'pending', 'done')\"),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file relative to project root'),\n\t\t\tcomplexityReport: 
z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the complexity report file (relative to project root or absolute)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the project root directory (Optional, usually from session)'\n\t\t\t\t),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\tconst { id, file, status, projectRoot } = args;\n\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Getting task details for ID: ${id}${status ? ` (filtering subtasks by status: ${status})` : ''} in root: ${projectRoot}`\n\t\t\t\t);\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\t// Resolve the path to tasks.json using the NORMALIZED projectRoot from args\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: projectRoot, file: file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t\tlog.info(`Resolved tasks path: ${tasksJsonPath}`);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function, passing the normalized projectRoot\n\t\t\t\t// Resolve the path to complexity report\n\t\t\t\tlet complexityReportPath;\n\t\t\t\ttry {\n\t\t\t\t\tcomplexityReportPath = findComplexityReportPath(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\t\t\t\tcomplexityReport: args.complexityReport,\n\t\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t\t},\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding complexity report: ${error.message}`);\n\t\t\t\t}\n\t\t\t\tconst result = await 
showTaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\treportPath: complexityReportPath,\n\t\t\t\t\t\t// Pass other relevant args\n\t\t\t\t\t\tid: id,\n\t\t\t\t\t\tstatus: status,\n\t\t\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully retrieved task details for ID: ${args.id}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to get task: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\t// Use our custom processor function\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error retrieving task details',\n\t\t\t\t\tprocessTaskResponse,\n\t\t\t\t\tprojectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in get-task tool: ${error.message}\\n${error.stack}`);\n\t\t\t\treturn createErrorResponse(`Failed to get task: ${error.message}`);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/utils/rule-transformer.js", "/**\n * Rule Transformer Module\n * Handles conversion of Cursor rules to profile rules\n *\n * This module procedurally generates .{profile}/rules files from assets/rules files,\n * eliminating the need to maintain both sets of files manually.\n */\nimport fs from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\nimport { log } from '../../scripts/modules/utils.js';\n\n// Import the shared MCP configuration helper\nimport {\n\tsetupMCPConfiguration,\n\tremoveTaskMasterMCPConfiguration\n} from './create-mcp-config.js';\n\n// Import profile constants (single source of truth)\nimport { RULE_PROFILES } from '../constants/profiles.js';\n\n// --- Profile Imports ---\nimport * as profilesModule from '../profiles/index.js';\n\nexport function isValidProfile(profile) {\n\treturn RULE_PROFILES.includes(profile);\n}\n\n/**\n * Get rule profile by name\n * @param {string} name - Profile name\n * @returns {Object|null} 
Profile object or null if not found\n */\nexport function getRulesProfile(name) {\n\tif (!isValidProfile(name)) {\n\t\treturn null;\n\t}\n\n\t// Get the profile from the imported profiles module\n\tconst profileKey = `${name}Profile`;\n\tconst profile = profilesModule[profileKey];\n\n\tif (!profile) {\n\t\tthrow new Error(\n\t\t\t`Profile not found: static import missing for '${name}'. Valid profiles: ${RULE_PROFILES.join(', ')}`\n\t\t);\n\t}\n\n\treturn profile;\n}\n\n/**\n * Replace basic Cursor terms with profile equivalents\n */\nfunction replaceBasicTerms(content, conversionConfig) {\n\tlet result = content;\n\n\t// Apply profile term replacements\n\tconversionConfig.profileTerms.forEach((pattern) => {\n\t\tif (typeof pattern.to === 'function') {\n\t\t\tresult = result.replace(pattern.from, pattern.to);\n\t\t} else {\n\t\t\tresult = result.replace(pattern.from, pattern.to);\n\t\t}\n\t});\n\n\t// Apply file extension replacements\n\tconversionConfig.fileExtensions.forEach((pattern) => {\n\t\tresult = result.replace(pattern.from, pattern.to);\n\t});\n\n\treturn result;\n}\n\n/**\n * Replace Cursor tool references with profile tool equivalents\n */\nfunction replaceToolReferences(content, conversionConfig) {\n\tlet result = content;\n\n\t// Basic pattern for direct tool name replacements\n\tconst toolNames = conversionConfig.toolNames;\n\tconst toolReferencePattern = new RegExp(\n\t\t`\\\\b(${Object.keys(toolNames).join('|')})\\\\b`,\n\t\t'g'\n\t);\n\n\t// Apply direct tool name replacements\n\tresult = result.replace(toolReferencePattern, (match, toolName) => {\n\t\treturn toolNames[toolName] || toolName;\n\t});\n\n\t// Apply contextual tool replacements\n\tconversionConfig.toolContexts.forEach((pattern) => {\n\t\tresult = result.replace(pattern.from, pattern.to);\n\t});\n\n\t// Apply tool group replacements\n\tconversionConfig.toolGroups.forEach((pattern) => {\n\t\tresult = result.replace(pattern.from, pattern.to);\n\t});\n\n\treturn result;\n}\n\n/**\n * 
Update documentation URLs to point to profile documentation\n */\nfunction updateDocReferences(content, conversionConfig) {\n\tlet result = content;\n\n\t// Apply documentation URL replacements\n\tconversionConfig.docUrls.forEach((pattern) => {\n\t\tif (typeof pattern.to === 'function') {\n\t\t\tresult = result.replace(pattern.from, pattern.to);\n\t\t} else {\n\t\t\tresult = result.replace(pattern.from, pattern.to);\n\t\t}\n\t});\n\n\treturn result;\n}\n\n/**\n * Update file references in markdown links\n */\nfunction updateFileReferences(content, conversionConfig) {\n\tconst { pathPattern, replacement } = conversionConfig.fileReferences;\n\treturn content.replace(pathPattern, replacement);\n}\n\n/**\n * Transform rule content to profile-specific rules\n * @param {string} content - The content to transform\n * @param {Object} conversionConfig - The conversion configuration\n * @param {Object} globalReplacements - Global text replacements\n * @returns {string} - The transformed content\n */\nfunction transformRuleContent(content, conversionConfig, globalReplacements) {\n\tlet result = content;\n\n\t// Apply all transformations in appropriate order\n\tresult = updateFileReferences(result, conversionConfig);\n\tresult = replaceBasicTerms(result, conversionConfig);\n\tresult = replaceToolReferences(result, conversionConfig);\n\tresult = updateDocReferences(result, conversionConfig);\n\n\t// Apply any global/catch-all replacements from the profile\n\t// Super aggressive failsafe pass to catch any variations we might have missed\n\t// This ensures critical transformations are applied even in contexts we didn't anticipate\n\tglobalReplacements.forEach((pattern) => {\n\t\tif (typeof pattern.to === 'function') {\n\t\t\tresult = result.replace(pattern.from, pattern.to);\n\t\t} else {\n\t\t\tresult = result.replace(pattern.from, pattern.to);\n\t\t}\n\t});\n\n\treturn result;\n}\n\n/**\n * Convert a Cursor rule file to a profile-specific rule file\n * @param {string} 
sourcePath - Path to the source .mdc file\n * @param {string} targetPath - Path to the target file\n * @param {Object} profile - The profile configuration\n * @returns {boolean} - Success status\n */\nexport function convertRuleToProfileRule(sourcePath, targetPath, profile) {\n\tconst { conversionConfig, globalReplacements } = profile;\n\ttry {\n\t\t// Read source content\n\t\tconst content = fs.readFileSync(sourcePath, 'utf8');\n\n\t\t// Transform content\n\t\tconst transformedContent = transformRuleContent(\n\t\t\tcontent,\n\t\t\tconversionConfig,\n\t\t\tglobalReplacements\n\t\t);\n\n\t\t// Ensure target directory exists\n\t\tconst targetDir = path.dirname(targetPath);\n\t\tif (!fs.existsSync(targetDir)) {\n\t\t\tfs.mkdirSync(targetDir, { recursive: true });\n\t\t}\n\n\t\t// Write transformed content\n\t\tfs.writeFileSync(targetPath, transformedContent);\n\n\t\treturn true;\n\t} catch (error) {\n\t\tconsole.error(`Error converting rule file: ${error.message}`);\n\t\treturn false;\n\t}\n}\n\n/**\n * Convert all Cursor rules to profile rules for a specific profile\n */\nexport function convertAllRulesToProfileRules(projectRoot, profile) {\n\tconst __filename = fileURLToPath(import.meta.url);\n\tconst __dirname = path.dirname(__filename);\n\tconst sourceDir = path.join(__dirname, '..', '..', 'assets', 'rules');\n\tconst targetDir = path.join(projectRoot, profile.rulesDir);\n\tconst assetsDir = path.join(__dirname, '..', '..', 'assets');\n\n\tlet success = 0;\n\tlet failed = 0;\n\n\t// 1. 
Call onAddRulesProfile first (for pre-processing like copying assets)\n\tif (typeof profile.onAddRulesProfile === 'function') {\n\t\ttry {\n\t\t\tprofile.onAddRulesProfile(projectRoot, assetsDir);\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[Rule Transformer] Called onAddRulesProfile for ${profile.profileName}`\n\t\t\t);\n\t\t} catch (error) {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`[Rule Transformer] onAddRulesProfile failed for ${profile.profileName}: ${error.message}`\n\t\t\t);\n\t\t\tfailed++;\n\t\t}\n\t}\n\n\t// 2. Handle fileMap-based rule conversion (if any)\n\tconst sourceFiles = Object.keys(profile.fileMap);\n\tif (sourceFiles.length > 0) {\n\t\t// Only create rules directory if we have files to copy\n\t\tif (!fs.existsSync(targetDir)) {\n\t\t\tfs.mkdirSync(targetDir, { recursive: true });\n\t\t}\n\n\t\tfor (const sourceFile of sourceFiles) {\n\t\t\t// Determine if this is an asset file (not a rule file)\n\t\t\tconst isAssetFile = !sourceFile.startsWith('rules/');\n\n\t\t\ttry {\n\t\t\t\t// Use explicit path from fileMap - assets/ is the base directory\n\t\t\t\tconst sourcePath = path.join(assetsDir, sourceFile);\n\n\t\t\t\t// Check if source file exists\n\t\t\t\tif (!fs.existsSync(sourcePath)) {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'warn',\n\t\t\t\t\t\t`[Rule Transformer] Source file not found: ${sourcePath}, skipping`\n\t\t\t\t\t);\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tconst targetFilename = profile.fileMap[sourceFile];\n\t\t\t\tconst targetPath = path.join(targetDir, targetFilename);\n\n\t\t\t\t// Ensure target subdirectory exists (for rules like taskmaster/dev_workflow.md)\n\t\t\t\tconst targetFileDir = path.dirname(targetPath);\n\t\t\t\tif (!fs.existsSync(targetFileDir)) {\n\t\t\t\t\tfs.mkdirSync(targetFileDir, { recursive: true });\n\t\t\t\t}\n\n\t\t\t\t// Read source content\n\t\t\t\tlet content = fs.readFileSync(sourcePath, 'utf8');\n\n\t\t\t\t// Apply transformations (only if this is a rule file, not an asset file)\n\t\t\t\tif (!isAssetFile) 
{\n\t\t\t\t\tcontent = transformRuleContent(\n\t\t\t\t\t\tcontent,\n\t\t\t\t\t\tprofile.conversionConfig,\n\t\t\t\t\t\tprofile.globalReplacements\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Write to target\n\t\t\t\tfs.writeFileSync(targetPath, content, 'utf8');\n\t\t\t\tsuccess++;\n\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`[Rule Transformer] ${isAssetFile ? 'Copied' : 'Converted'} ${sourceFile} -> ${targetFilename} for ${profile.profileName}`\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tfailed++;\n\t\t\t\tlog(\n\t\t\t\t\t'error',\n\t\t\t\t\t`[Rule Transformer] Failed to ${isAssetFile ? 'copy' : 'convert'} ${sourceFile} for ${profile.profileName}: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t}\n\n\t// 3. Setup MCP configuration (if enabled)\n\tif (profile.mcpConfig !== false) {\n\t\ttry {\n\t\t\tsetupMCPConfiguration(projectRoot, profile.mcpConfigPath);\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[Rule Transformer] Setup MCP configuration for ${profile.profileName}`\n\t\t\t);\n\t\t} catch (error) {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`[Rule Transformer] MCP setup failed for ${profile.profileName}: ${error.message}`\n\t\t\t);\n\t\t}\n\t}\n\n\t// 4. 
Call post-conversion hook (for finalization)\n\tif (typeof profile.onPostConvertRulesProfile === 'function') {\n\t\ttry {\n\t\t\tprofile.onPostConvertRulesProfile(projectRoot, assetsDir);\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[Rule Transformer] Called onPostConvertRulesProfile for ${profile.profileName}`\n\t\t\t);\n\t\t} catch (error) {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`[Rule Transformer] onPostConvertRulesProfile failed for ${profile.profileName}: ${error.message}`\n\t\t\t);\n\t\t}\n\t}\n\n\t// Ensure we return at least 1 success for profiles that only use lifecycle functions\n\treturn { success: Math.max(success, 1), failed };\n}\n\n/**\n * Remove only Task Master specific files from a profile, leaving other existing rules intact\n * @param {string} projectRoot - Target project directory\n * @param {Object} profile - Profile configuration\n * @returns {Object} Result object\n */\nexport function removeProfileRules(projectRoot, profile) {\n\tconst targetDir = path.join(projectRoot, profile.rulesDir);\n\tconst profileDir = path.join(projectRoot, profile.profileDir);\n\n\tconst result = {\n\t\tprofileName: profile.profileName,\n\t\tsuccess: false,\n\t\tskipped: false,\n\t\terror: null,\n\t\tfilesRemoved: [],\n\t\tmcpResult: null,\n\t\tprofileDirRemoved: false,\n\t\tnotice: null\n\t};\n\n\ttry {\n\t\t// 1. Call onRemoveRulesProfile first (for custom cleanup like removing assets)\n\t\tif (typeof profile.onRemoveRulesProfile === 'function') {\n\t\t\ttry {\n\t\t\t\tprofile.onRemoveRulesProfile(projectRoot);\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`[Rule Transformer] Called onRemoveRulesProfile for ${profile.profileName}`\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog(\n\t\t\t\t\t'error',\n\t\t\t\t\t`[Rule Transformer] onRemoveRulesProfile failed for ${profile.profileName}: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\t// 2. 
Remove fileMap-based files (if any)\n\t\tconst sourceFiles = Object.keys(profile.fileMap);\n\t\tif (sourceFiles.length > 0) {\n\t\t\t// Check if profile directory exists at all (for full profiles)\n\t\t\tif (!fs.existsSync(profileDir)) {\n\t\t\t\tresult.success = true;\n\t\t\t\tresult.skipped = true;\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`[Rule Transformer] Profile directory does not exist: ${profileDir}`\n\t\t\t\t);\n\t\t\t\treturn result;\n\t\t\t}\n\n\t\t\tlet hasOtherRulesFiles = false;\n\n\t\t\tif (fs.existsSync(targetDir)) {\n\t\t\t\t// Get list of files we're responsible for\n\t\t\t\tconst taskMasterFiles = sourceFiles.map(\n\t\t\t\t\t(sourceFile) => profile.fileMap[sourceFile]\n\t\t\t\t);\n\n\t\t\t\t// Get all files in the rules directory\n\t\t\t\tconst allFiles = fs.readdirSync(targetDir, { recursive: true });\n\t\t\t\tconst allFilePaths = allFiles\n\t\t\t\t\t.filter((file) => {\n\t\t\t\t\t\tconst fullPath = path.join(targetDir, file);\n\t\t\t\t\t\treturn fs.statSync(fullPath).isFile();\n\t\t\t\t\t})\n\t\t\t\t\t.map((file) => file.toString()); // Ensure it's a string\n\n\t\t\t\t// Remove only Task Master files\n\t\t\t\tfor (const taskMasterFile of taskMasterFiles) {\n\t\t\t\t\tconst filePath = path.join(targetDir, taskMasterFile);\n\t\t\t\t\tif (fs.existsSync(filePath)) {\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tfs.rmSync(filePath, { force: true });\n\t\t\t\t\t\t\tresult.filesRemoved.push(taskMasterFile);\n\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t'debug',\n\t\t\t\t\t\t\t\t`[Rule Transformer] Removed Task Master file: ${taskMasterFile}`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t'error',\n\t\t\t\t\t\t\t\t`[Rule Transformer] Failed to remove ${taskMasterFile}: ${error.message}`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Check for other (non-Task Master) files\n\t\t\t\tconst remainingFiles = allFilePaths.filter(\n\t\t\t\t\t(file) => 
!taskMasterFiles.includes(file)\n\t\t\t\t);\n\n\t\t\t\thasOtherRulesFiles = remainingFiles.length > 0;\n\n\t\t\t\t// Remove empty directories or note preserved files\n\t\t\t\tif (remainingFiles.length === 0) {\n\t\t\t\t\tfs.rmSync(targetDir, { recursive: true, force: true });\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'debug',\n\t\t\t\t\t\t`[Rule Transformer] Removed empty rules directory: ${targetDir}`\n\t\t\t\t\t);\n\t\t\t\t} else if (hasOtherRulesFiles) {\n\t\t\t\t\tresult.notice = `Preserved ${remainingFiles.length} existing rule files in ${profile.rulesDir}`;\n\t\t\t\t\tlog('info', `[Rule Transformer] ${result.notice}`);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// 3. Handle MCP configuration - only remove Task Master, preserve other servers\n\t\tif (profile.mcpConfig !== false) {\n\t\t\ttry {\n\t\t\t\tresult.mcpResult = removeTaskMasterMCPConfiguration(\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tprofile.mcpConfigPath\n\t\t\t\t);\n\t\t\t\tif (result.mcpResult.hasOtherServers) {\n\t\t\t\t\tif (!result.notice) {\n\t\t\t\t\t\tresult.notice = 'Preserved other MCP server configurations';\n\t\t\t\t\t} else {\n\t\t\t\t\t\tresult.notice += '; preserved other MCP server configurations';\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`[Rule Transformer] Processed MCP configuration for ${profile.profileName}`\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog(\n\t\t\t\t\t'error',\n\t\t\t\t\t`[Rule Transformer] MCP cleanup failed for ${profile.profileName}: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\t// 4. 
Check if we should remove the entire profile directory\n\t\tif (fs.existsSync(profileDir)) {\n\t\t\tconst remainingContents = fs.readdirSync(profileDir);\n\t\t\tif (remainingContents.length === 0 && profile.profileDir !== '.') {\n\t\t\t\t// Only remove profile directory if it's empty and not root directory\n\t\t\t\ttry {\n\t\t\t\t\tfs.rmSync(profileDir, { recursive: true, force: true });\n\t\t\t\t\tresult.profileDirRemoved = true;\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'debug',\n\t\t\t\t\t\t`[Rule Transformer] Removed empty profile directory: ${profileDir}`\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'error',\n\t\t\t\t\t\t`[Rule Transformer] Failed to remove profile directory ${profileDir}: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} else if (remainingContents.length > 0) {\n\t\t\t\t// Profile directory has remaining files/folders, add notice\n\t\t\t\tconst preservedNotice = `Preserved ${remainingContents.length} existing files/folders in ${profile.profileDir}`;\n\t\t\t\tif (!result.notice) {\n\t\t\t\t\tresult.notice = preservedNotice;\n\t\t\t\t} else {\n\t\t\t\t\tresult.notice += `; ${preservedNotice.toLowerCase()}`;\n\t\t\t\t}\n\t\t\t\tlog('info', `[Rule Transformer] ${preservedNotice}`);\n\t\t\t}\n\t\t}\n\n\t\tresult.success = true;\n\t\tlog(\n\t\t\t'debug',\n\t\t\t`[Rule Transformer] Successfully removed ${profile.profileName} Task Master files from ${projectRoot}`\n\t\t);\n\t} catch (error) {\n\t\tresult.error = error.message;\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[Rule Transformer] Failed to remove ${profile.profileName} rules: ${error.message}`\n\t\t);\n\t}\n\n\treturn result;\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/remove-subtask.js", "/**\n * Direct function wrapper for removeSubtask\n */\n\nimport { removeSubtask } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Remove a subtask from its parent 
task\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.id - Subtask ID in format \"parentId.subtaskId\" (required)\n * @param {boolean} [args.convert] - Whether to convert the subtask to a standalone task\n * @param {boolean} [args.skipGenerate] - Skip regenerating task files\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function removeSubtaskDirect(args, log) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, id, convert, skipGenerate, projectRoot, tag } = args;\n\ttry {\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\tlog.info(`Removing subtask with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('removeSubtaskDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode(); // Disable before returning\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (!id) {\n\t\t\tdisableSilentMode(); // Disable before returning\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage:\n\t\t\t\t\t\t'Subtask ID is required and must be in format \"parentId.subtaskId\"'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Validate subtask ID format\n\t\tif (!id.includes('.')) {\n\t\t\tdisableSilentMode(); // Disable before returning\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: `Invalid subtask ID format: ${id}. 
Expected format: \"parentId.subtaskId\"`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Convert convertToTask to a boolean\n\t\tconst convertToTask = convert === true;\n\n\t\t// Determine if we should generate files\n\t\tconst generateFiles = !skipGenerate;\n\n\t\tlog.info(\n\t\t\t`Removing subtask ${id} (convertToTask: ${convertToTask}, generateFiles: ${generateFiles})`\n\t\t);\n\n\t\t// Use the provided tasksPath\n\t\tconst result = await removeSubtask(\n\t\t\ttasksPath,\n\t\t\tid,\n\t\t\tconvertToTask,\n\t\t\tgenerateFiles,\n\t\t\t{\n\t\t\t\tprojectRoot,\n\t\t\t\ttag\n\t\t\t}\n\t\t);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\tif (convertToTask && result) {\n\t\t\t// Return info about the converted task\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Subtask ${id} successfully converted to task #${result.id}`,\n\t\t\t\t\ttask: result\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// Return simple success message for deletion\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Subtask ${id} successfully removed`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\t// Ensure silent mode is disabled even if an outer error occurs\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in removeSubtaskDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/add-dependency.js", "/**\n * add-dependency.js\n * Direct function implementation for adding a dependency to a task\n */\n\nimport { addDependency } from '../../../../scripts/modules/dependency-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for addDependency with error handling.\n *\n * @param {Object} args - 
Command arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string|number} args.id - Task ID to add dependency to\n * @param {string|number} args.dependsOn - Task ID that will become a dependency\n * @param {string} args.tag - Tag for the task (optional)\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {Object} log - Logger object\n * @returns {Promise<Object>} - Result object with success status and data/error information\n */\nexport async function addDependencyDirect(args, log) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, id, dependsOn, tag, projectRoot } = args;\n\ttry {\n\t\tlog.info(`Adding dependency with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('addDependencyDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Validate required parameters\n\t\tif (!id) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: 'Task ID (id) is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (!dependsOn) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: 'Dependency ID (dependsOn) is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Format IDs for the core function\n\t\tconst taskId =\n\t\t\tid && id.includes && id.includes('.') ? id : parseInt(id, 10);\n\t\tconst dependencyId =\n\t\t\tdependsOn && dependsOn.includes && dependsOn.includes('.')\n\t\t\t\t? 
dependsOn\n\t\t\t\t: parseInt(dependsOn, 10);\n\n\t\tlog.info(\n\t\t\t`Adding dependency: task ${taskId} will depend on ${dependencyId}`\n\t\t);\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\t// Create context object\n\t\tconst context = { projectRoot, tag };\n\n\t\t// Call the core function using the provided path\n\t\tawait addDependency(tasksPath, taskId, dependencyId, context);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: `Successfully added dependency: Task ${taskId} now depends on ${dependencyId}`,\n\t\t\t\ttaskId: taskId,\n\t\t\t\tdependencyId: dependencyId\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in addDependencyDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/custom-sdk/claude-code/language-model.js", "/**\n * @fileoverview Claude Code Language Model implementation\n */\n\nimport { NoSuchModelError } from '@ai-sdk/provider';\nimport { generateId } from '@ai-sdk/provider-utils';\nimport { convertToClaudeCodeMessages } from './message-converter.js';\nimport { extractJson } from './json-extractor.js';\nimport { createAPICallError, createAuthenticationError } from './errors.js';\n\nlet query;\nlet AbortError;\n\nasync function loadClaudeCodeModule() {\n\tif (!query || !AbortError) {\n\t\ttry {\n\t\t\tconst mod = await import('@anthropic-ai/claude-code');\n\t\t\tquery = mod.query;\n\t\t\tAbortError = mod.AbortError;\n\t\t} catch (err) {\n\t\t\tthrow new Error(\n\t\t\t\t\"Claude Code SDK is not installed. 
Please install '@anthropic-ai/claude-code' to use the claude-code provider.\"\n\t\t\t);\n\t\t}\n\t}\n}\n\n/**\n * @typedef {import('./types.js').ClaudeCodeSettings} ClaudeCodeSettings\n * @typedef {import('./types.js').ClaudeCodeModelId} ClaudeCodeModelId\n * @typedef {import('./types.js').ClaudeCodeLanguageModelOptions} ClaudeCodeLanguageModelOptions\n */\n\nconst modelMap = {\n\topus: 'opus',\n\tsonnet: 'sonnet'\n};\n\nexport class ClaudeCodeLanguageModel {\n\tspecificationVersion = 'v1';\n\tdefaultObjectGenerationMode = 'json';\n\tsupportsImageUrls = false;\n\tsupportsStructuredOutputs = false;\n\n\t/** @type {ClaudeCodeModelId} */\n\tmodelId;\n\n\t/** @type {ClaudeCodeSettings} */\n\tsettings;\n\n\t/** @type {string|undefined} */\n\tsessionId;\n\n\t/**\n\t * @param {ClaudeCodeLanguageModelOptions} options\n\t */\n\tconstructor(options) {\n\t\tthis.modelId = options.id;\n\t\tthis.settings = options.settings ?? {};\n\n\t\t// Validate model ID format\n\t\tif (\n\t\t\t!this.modelId ||\n\t\t\ttypeof this.modelId !== 'string' ||\n\t\t\tthis.modelId.trim() === ''\n\t\t) {\n\t\t\tthrow new NoSuchModelError({\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\tmodelType: 'languageModel'\n\t\t\t});\n\t\t}\n\t}\n\n\tget provider() {\n\t\treturn 'claude-code';\n\t}\n\n\t/**\n\t * Get the model name for Claude Code CLI\n\t * @returns {string}\n\t */\n\tgetModel() {\n\t\tconst mapped = modelMap[this.modelId];\n\t\treturn mapped ?? 
this.modelId;\n\t}\n\n\t/**\n\t * Generate unsupported parameter warnings\n\t * @param {Object} options - Generation options\n\t * @returns {Array} Warnings array\n\t */\n\tgenerateUnsupportedWarnings(options) {\n\t\tconst warnings = [];\n\t\tconst unsupportedParams = [];\n\n\t\t// Check for unsupported parameters\n\t\tif (options.temperature !== undefined)\n\t\t\tunsupportedParams.push('temperature');\n\t\tif (options.maxTokens !== undefined) unsupportedParams.push('maxTokens');\n\t\tif (options.topP !== undefined) unsupportedParams.push('topP');\n\t\tif (options.topK !== undefined) unsupportedParams.push('topK');\n\t\tif (options.presencePenalty !== undefined)\n\t\t\tunsupportedParams.push('presencePenalty');\n\t\tif (options.frequencyPenalty !== undefined)\n\t\t\tunsupportedParams.push('frequencyPenalty');\n\t\tif (options.stopSequences !== undefined && options.stopSequences.length > 0)\n\t\t\tunsupportedParams.push('stopSequences');\n\t\tif (options.seed !== undefined) unsupportedParams.push('seed');\n\n\t\tif (unsupportedParams.length > 0) {\n\t\t\t// Add a warning for each unsupported parameter\n\t\t\tfor (const param of unsupportedParams) {\n\t\t\t\twarnings.push({\n\t\t\t\t\ttype: 'unsupported-setting',\n\t\t\t\t\tsetting: param,\n\t\t\t\t\tdetails: `Claude Code CLI does not support the ${param} parameter. 
It will be ignored.`\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\treturn warnings;\n\t}\n\n\t/**\n\t * Generate text using Claude Code\n\t * @param {Object} options - Generation options\n\t * @returns {Promise<Object>}\n\t */\n\tasync doGenerate(options) {\n\t\tawait loadClaudeCodeModule();\n\t\tconst { messagesPrompt } = convertToClaudeCodeMessages(\n\t\t\toptions.prompt,\n\t\t\toptions.mode\n\t\t);\n\n\t\tconst abortController = new AbortController();\n\t\tif (options.abortSignal) {\n\t\t\toptions.abortSignal.addEventListener('abort', () =>\n\t\t\t\tabortController.abort()\n\t\t\t);\n\t\t}\n\n\t\tconst queryOptions = {\n\t\t\tmodel: this.getModel(),\n\t\t\tabortController,\n\t\t\tresume: this.sessionId,\n\t\t\tpathToClaudeCodeExecutable: this.settings.pathToClaudeCodeExecutable,\n\t\t\tcustomSystemPrompt: this.settings.customSystemPrompt,\n\t\t\tappendSystemPrompt: this.settings.appendSystemPrompt,\n\t\t\tmaxTurns: this.settings.maxTurns,\n\t\t\tmaxThinkingTokens: this.settings.maxThinkingTokens,\n\t\t\tcwd: this.settings.cwd,\n\t\t\texecutable: this.settings.executable,\n\t\t\texecutableArgs: this.settings.executableArgs,\n\t\t\tpermissionMode: this.settings.permissionMode,\n\t\t\tpermissionPromptToolName: this.settings.permissionPromptToolName,\n\t\t\tcontinue: this.settings.continue,\n\t\t\tallowedTools: this.settings.allowedTools,\n\t\t\tdisallowedTools: this.settings.disallowedTools,\n\t\t\tmcpServers: this.settings.mcpServers\n\t\t};\n\n\t\tlet text = '';\n\t\tlet usage = { promptTokens: 0, completionTokens: 0 };\n\t\tlet finishReason = 'stop';\n\t\tlet costUsd;\n\t\tlet durationMs;\n\t\tlet rawUsage;\n\t\tconst warnings = this.generateUnsupportedWarnings(options);\n\n\t\ttry {\n\t\t\tconst response = query({\n\t\t\t\tprompt: messagesPrompt,\n\t\t\t\toptions: queryOptions\n\t\t\t});\n\n\t\t\tfor await (const message of response) {\n\t\t\t\tif (message.type === 'assistant') {\n\t\t\t\t\ttext += message.message.content\n\t\t\t\t\t\t.map((c) => (c.type === 'text' ? 
c.text : ''))\n\t\t\t\t\t\t.join('');\n\t\t\t\t} else if (message.type === 'result') {\n\t\t\t\t\tthis.sessionId = message.session_id;\n\t\t\t\t\tcostUsd = message.total_cost_usd;\n\t\t\t\t\tdurationMs = message.duration_ms;\n\n\t\t\t\t\tif ('usage' in message) {\n\t\t\t\t\t\trawUsage = message.usage;\n\t\t\t\t\t\tusage = {\n\t\t\t\t\t\t\tpromptTokens:\n\t\t\t\t\t\t\t\t(message.usage.cache_creation_input_tokens ?? 0) +\n\t\t\t\t\t\t\t\t(message.usage.cache_read_input_tokens ?? 0) +\n\t\t\t\t\t\t\t\t(message.usage.input_tokens ?? 0),\n\t\t\t\t\t\t\tcompletionTokens: message.usage.output_tokens ?? 0\n\t\t\t\t\t\t};\n\t\t\t\t\t}\n\n\t\t\t\t\tif (message.subtype === 'error_max_turns') {\n\t\t\t\t\t\tfinishReason = 'length';\n\t\t\t\t\t} else if (message.subtype === 'error_during_execution') {\n\t\t\t\t\t\tfinishReason = 'error';\n\t\t\t\t\t}\n\t\t\t\t} else if (message.type === 'system' && message.subtype === 'init') {\n\t\t\t\t\tthis.sessionId = message.session_id;\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// -------------------------------------------------------------\n\t\t\t// Work-around for Claude-Code CLI/SDK JSON truncation bug (#913)\n\t\t\t// -------------------------------------------------------------\n\t\t\t// If the SDK throws a JSON SyntaxError *but* we already hold some\n\t\t\t// buffered text, assume the response was truncated by the CLI.\n\t\t\t// We keep the accumulated text, mark the finish reason, push a\n\t\t\t// provider-warning and *skip* the normal error handling so Task\n\t\t\t// Master can continue processing.\n\t\t\tconst isJsonTruncation =\n\t\t\t\terror instanceof SyntaxError &&\n\t\t\t\t/JSON/i.test(error.message || '') &&\n\t\t\t\t(error.message.includes('position') ||\n\t\t\t\t\terror.message.includes('Unexpected end'));\n\t\t\tif (isJsonTruncation && text && text.length > 0) {\n\t\t\t\twarnings.push({\n\t\t\t\t\ttype: 'provider-warning',\n\t\t\t\t\tdetails:\n\t\t\t\t\t\t'Claude Code SDK emitted a JSON parse error but Task 
Master recovered buffered text (possible CLI truncation).'\n\t\t\t\t});\n\t\t\t\tfinishReason = 'truncated';\n\t\t\t\t// Skip re-throwing: fall through so the caller receives usable data\n\t\t\t} else {\n\t\t\t\tif (error instanceof AbortError) {\n\t\t\t\t\tthrow options.abortSignal?.aborted\n\t\t\t\t\t\t? options.abortSignal.reason\n\t\t\t\t\t\t: error;\n\t\t\t\t}\n\n\t\t\t\t// Check for authentication errors\n\t\t\t\tif (\n\t\t\t\t\terror.message?.includes('not logged in') ||\n\t\t\t\t\terror.message?.includes('authentication') ||\n\t\t\t\t\terror.exitCode === 401\n\t\t\t\t) {\n\t\t\t\t\tthrow createAuthenticationError({\n\t\t\t\t\t\tmessage:\n\t\t\t\t\t\t\terror.message ||\n\t\t\t\t\t\t\t'Authentication failed. Please ensure Claude Code CLI is properly authenticated.'\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\t// Wrap other errors with API call error\n\t\t\t\tthrow createAPICallError({\n\t\t\t\t\tmessage: error.message || 'Claude Code CLI error',\n\t\t\t\t\tcode: error.code,\n\t\t\t\t\texitCode: error.exitCode,\n\t\t\t\t\tstderr: error.stderr,\n\t\t\t\t\tpromptExcerpt: messagesPrompt.substring(0, 200),\n\t\t\t\t\tisRetryable: error.code === 'ENOENT' || error.code === 'ECONNREFUSED'\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\t// Extract JSON if in object-json mode\n\t\tif (options.mode?.type === 'object-json' && text) {\n\t\t\ttext = extractJson(text);\n\t\t}\n\n\t\treturn {\n\t\t\ttext: text || undefined,\n\t\t\tusage,\n\t\t\tfinishReason,\n\t\t\trawCall: {\n\t\t\t\trawPrompt: messagesPrompt,\n\t\t\t\trawSettings: queryOptions\n\t\t\t},\n\t\t\twarnings: warnings.length > 0 ? 
warnings : undefined,\n\t\t\tresponse: {\n\t\t\t\tid: generateId(),\n\t\t\t\ttimestamp: new Date(),\n\t\t\t\tmodelId: this.modelId\n\t\t\t},\n\t\t\trequest: {\n\t\t\t\tbody: messagesPrompt\n\t\t\t},\n\t\t\tproviderMetadata: {\n\t\t\t\t'claude-code': {\n\t\t\t\t\t...(this.sessionId !== undefined && { sessionId: this.sessionId }),\n\t\t\t\t\t...(costUsd !== undefined && { costUsd }),\n\t\t\t\t\t...(durationMs !== undefined && { durationMs }),\n\t\t\t\t\t...(rawUsage !== undefined && { rawUsage })\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\t}\n\n\t/**\n\t * Stream text using Claude Code\n\t * @param {Object} options - Stream options\n\t * @returns {Promise<Object>}\n\t */\n\tasync doStream(options) {\n\t\tawait loadClaudeCodeModule();\n\t\tconst { messagesPrompt } = convertToClaudeCodeMessages(\n\t\t\toptions.prompt,\n\t\t\toptions.mode\n\t\t);\n\n\t\tconst abortController = new AbortController();\n\t\tif (options.abortSignal) {\n\t\t\toptions.abortSignal.addEventListener('abort', () =>\n\t\t\t\tabortController.abort()\n\t\t\t);\n\t\t}\n\n\t\tconst queryOptions = {\n\t\t\tmodel: this.getModel(),\n\t\t\tabortController,\n\t\t\tresume: this.sessionId,\n\t\t\tpathToClaudeCodeExecutable: this.settings.pathToClaudeCodeExecutable,\n\t\t\tcustomSystemPrompt: this.settings.customSystemPrompt,\n\t\t\tappendSystemPrompt: this.settings.appendSystemPrompt,\n\t\t\tmaxTurns: this.settings.maxTurns,\n\t\t\tmaxThinkingTokens: this.settings.maxThinkingTokens,\n\t\t\tcwd: this.settings.cwd,\n\t\t\texecutable: this.settings.executable,\n\t\t\texecutableArgs: this.settings.executableArgs,\n\t\t\tpermissionMode: this.settings.permissionMode,\n\t\t\tpermissionPromptToolName: this.settings.permissionPromptToolName,\n\t\t\tcontinue: this.settings.continue,\n\t\t\tallowedTools: this.settings.allowedTools,\n\t\t\tdisallowedTools: this.settings.disallowedTools,\n\t\t\tmcpServers: this.settings.mcpServers\n\t\t};\n\n\t\tconst warnings = this.generateUnsupportedWarnings(options);\n\n\t\tconst stream = new 
ReadableStream({\n\t\t\tstart: async (controller) => {\n\t\t\t\ttry {\n\t\t\t\t\tconst response = query({\n\t\t\t\t\t\tprompt: messagesPrompt,\n\t\t\t\t\t\toptions: queryOptions\n\t\t\t\t\t});\n\n\t\t\t\t\tlet usage = { promptTokens: 0, completionTokens: 0 };\n\t\t\t\t\tlet accumulatedText = '';\n\n\t\t\t\t\tfor await (const message of response) {\n\t\t\t\t\t\tif (message.type === 'assistant') {\n\t\t\t\t\t\t\tconst text = message.message.content\n\t\t\t\t\t\t\t\t.map((c) => (c.type === 'text' ? c.text : ''))\n\t\t\t\t\t\t\t\t.join('');\n\n\t\t\t\t\t\t\tif (text) {\n\t\t\t\t\t\t\t\taccumulatedText += text;\n\n\t\t\t\t\t\t\t\t// In object-json mode, we need to accumulate the full text\n\t\t\t\t\t\t\t\t// and extract JSON at the end, so don't stream individual deltas\n\t\t\t\t\t\t\t\tif (options.mode?.type !== 'object-json') {\n\t\t\t\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\t\t\t\ttype: 'text-delta',\n\t\t\t\t\t\t\t\t\t\ttextDelta: text\n\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else if (message.type === 'result') {\n\t\t\t\t\t\t\tlet rawUsage;\n\t\t\t\t\t\t\tif ('usage' in message) {\n\t\t\t\t\t\t\t\trawUsage = message.usage;\n\t\t\t\t\t\t\t\tusage = {\n\t\t\t\t\t\t\t\t\tpromptTokens:\n\t\t\t\t\t\t\t\t\t\t(message.usage.cache_creation_input_tokens ?? 0) +\n\t\t\t\t\t\t\t\t\t\t(message.usage.cache_read_input_tokens ?? 0) +\n\t\t\t\t\t\t\t\t\t\t(message.usage.input_tokens ?? 0),\n\t\t\t\t\t\t\t\t\tcompletionTokens: message.usage.output_tokens ?? 
0\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tlet finishReason = 'stop';\n\t\t\t\t\t\t\tif (message.subtype === 'error_max_turns') {\n\t\t\t\t\t\t\t\tfinishReason = 'length';\n\t\t\t\t\t\t\t} else if (message.subtype === 'error_during_execution') {\n\t\t\t\t\t\t\t\tfinishReason = 'error';\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// Store session ID in the model instance\n\t\t\t\t\t\t\tthis.sessionId = message.session_id;\n\n\t\t\t\t\t\t\t// In object-json mode, extract JSON and send the full text at once\n\t\t\t\t\t\t\tif (options.mode?.type === 'object-json' && accumulatedText) {\n\t\t\t\t\t\t\t\tconst extractedJson = extractJson(accumulatedText);\n\t\t\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\t\t\ttype: 'text-delta',\n\t\t\t\t\t\t\t\t\ttextDelta: extractedJson\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\t\ttype: 'finish',\n\t\t\t\t\t\t\t\tfinishReason,\n\t\t\t\t\t\t\t\tusage,\n\t\t\t\t\t\t\t\tproviderMetadata: {\n\t\t\t\t\t\t\t\t\t'claude-code': {\n\t\t\t\t\t\t\t\t\t\tsessionId: message.session_id,\n\t\t\t\t\t\t\t\t\t\t...(message.total_cost_usd !== undefined && {\n\t\t\t\t\t\t\t\t\t\t\tcostUsd: message.total_cost_usd\n\t\t\t\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\t\t\t\t...(message.duration_ms !== undefined && {\n\t\t\t\t\t\t\t\t\t\t\tdurationMs: message.duration_ms\n\t\t\t\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\t\t\t\t...(rawUsage !== undefined && { rawUsage })\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t} else if (\n\t\t\t\t\t\t\tmessage.type === 'system' &&\n\t\t\t\t\t\t\tmessage.subtype === 'init'\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t// Store session ID for future use\n\t\t\t\t\t\t\tthis.sessionId = message.session_id;\n\n\t\t\t\t\t\t\t// Emit response metadata when session is initialized\n\t\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\t\ttype: 'response-metadata',\n\t\t\t\t\t\t\t\tid: message.session_id,\n\t\t\t\t\t\t\t\ttimestamp: new Date(),\n\t\t\t\t\t\t\t\tmodelId: 
this.modelId\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// -------------------------------------------------------------\n\t\t\t\t\t// Work-around for Claude-Code CLI/SDK JSON truncation bug (#913)\n\t\t\t\t\t// -------------------------------------------------------------\n\t\t\t\t\t// If we hit the SDK JSON SyntaxError but have buffered text, finalize\n\t\t\t\t\t// the stream gracefully instead of emitting an error.\n\t\t\t\t\tconst isJsonTruncation =\n\t\t\t\t\t\terror instanceof SyntaxError &&\n\t\t\t\t\t\t/JSON/i.test(error.message || '') &&\n\t\t\t\t\t\t(error.message.includes('position') ||\n\t\t\t\t\t\t\terror.message.includes('Unexpected end'));\n\n\t\t\t\t\tif (\n\t\t\t\t\t\tisJsonTruncation &&\n\t\t\t\t\t\taccumulatedText &&\n\t\t\t\t\t\taccumulatedText.length > 0\n\t\t\t\t\t) {\n\t\t\t\t\t\t// Prepare final text payload\n\t\t\t\t\t\tconst finalText =\n\t\t\t\t\t\t\toptions.mode?.type === 'object-json'\n\t\t\t\t\t\t\t\t? extractJson(accumulatedText)\n\t\t\t\t\t\t\t\t: accumulatedText;\n\n\t\t\t\t\t\t// Emit any remaining text\n\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\ttype: 'text-delta',\n\t\t\t\t\t\t\ttextDelta: finalText\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\t// Emit finish with truncated reason and warning\n\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\ttype: 'finish',\n\t\t\t\t\t\t\tfinishReason: 'truncated',\n\t\t\t\t\t\t\tusage,\n\t\t\t\t\t\t\tproviderMetadata: { 'claude-code': { truncated: true } },\n\t\t\t\t\t\t\twarnings: [\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\ttype: 'provider-warning',\n\t\t\t\t\t\t\t\t\tdetails:\n\t\t\t\t\t\t\t\t\t\t'Claude Code SDK JSON truncation detected; stream recovered.'\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t]\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\tcontroller.close();\n\t\t\t\t\t\treturn; // Skip normal error path\n\t\t\t\t\t}\n\n\t\t\t\t\tcontroller.close();\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlet errorToEmit;\n\n\t\t\t\t\tif (error instanceof AbortError) {\n\t\t\t\t\t\terrorToEmit = 
options.abortSignal?.aborted\n\t\t\t\t\t\t\t? options.abortSignal.reason\n\t\t\t\t\t\t\t: error;\n\t\t\t\t\t} else if (\n\t\t\t\t\t\terror.message?.includes('not logged in') ||\n\t\t\t\t\t\terror.message?.includes('authentication') ||\n\t\t\t\t\t\terror.exitCode === 401\n\t\t\t\t\t) {\n\t\t\t\t\t\terrorToEmit = createAuthenticationError({\n\t\t\t\t\t\t\tmessage:\n\t\t\t\t\t\t\t\terror.message ||\n\t\t\t\t\t\t\t\t'Authentication failed. Please ensure Claude Code CLI is properly authenticated.'\n\t\t\t\t\t\t});\n\t\t\t\t\t} else {\n\t\t\t\t\t\terrorToEmit = createAPICallError({\n\t\t\t\t\t\t\tmessage: error.message || 'Claude Code CLI error',\n\t\t\t\t\t\t\tcode: error.code,\n\t\t\t\t\t\t\texitCode: error.exitCode,\n\t\t\t\t\t\t\tstderr: error.stderr,\n\t\t\t\t\t\t\tpromptExcerpt: messagesPrompt.substring(0, 200),\n\t\t\t\t\t\t\tisRetryable:\n\t\t\t\t\t\t\t\terror.code === 'ENOENT' || error.code === 'ECONNREFUSED'\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\n\t\t\t\t\t// Emit error as a stream part\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: 'error',\n\t\t\t\t\t\terror: errorToEmit\n\t\t\t\t\t});\n\n\t\t\t\t\tcontroller.close();\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\n\t\treturn {\n\t\t\tstream,\n\t\t\trawCall: {\n\t\t\t\trawPrompt: messagesPrompt,\n\t\t\t\trawSettings: queryOptions\n\t\t\t},\n\t\t\twarnings: warnings.length > 0 ? 
warnings : undefined,\n\t\t\trequest: {\n\t\t\t\tbody: messagesPrompt\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/src/utils/profiles.js", "/**\n * Profiles Utility\n * Consolidated utilities for profile detection, setup, and summary generation\n */\nimport fs from 'fs';\nimport path from 'path';\nimport inquirer from 'inquirer';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport { log } from '../../scripts/modules/utils.js';\nimport { getRulesProfile } from './rule-transformer.js';\nimport { RULE_PROFILES } from '../constants/profiles.js';\n\n// =============================================================================\n// PROFILE DETECTION\n// =============================================================================\n\n/**\n * Get the display name for a profile\n * @param {string} profileName - The profile name\n * @returns {string} - The display name\n */\nexport function getProfileDisplayName(profileName) {\n\ttry {\n\t\tconst profile = getRulesProfile(profileName);\n\t\treturn profile.displayName || profileName;\n\t} catch (error) {\n\t\treturn profileName;\n\t}\n}\n\n/**\n * Get installed profiles in the project directory\n * @param {string} projectRoot - Project directory path\n * @returns {string[]} - Array of installed profile names\n */\nexport function getInstalledProfiles(projectRoot) {\n\tconst installedProfiles = [];\n\n\tfor (const profileName of RULE_PROFILES) {\n\t\ttry {\n\t\t\tconst profile = getRulesProfile(profileName);\n\t\t\tconst profileDir = path.join(projectRoot, profile.profileDir);\n\n\t\t\t// Check if profile directory exists (skip root directory check)\n\t\t\tif (profile.profileDir === '.' 
|| fs.existsSync(profileDir)) {\n\t\t\t\t// Check if any files from the profile's fileMap exist\n\t\t\t\tconst rulesDir = path.join(projectRoot, profile.rulesDir);\n\t\t\t\tif (fs.existsSync(rulesDir)) {\n\t\t\t\t\tconst ruleFiles = Object.values(profile.fileMap);\n\t\t\t\t\tconst hasRuleFiles = ruleFiles.some((ruleFile) =>\n\t\t\t\t\t\tfs.existsSync(path.join(rulesDir, ruleFile))\n\t\t\t\t\t);\n\t\t\t\t\tif (hasRuleFiles) {\n\t\t\t\t\t\tinstalledProfiles.push(profileName);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Skip profiles that can't be loaded\n\t\t}\n\t}\n\n\treturn installedProfiles;\n}\n\n/**\n * Check if removing specified profiles would leave no profiles installed\n * @param {string} projectRoot - Project root directory\n * @param {string[]} profilesToRemove - Array of profile names to remove\n * @returns {boolean} - True if removal would leave no profiles\n */\nexport function wouldRemovalLeaveNoProfiles(projectRoot, profilesToRemove) {\n\tconst installedProfiles = getInstalledProfiles(projectRoot);\n\n\t// If no profiles are currently installed, removal cannot leave no profiles\n\tif (installedProfiles.length === 0) {\n\t\treturn false;\n\t}\n\n\tconst remainingProfiles = installedProfiles.filter(\n\t\t(profile) => !profilesToRemove.includes(profile)\n\t);\n\treturn remainingProfiles.length === 0;\n}\n\n// =============================================================================\n// PROFILE SETUP\n// =============================================================================\n\n// Note: Profile choices are now generated dynamically within runInteractiveProfilesSetup()\n// to ensure proper alphabetical sorting and pagination configuration\n\n/**\n * Launches an interactive prompt for selecting which rule profiles to include in your project.\n *\n * This function dynamically lists all available profiles (from RULE_PROFILES) and presents them as checkboxes.\n * The user must select at least one profile (no defaults are 
pre-selected). The result is an array of selected profile names.\n *\n * Used by both project initialization (init) and the CLI 'task-master rules setup' command.\n *\n * @returns {Promise<string[]>} Array of selected profile names (e.g., ['cursor', 'windsurf'])\n */\nexport async function runInteractiveProfilesSetup() {\n\t// Generate the profile list dynamically with proper display names, alphabetized\n\tconst profileDescriptions = RULE_PROFILES.map((profileName) => {\n\t\tconst displayName = getProfileDisplayName(profileName);\n\t\tconst profile = getRulesProfile(profileName);\n\n\t\t// Determine description based on profile capabilities\n\t\tlet description;\n\t\tconst hasRules = Object.keys(profile.fileMap).length > 0;\n\t\tconst hasMcpConfig = profile.mcpConfig === true;\n\n\t\tif (!profile.includeDefaultRules) {\n\t\t\t// Integration guide profiles (claude, codex, gemini, opencode, zed, amp) - don't include standard coding rules\n\t\t\tif (profileName === 'claude') {\n\t\t\t\tdescription = 'Integration guide with Task Master slash commands';\n\t\t\t} else if (profileName === 'codex') {\n\t\t\t\tdescription = 'Comprehensive Task Master integration guide';\n\t\t\t} else if (hasMcpConfig) {\n\t\t\t\tdescription = 'Integration guide and MCP config';\n\t\t\t} else {\n\t\t\t\tdescription = 'Integration guide';\n\t\t\t}\n\t\t} else if (hasRules && hasMcpConfig) {\n\t\t\t// Full rule profiles with MCP config\n\t\t\tif (profileName === 'roo') {\n\t\t\t\tdescription = 'Rule profile, MCP config, and agent modes';\n\t\t\t} else {\n\t\t\t\tdescription = 'Rule profile and MCP config';\n\t\t\t}\n\t\t} else if (hasRules) {\n\t\t\t// Rule profiles without MCP config\n\t\t\tdescription = 'Rule profile';\n\t\t}\n\n\t\treturn {\n\t\t\tprofileName,\n\t\t\tdisplayName,\n\t\t\tdescription\n\t\t};\n\t}).sort((a, b) => a.displayName.localeCompare(b.displayName));\n\n\tconst profileListText = profileDescriptions\n\t\t.map(\n\t\t\t({ displayName, description }) 
=>\n\t\t\t\t`${chalk.white('• ')}${chalk.yellow(displayName)}${chalk.white(` - ${description}`)}`\n\t\t)\n\t\t.join('\\n');\n\n\tconsole.log(\n\t\tboxen(\n\t\t\t`${chalk.white.bold('Rule Profiles Setup')}\\n\\n${chalk.white(\n\t\t\t\t'Rule profiles help enforce best practices and conventions for Task Master.\\n' +\n\t\t\t\t\t'Each profile provides coding guidelines tailored for specific AI coding environments.\\n\\n'\n\t\t\t)}${chalk.cyan('Available Profiles:')}\\n${profileListText}`,\n\t\t\t{\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'blue',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t}\n\t\t)\n\t);\n\n\t// Generate choices in the same order as the display text above\n\tconst sortedChoices = profileDescriptions.map(\n\t\t({ profileName, displayName }) => ({\n\t\t\tname: displayName,\n\t\t\tvalue: profileName\n\t\t})\n\t);\n\n\tconst ruleProfilesQuestion = {\n\t\ttype: 'checkbox',\n\t\tname: 'ruleProfiles',\n\t\tmessage: 'Which rule profiles would you like to add to your project?',\n\t\tchoices: sortedChoices,\n\t\tpageSize: sortedChoices.length, // Show all options without pagination\n\t\tloop: false, // Disable loop scrolling\n\t\tvalidate: (input) => input.length > 0 || 'You must select at least one.'\n\t};\n\tconst { ruleProfiles } = await inquirer.prompt([ruleProfilesQuestion]);\n\treturn ruleProfiles;\n}\n\n// =============================================================================\n// PROFILE SUMMARY\n// =============================================================================\n\n/**\n * Generate appropriate summary message for a profile based on its type\n * @param {string} profileName - Name of the profile\n * @param {Object} addResult - Result object with success/failed counts\n * @returns {string} Formatted summary message\n */\nexport function generateProfileSummary(profileName, addResult) {\n\tconst profileConfig = getRulesProfile(profileName);\n\n\tif (!profileConfig.includeDefaultRules) {\n\t\t// Integration 
guide profiles (claude, codex, gemini, amp)\n\t\treturn `Summary for ${profileName}: Integration guide installed.`;\n\t} else {\n\t\t// Rule profiles with coding guidelines\n\t\treturn `Summary for ${profileName}: ${addResult.success} files processed, ${addResult.failed} failed.`;\n\t}\n}\n\n/**\n * Generate appropriate summary message for profile removal\n * @param {string} profileName - Name of the profile\n * @param {Object} removeResult - Result object from removal operation\n * @returns {string} Formatted summary message\n */\nexport function generateProfileRemovalSummary(profileName, removeResult) {\n\tif (removeResult.skipped) {\n\t\treturn `Summary for ${profileName}: Skipped (default or protected files)`;\n\t}\n\n\tif (removeResult.error && !removeResult.success) {\n\t\treturn `Summary for ${profileName}: Failed to remove - ${removeResult.error}`;\n\t}\n\n\tconst profileConfig = getRulesProfile(profileName);\n\n\tif (!profileConfig.includeDefaultRules) {\n\t\t// Integration guide profiles (claude, codex, gemini, amp)\n\t\tconst baseMessage = `Summary for ${profileName}: Integration guide removed`;\n\t\tif (removeResult.notice) {\n\t\t\treturn `${baseMessage} (${removeResult.notice})`;\n\t\t}\n\t\treturn baseMessage;\n\t} else {\n\t\t// Rule profiles with coding guidelines\n\t\tconst baseMessage = `Summary for ${profileName}: Rule profile removed`;\n\t\tif (removeResult.notice) {\n\t\t\treturn `${baseMessage} (${removeResult.notice})`;\n\t\t}\n\t\treturn baseMessage;\n\t}\n}\n\n/**\n * Categorize profiles and generate final summary statistics\n * @param {Array} addResults - Array of add result objects\n * @returns {Object} Object with categorized profiles and totals\n */\nexport function categorizeProfileResults(addResults) {\n\tconst successfulProfiles = [];\n\tlet totalSuccess = 0;\n\tlet totalFailed = 0;\n\n\taddResults.forEach((r) => {\n\t\ttotalSuccess += r.success;\n\t\ttotalFailed += r.failed;\n\n\t\t// All profiles are considered successful if they 
completed without major errors\n\t\tif (r.success > 0 || r.failed === 0) {\n\t\t\tsuccessfulProfiles.push(r.profileName);\n\t\t}\n\t});\n\n\treturn {\n\t\tsuccessfulProfiles,\n\t\tallSuccessfulProfiles: successfulProfiles,\n\t\ttotalSuccess,\n\t\ttotalFailed\n\t};\n}\n\n/**\n * Categorize removal results and generate final summary statistics\n * @param {Array} removalResults - Array of removal result objects\n * @returns {Object} Object with categorized removal results\n */\nexport function categorizeRemovalResults(removalResults) {\n\tconst successfulRemovals = [];\n\tconst skippedRemovals = [];\n\tconst failedRemovals = [];\n\tconst removalsWithNotices = [];\n\n\tremovalResults.forEach((result) => {\n\t\tif (result.success) {\n\t\t\tsuccessfulRemovals.push(result.profileName);\n\t\t} else if (result.skipped) {\n\t\t\tskippedRemovals.push(result.profileName);\n\t\t} else if (result.error) {\n\t\t\tfailedRemovals.push(result);\n\t\t}\n\n\t\tif (result.notice) {\n\t\t\tremovalsWithNotices.push(result);\n\t\t}\n\t});\n\n\treturn {\n\t\tsuccessfulRemovals,\n\t\tskippedRemovals,\n\t\tfailedRemovals,\n\t\tremovalsWithNotices\n\t};\n}\n"], ["/claude-task-master/src/utils/create-mcp-config.js", "import fs from 'fs';\nimport path from 'path';\nimport { log } from '../../scripts/modules/utils.js';\n\n// Return JSON with existing mcp.json formatting style\nfunction formatJSONWithTabs(obj) {\n\tlet json = JSON.stringify(obj, null, '\\t');\n\n\tjson = json.replace(\n\t\t/(\\[\\n\\t+)([^[\\]]+?)(\\n\\t+\\])/g,\n\t\t(match, openBracket, content, closeBracket) => {\n\t\t\t// Only convert to single line if content doesn't contain nested objects/arrays\n\t\t\tif (!content.includes('{') && !content.includes('[')) {\n\t\t\t\tconst singleLineContent = content\n\t\t\t\t\t.replace(/\\n\\t+/g, ' ')\n\t\t\t\t\t.replace(/\\s+/g, ' ')\n\t\t\t\t\t.trim();\n\t\t\t\treturn `[${singleLineContent}]`;\n\t\t\t}\n\t\t\treturn match;\n\t\t}\n\t);\n\n\treturn json;\n}\n\n// Structure matches project 
conventions (see scripts/init.js)\nexport function setupMCPConfiguration(projectRoot, mcpConfigPath) {\n\t// Handle null mcpConfigPath (e.g., for Claude/Codex profiles)\n\tif (!mcpConfigPath) {\n\t\tlog(\n\t\t\t'debug',\n\t\t\t'[MCP Config] No mcpConfigPath provided, skipping MCP configuration setup'\n\t\t);\n\t\treturn;\n\t}\n\n\t// Build the full path to the MCP config file\n\tconst mcpPath = path.join(projectRoot, mcpConfigPath);\n\tconst configDir = path.dirname(mcpPath);\n\n\tlog('info', `Setting up MCP configuration at ${mcpPath}...`);\n\n\t// New MCP config to be added - references the installed package\n\tconst newMCPServer = {\n\t\t'task-master-ai': {\n\t\t\tcommand: 'npx',\n\t\t\targs: ['-y', '--package=task-master-ai', 'task-master-ai'],\n\t\t\tenv: {\n\t\t\t\tANTHROPIC_API_KEY: 'YOUR_ANTHROPIC_API_KEY_HERE',\n\t\t\t\tPERPLEXITY_API_KEY: 'YOUR_PERPLEXITY_API_KEY_HERE',\n\t\t\t\tOPENAI_API_KEY: 'YOUR_OPENAI_KEY_HERE',\n\t\t\t\tGOOGLE_API_KEY: 'YOUR_GOOGLE_KEY_HERE',\n\t\t\t\tXAI_API_KEY: 'YOUR_XAI_KEY_HERE',\n\t\t\t\tOPENROUTER_API_KEY: 'YOUR_OPENROUTER_KEY_HERE',\n\t\t\t\tMISTRAL_API_KEY: 'YOUR_MISTRAL_KEY_HERE',\n\t\t\t\tAZURE_OPENAI_API_KEY: 'YOUR_AZURE_KEY_HERE',\n\t\t\t\tOLLAMA_API_KEY: 'YOUR_OLLAMA_API_KEY_HERE'\n\t\t\t}\n\t\t}\n\t};\n\n\t// Create config directory if it doesn't exist\n\tif (!fs.existsSync(configDir)) {\n\t\tfs.mkdirSync(configDir, { recursive: true });\n\t}\n\n\tif (fs.existsSync(mcpPath)) {\n\t\tlog(\n\t\t\t'info',\n\t\t\t'MCP configuration file already exists, checking for existing task-master-ai...'\n\t\t);\n\t\ttry {\n\t\t\t// Read existing config\n\t\t\tconst mcpConfig = JSON.parse(fs.readFileSync(mcpPath, 'utf8'));\n\t\t\t// Initialize mcpServers if it doesn't exist\n\t\t\tif (!mcpConfig.mcpServers) {\n\t\t\t\tmcpConfig.mcpServers = {};\n\t\t\t}\n\t\t\t// Check if any existing server configuration already has task-master-ai in its args\n\t\t\tconst hasMCPString = Object.values(mcpConfig.mcpServers).some(\n\t\t\t\t(server) 
=>\n\t\t\t\t\tserver.args &&\n\t\t\t\t\tArray.isArray(server.args) &&\n\t\t\t\t\tserver.args.some(\n\t\t\t\t\t\t(arg) => typeof arg === 'string' && arg.includes('task-master-ai')\n\t\t\t\t\t)\n\t\t\t);\n\t\t\tif (hasMCPString) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'Found existing task-master-ai MCP configuration in mcp.json, leaving untouched'\n\t\t\t\t);\n\t\t\t\treturn; // Exit early, don't modify the existing configuration\n\t\t\t}\n\t\t\t// Add the task-master-ai server if it doesn't exist\n\t\t\tif (!mcpConfig.mcpServers['task-master-ai']) {\n\t\t\t\tmcpConfig.mcpServers['task-master-ai'] = newMCPServer['task-master-ai'];\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'Added task-master-ai server to existing MCP configuration'\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\tlog('info', 'task-master-ai server already configured in mcp.json');\n\t\t\t}\n\t\t\t// Write the updated configuration\n\t\t\tfs.writeFileSync(mcpPath, formatJSONWithTabs(mcpConfig) + '\\n');\n\t\t\tlog('success', 'Updated MCP configuration file');\n\t\t} catch (error) {\n\t\t\tlog('error', `Failed to update MCP configuration: ${error.message}`);\n\t\t\t// Create a backup before potentially modifying\n\t\t\tconst backupPath = `${mcpPath}.backup-${Date.now()}`;\n\t\t\tif (fs.existsSync(mcpPath)) {\n\t\t\t\tfs.copyFileSync(mcpPath, backupPath);\n\t\t\t\tlog('info', `Created backup of existing mcp.json at ${backupPath}`);\n\t\t\t}\n\t\t\t// Create new configuration\n\t\t\tconst newMCPConfig = {\n\t\t\t\tmcpServers: newMCPServer\n\t\t\t};\n\t\t\tfs.writeFileSync(mcpPath, formatJSONWithTabs(newMCPConfig) + '\\n');\n\t\t\tlog(\n\t\t\t\t'warn',\n\t\t\t\t'Created new MCP configuration file (backup of original file was created if it existed)'\n\t\t\t);\n\t\t}\n\t} else {\n\t\t// If mcp.json doesn't exist, create it\n\t\tconst newMCPConfig = {\n\t\t\tmcpServers: newMCPServer\n\t\t};\n\t\tfs.writeFileSync(mcpPath, formatJSONWithTabs(newMCPConfig) + '\\n');\n\t\tlog('success', `Created MCP configuration 
file at ${mcpPath}`);\n\t}\n\n\t// Add note to console about MCP integration\n\tlog('info', 'MCP server will use the installed task-master-ai package');\n}\n\n/**\n * Remove Task Master MCP server configuration from an existing mcp.json file\n * Only removes Task Master entries, preserving other MCP servers\n * @param {string} projectRoot - Target project directory\n * @param {string} mcpConfigPath - Relative path to MCP config file (e.g., '.cursor/mcp.json')\n * @returns {Object} Result object with success status and details\n */\nexport function removeTaskMasterMCPConfiguration(projectRoot, mcpConfigPath) {\n\t// Handle null mcpConfigPath (e.g., for Claude/Codex profiles)\n\tif (!mcpConfigPath) {\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tremoved: false,\n\t\t\tdeleted: false,\n\t\t\terror: null,\n\t\t\thasOtherServers: false\n\t\t};\n\t}\n\n\tconst mcpPath = path.join(projectRoot, mcpConfigPath);\n\n\tlet result = {\n\t\tsuccess: false,\n\t\tremoved: false,\n\t\tdeleted: false,\n\t\terror: null,\n\t\thasOtherServers: false\n\t};\n\n\tif (!fs.existsSync(mcpPath)) {\n\t\tresult.success = true;\n\t\tresult.removed = false;\n\t\tlog('debug', `[MCP Config] MCP config file does not exist: ${mcpPath}`);\n\t\treturn result;\n\t}\n\n\ttry {\n\t\t// Read existing config\n\t\tconst mcpConfig = JSON.parse(fs.readFileSync(mcpPath, 'utf8'));\n\n\t\tif (!mcpConfig.mcpServers) {\n\t\t\tresult.success = true;\n\t\t\tresult.removed = false;\n\t\t\tlog('debug', `[MCP Config] No mcpServers section found in: ${mcpPath}`);\n\t\t\treturn result;\n\t\t}\n\n\t\t// Check if Task Master is configured\n\t\tconst hasTaskMaster =\n\t\t\tmcpConfig.mcpServers['task-master-ai'] ||\n\t\t\tObject.values(mcpConfig.mcpServers).some(\n\t\t\t\t(server) =>\n\t\t\t\t\tserver.args &&\n\t\t\t\t\tArray.isArray(server.args) &&\n\t\t\t\t\tserver.args.some(\n\t\t\t\t\t\t(arg) => typeof arg === 'string' && arg.includes('task-master-ai')\n\t\t\t\t\t)\n\t\t\t);\n\n\t\tif (!hasTaskMaster) {\n\t\t\tresult.success 
= true;\n\t\t\tresult.removed = false;\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[MCP Config] Task Master not found in MCP config: ${mcpPath}`\n\t\t\t);\n\t\t\treturn result;\n\t\t}\n\n\t\t// Remove task-master-ai server\n\t\tdelete mcpConfig.mcpServers['task-master-ai'];\n\n\t\t// Also remove any servers that have task-master-ai in their args\n\t\tObject.keys(mcpConfig.mcpServers).forEach((serverName) => {\n\t\t\tconst server = mcpConfig.mcpServers[serverName];\n\t\t\tif (\n\t\t\t\tserver.args &&\n\t\t\t\tArray.isArray(server.args) &&\n\t\t\t\tserver.args.some(\n\t\t\t\t\t(arg) => typeof arg === 'string' && arg.includes('task-master-ai')\n\t\t\t\t)\n\t\t\t) {\n\t\t\t\tdelete mcpConfig.mcpServers[serverName];\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`[MCP Config] Removed server '${serverName}' containing task-master-ai`\n\t\t\t\t);\n\t\t\t}\n\t\t});\n\n\t\t// Check if there are other MCP servers remaining\n\t\tconst remainingServers = Object.keys(mcpConfig.mcpServers);\n\t\tresult.hasOtherServers = remainingServers.length > 0;\n\n\t\tif (result.hasOtherServers) {\n\t\t\t// Write back the modified config with remaining servers\n\t\t\tfs.writeFileSync(mcpPath, formatJSONWithTabs(mcpConfig) + '\\n');\n\t\t\tresult.success = true;\n\t\t\tresult.removed = true;\n\t\t\tresult.deleted = false;\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t`[MCP Config] Removed Task Master from MCP config, preserving other servers: ${remainingServers.join(', ')}`\n\t\t\t);\n\t\t} else {\n\t\t\t// No other servers, delete the entire file\n\t\t\tfs.rmSync(mcpPath, { force: true });\n\t\t\tresult.success = true;\n\t\t\tresult.removed = true;\n\t\t\tresult.deleted = true;\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t`[MCP Config] Removed MCP config file (no other servers remaining): ${mcpPath}`\n\t\t\t);\n\t\t}\n\t} catch (error) {\n\t\tresult.error = error.message;\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[MCP Config] Failed to remove Task Master from MCP config: ${error.message}`\n\t\t);\n\t}\n\n\treturn 
result;\n}\n"], ["/claude-task-master/mcp-server/src/tools/move-task.js", "/**\n * tools/move-task.js\n * Tool for moving tasks or subtasks to a new position\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { moveTaskDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the moveTask tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerMoveTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'move_task',\n\t\tdescription: 'Move a task or subtask to a new position',\n\t\tparameters: z.object({\n\t\t\tfrom: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'ID of the task/subtask to move (e.g., \"5\" or \"5.2\"). Can be comma-separated to move multiple tasks (e.g., \"5,6,7\")'\n\t\t\t\t),\n\t\t\tto: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'ID of the destination (e.g., \"7\" or \"7.3\"). 
Must match the number of source IDs if comma-separated'\n\t\t\t\t),\n\t\t\tfile: z.string().optional().describe('Custom path to tasks.json file'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'Root directory of the project (typically derived from session)'\n\t\t\t\t),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\t// Find tasks.json path if not provided\n\t\t\t\tlet tasksJsonPath = args.file;\n\n\t\t\t\tif (!tasksJsonPath) {\n\t\t\t\t\ttasksJsonPath = findTasksPath(args, log);\n\t\t\t\t}\n\n\t\t\t\t// Parse comma-separated IDs\n\t\t\t\tconst fromIds = args.from.split(',').map((id) => id.trim());\n\t\t\t\tconst toIds = args.to.split(',').map((id) => id.trim());\n\n\t\t\t\t// Validate matching IDs count\n\t\t\t\tif (fromIds.length !== toIds.length) {\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t'The number of source and destination IDs must match',\n\t\t\t\t\t\t'MISMATCHED_ID_COUNT'\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// If moving multiple tasks\n\t\t\t\tif (fromIds.length > 1) {\n\t\t\t\t\tconst results = [];\n\t\t\t\t\t// Move tasks one by one, only generate files on the last move\n\t\t\t\t\tfor (let i = 0; i < fromIds.length; i++) {\n\t\t\t\t\t\tconst fromId = fromIds[i];\n\t\t\t\t\t\tconst toId = toIds[i];\n\n\t\t\t\t\t\t// Skip if source and destination are the same\n\t\t\t\t\t\tif (fromId === toId) {\n\t\t\t\t\t\t\tlog.info(`Skipping ${fromId} -> ${toId} (same ID)`);\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tconst shouldGenerateFiles = i === fromIds.length - 1;\n\t\t\t\t\t\tconst result = await moveTaskDirect(\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tsourceId: fromId,\n\t\t\t\t\t\t\t\tdestinationId: toId,\n\t\t\t\t\t\t\t\ttasksJsonPath,\n\t\t\t\t\t\t\t\tprojectRoot: 
args.projectRoot,\n\t\t\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tlog,\n\t\t\t\t\t\t\t{ session }\n\t\t\t\t\t\t);\n\n\t\t\t\t\t\tif (!result.success) {\n\t\t\t\t\t\t\tlog.error(\n\t\t\t\t\t\t\t\t`Failed to move ${fromId} to ${toId}: ${result.error.message}`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tresults.push(result.data);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn handleApiResult(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tsuccess: true,\n\t\t\t\t\t\t\tdata: {\n\t\t\t\t\t\t\t\tmoves: results,\n\t\t\t\t\t\t\t\tmessage: `Successfully moved ${results.length} tasks`\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t},\n\t\t\t\t\t\tlog,\n\t\t\t\t\t\t'Error moving multiple tasks',\n\t\t\t\t\t\tundefined,\n\t\t\t\t\t\targs.projectRoot\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\t// Moving a single task\n\t\t\t\t\treturn handleApiResult(\n\t\t\t\t\t\tawait moveTaskDirect(\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tsourceId: args.from,\n\t\t\t\t\t\t\t\tdestinationId: args.to,\n\t\t\t\t\t\t\t\ttasksJsonPath,\n\t\t\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tlog,\n\t\t\t\t\t\t\t{ session }\n\t\t\t\t\t\t),\n\t\t\t\t\t\tlog,\n\t\t\t\t\t\t'Error moving task',\n\t\t\t\t\t\tundefined,\n\t\t\t\t\t\targs.projectRoot\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Failed to move task: ${error.message}`,\n\t\t\t\t\t'MOVE_TASK_ERROR'\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/rename-tag.js", "/**\n * rename-tag.js\n * Direct function implementation for renaming a tag\n */\n\nimport { renameTag } from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for renaming a tag with error handling.\n *\n * @param {Object} 
args - Command arguments\n * @param {string} args.oldName - Current name of the tag to rename\n * @param {string} args.newName - New name for the tag\n * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function renameTagDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, oldName, newName, projectRoot } = args;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('renameTagDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check required parameters\n\t\tif (!oldName || typeof oldName !== 'string') {\n\t\t\tlog.error('Missing required parameter: oldName');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage: 'Old tag name is required and must be a string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (!newName || typeof newName !== 'string') {\n\t\t\tlog.error('Missing required parameter: newName');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage: 'New tag name is required and must be a string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlog.info(`Renaming tag 
from \"${oldName}\" to \"${newName}\"`);\n\n\t\t// Call the renameTag function\n\t\tconst result = await renameTag(\n\t\t\ttasksJsonPath,\n\t\t\toldName,\n\t\t\tnewName,\n\t\t\t{}, // options (empty for now)\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\toldName: result.oldName,\n\t\t\t\tnewName: result.newName,\n\t\t\t\trenamed: result.renamed,\n\t\t\t\ttaskCount: result.taskCount,\n\t\t\t\twasCurrentTag: result.wasCurrentTag,\n\t\t\t\tmessage: `Successfully renamed tag from \"${result.oldName}\" to \"${result.newName}\"`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in renameTagDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'RENAME_TAG_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/test-clean-tags.js", "import fs from 'fs';\nimport {\n\tcreateTag,\n\tlistTags\n} from './scripts/modules/task-manager/tag-management.js';\n\nconsole.log('=== Testing Tag Management with Clean File ===');\n\n// Create a clean test tasks.json file\nconst testTasksPath = './test-tasks.json';\nconst cleanData = {\n\tmaster: {\n\t\ttasks: [\n\t\t\t{ id: 1, title: 'Test Task 1', status: 'pending' },\n\t\t\t{ id: 2, title: 'Test Task 2', status: 'done' }\n\t\t],\n\t\tmetadata: {\n\t\t\tcreated: new Date().toISOString(),\n\t\t\tdescription: 'Master tag'\n\t\t}\n\t}\n};\n\n// Write clean test file\nfs.writeFileSync(testTasksPath, JSON.stringify(cleanData, null, 2));\nconsole.log('Created clean test file');\n\ntry {\n\t// Test creating a new tag\n\tconsole.log('\\n--- Testing createTag ---');\n\tawait createTag(\n\t\ttestTasksPath,\n\t\t'test-branch',\n\t\t{ 
copyFromCurrent: true, description: 'Test branch' },\n\t\t{ projectRoot: process.cwd() },\n\t\t'json'\n\t);\n\n\t// Read the file and check for corruption\n\tconst resultData = JSON.parse(fs.readFileSync(testTasksPath, 'utf8'));\n\tconsole.log('Keys in result file:', Object.keys(resultData));\n\tconsole.log('Has _rawTaggedData in file:', !!resultData._rawTaggedData);\n\n\tif (resultData._rawTaggedData) {\n\t\tconsole.log('❌ CORRUPTION DETECTED: _rawTaggedData found in file!');\n\t} else {\n\t\tconsole.log('✅ SUCCESS: No _rawTaggedData corruption in file');\n\t}\n\n\t// Test listing tags\n\tconsole.log('\\n--- Testing listTags ---');\n\tconst tagList = await listTags(\n\t\ttestTasksPath,\n\t\t{},\n\t\t{ projectRoot: process.cwd() },\n\t\t'json'\n\t);\n\tconsole.log(\n\t\t'Found tags:',\n\t\ttagList.tags.map((t) => t.name)\n\t);\n} catch (error) {\n\tconsole.error('Error during test:', error.message);\n} finally {\n\t// Clean up test file\n\tif (fs.existsSync(testTasksPath)) {\n\t\tfs.unlinkSync(testTasksPath);\n\t\tconsole.log('\\nCleaned up test file');\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/generate-task-files.js", "/**\n * generate-task-files.js\n * Direct function implementation for generating task files from tasks.json\n */\n\nimport { generateTaskFiles } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for generateTaskFiles with error handling.\n *\n * @param {Object} args - Command arguments containing tasksJsonPath and outputDir.\n * @param {string} args.tasksJsonPath - Path to the tasks.json file.\n * @param {string} args.outputDir - Path to the output directory.\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object.\n * @returns {Promise<Object>} - Result object 
with success status and data/error information.\n */\nexport async function generateTaskFilesDirect(args, log) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, outputDir, projectRoot, tag } = args;\n\ttry {\n\t\tlog.info(`Generating task files with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if paths were provided\n\t\tif (!tasksJsonPath) {\n\t\t\tconst errorMessage = 'tasksJsonPath is required but was not provided.';\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: errorMessage }\n\t\t\t};\n\t\t}\n\t\tif (!outputDir) {\n\t\t\tconst errorMessage = 'outputDir is required but was not provided.';\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Use the provided paths\n\t\tconst tasksPath = tasksJsonPath;\n\t\tconst resolvedOutputDir = outputDir;\n\n\t\tlog.info(`Generating task files from ${tasksPath} to ${resolvedOutputDir}`);\n\n\t\t// Execute core generateTaskFiles function in a separate try/catch\n\t\ttry {\n\t\t\t// Enable silent mode to prevent logs from being written to stdout\n\t\t\tenableSilentMode();\n\n\t\t\t// Pass projectRoot and tag so the core respects context\n\t\t\tgenerateTaskFiles(tasksPath, resolvedOutputDir, {\n\t\t\t\tprojectRoot,\n\t\t\t\ttag,\n\t\t\t\tmcpLog: log\n\t\t\t});\n\n\t\t\t// Restore normal logging after task generation\n\t\t\tdisableSilentMode();\n\t\t} catch (genError) {\n\t\t\t// Make sure to restore normal logging even if there's an error\n\t\t\tdisableSilentMode();\n\n\t\t\tlog.error(`Error in generateTaskFiles: ${genError.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'GENERATE_FILES_ERROR', message: genError.message }\n\t\t\t};\n\t\t}\n\n\t\t// Return success with file paths\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: `Successfully generated task 
files`,\n\t\t\t\ttasksPath: tasksPath,\n\t\t\t\toutputDir: resolvedOutputDir,\n\t\t\t\ttaskFiles:\n\t\t\t\t\t'Individual task files have been generated in the output directory'\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging if an outer error occurs\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error generating task files: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'GENERATE_TASKS_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown error generating task files'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/remove-dependency.js", "/**\n * Direct function wrapper for removeDependency\n */\n\nimport { removeDependency } from '../../../../scripts/modules/dependency-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Remove a dependency from a task\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string|number} args.id - Task ID to remove dependency from\n * @param {string|number} args.dependsOn - Task ID to remove as a dependency\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function removeDependencyDirect(args, log) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, id, dependsOn, projectRoot, tag } = args;\n\ttry {\n\t\tlog.info(`Removing dependency with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('removeDependencyDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 
'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Validate required parameters\n\t\tif (!id) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: 'Task ID (id) is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (!dependsOn) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: 'Dependency ID (dependsOn) is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Format IDs for the core function\n\t\tconst taskId =\n\t\t\tid && id.includes && id.includes('.') ? id : parseInt(id, 10);\n\t\tconst dependencyId =\n\t\t\tdependsOn && dependsOn.includes && dependsOn.includes('.')\n\t\t\t\t? dependsOn\n\t\t\t\t: parseInt(dependsOn, 10);\n\n\t\tlog.info(\n\t\t\t`Removing dependency: task ${taskId} no longer depends on ${dependencyId}`\n\t\t);\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\t// Call the core function using the provided tasksPath\n\t\tawait removeDependency(tasksPath, taskId, dependencyId, {\n\t\t\tprojectRoot,\n\t\t\ttag\n\t\t});\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: `Successfully removed dependency: Task ${taskId} no longer depends on ${dependencyId}`,\n\t\t\t\ttaskId: taskId,\n\t\t\t\tdependencyId: dependencyId\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in removeDependencyDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/add-tag.js", "/**\n * 
add-tag.js\n * Direct function implementation for creating a new tag\n */\n\nimport {\n\tcreateTag,\n\tcreateTagFromBranch\n} from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for creating a new tag with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.name - Name of the new tag to create\n * @param {boolean} [args.copyFromCurrent=false] - Whether to copy tasks from current tag\n * @param {string} [args.copyFromTag] - Specific tag to copy tasks from\n * @param {boolean} [args.fromBranch=false] - Create tag name from current git branch\n * @param {string} [args.description] - Optional description for the tag\n * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function addTagDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst {\n\t\ttasksJsonPath,\n\t\tname,\n\t\tcopyFromCurrent = false,\n\t\tcopyFromTag,\n\t\tfromBranch = false,\n\t\tdescription,\n\t\tprojectRoot\n\t} = args;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('addTagDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 
'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Handle --from-branch option\n\t\tif (fromBranch) {\n\t\t\tlog.info('Creating tag from current git branch');\n\n\t\t\t// Import git utilities\n\t\t\tconst gitUtils = await import(\n\t\t\t\t'../../../../scripts/modules/utils/git-utils.js'\n\t\t\t);\n\n\t\t\t// Check if we're in a git repository\n\t\t\tif (!(await gitUtils.isGitRepository(projectRoot))) {\n\t\t\t\tlog.error('Not in a git repository');\n\t\t\t\tdisableSilentMode();\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'NOT_GIT_REPO',\n\t\t\t\t\t\tmessage: 'Not in a git repository. Cannot use fromBranch option.'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Get current git branch\n\t\t\tconst currentBranch = await gitUtils.getCurrentBranch(projectRoot);\n\t\t\tif (!currentBranch) {\n\t\t\t\tlog.error('Could not determine current git branch');\n\t\t\t\tdisableSilentMode();\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'NO_CURRENT_BRANCH',\n\t\t\t\t\t\tmessage: 'Could not determine current git branch.'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Prepare options for branch-based tag creation\n\t\t\tconst branchOptions = {\n\t\t\t\tcopyFromCurrent,\n\t\t\t\tcopyFromTag,\n\t\t\t\tdescription:\n\t\t\t\t\tdescription || `Tag created from git branch \"${currentBranch}\"`\n\t\t\t};\n\n\t\t\t// Call the createTagFromBranch function\n\t\t\tconst result = await createTagFromBranch(\n\t\t\t\ttasksJsonPath,\n\t\t\t\tcurrentBranch,\n\t\t\t\tbranchOptions,\n\t\t\t\t{\n\t\t\t\t\tsession,\n\t\t\t\t\tmcpLog,\n\t\t\t\t\tprojectRoot\n\t\t\t\t},\n\t\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t\t);\n\n\t\t\t// Restore normal logging\n\t\t\tdisableSilentMode();\n\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tbranchName: result.branchName,\n\t\t\t\t\ttagName: result.tagName,\n\t\t\t\t\tcreated: 
result.created,\n\t\t\t\t\tmappingUpdated: result.mappingUpdated,\n\t\t\t\t\tmessage: `Successfully created tag \"${result.tagName}\" from git branch \"${result.branchName}\"`\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// Check required parameters for regular tag creation\n\t\t\tif (!name || typeof name !== 'string') {\n\t\t\t\tlog.error('Missing required parameter: name');\n\t\t\t\tdisableSilentMode();\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\t\tmessage: 'Tag name is required and must be a string'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tlog.info(`Creating new tag: ${name}`);\n\n\t\t\t// Prepare options\n\t\t\tconst options = {\n\t\t\t\tcopyFromCurrent,\n\t\t\t\tcopyFromTag,\n\t\t\t\tdescription\n\t\t\t};\n\n\t\t\t// Call the createTag function\n\t\t\tconst result = await createTag(\n\t\t\t\ttasksJsonPath,\n\t\t\t\tname,\n\t\t\t\toptions,\n\t\t\t\t{\n\t\t\t\t\tsession,\n\t\t\t\t\tmcpLog,\n\t\t\t\t\tprojectRoot\n\t\t\t\t},\n\t\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t\t);\n\n\t\t\t// Restore normal logging\n\t\t\tdisableSilentMode();\n\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\ttagName: result.tagName,\n\t\t\t\t\tcreated: result.created,\n\t\t\t\t\ttasksCopied: result.tasksCopied,\n\t\t\t\t\tsourceTag: result.sourceTag,\n\t\t\t\t\tdescription: result.description,\n\t\t\t\t\tmessage: `Successfully created tag \"${result.tagName}\"`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in addTagDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'ADD_TAG_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/list-tasks.js", "/**\n * list-tasks.js\n * Direct function implementation for listing tasks\n 
*/\n\nimport { listTasks } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for listTasks with error handling and caching.\n *\n * @param {Object} args - Command arguments (now expecting tasksJsonPath explicitly).\n * @param {string} args.tasksJsonPath - Path to the tasks.json file.\n * @param {string} args.reportPath - Path to the report file.\n * @param {string} args.status - Status of the task.\n * @param {boolean} args.withSubtasks - Whether to include subtasks.\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object.\n * @returns {Promise<Object>} - Task list result { success: boolean, data?: any, error?: { code: string, message: string } }.\n */\nexport async function listTasksDirect(args, log, context = {}) {\n\t// Destructure the explicit tasksJsonPath from args\n\tconst { tasksJsonPath, reportPath, status, withSubtasks, projectRoot, tag } =\n\t\targs;\n\tconst { session } = context;\n\n\tif (!tasksJsonPath) {\n\t\tlog.error('listTasksDirect called without tasksJsonPath');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t}\n\t\t};\n\t}\n\n\t// Use the explicit tasksJsonPath for cache key\n\tconst statusFilter = status || 'all';\n\tconst withSubtasksFilter = withSubtasks || false;\n\n\t// Define the action function to be executed on cache miss\n\tconst coreListTasksAction = async () => {\n\t\ttry {\n\t\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\t\tenableSilentMode();\n\n\t\t\tlog.info(\n\t\t\t\t`Executing core listTasks function for path: ${tasksJsonPath}, filter: ${statusFilter}, subtasks: ${withSubtasksFilter}`\n\t\t\t);\n\t\t\t// Pass the explicit tasksJsonPath to 
the core function\n\t\t\tconst resultData = listTasks(\n\t\t\t\ttasksJsonPath,\n\t\t\t\tstatusFilter,\n\t\t\t\treportPath,\n\t\t\t\twithSubtasksFilter,\n\t\t\t\t'json',\n\t\t\t\t{ projectRoot, session, tag }\n\t\t\t);\n\n\t\t\tif (!resultData || !resultData.tasks) {\n\t\t\t\tlog.error('Invalid or empty response from listTasks core function');\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'INVALID_CORE_RESPONSE',\n\t\t\t\t\t\tmessage: 'Invalid or empty response from listTasks core function'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tlog.info(\n\t\t\t\t`Core listTasks function retrieved ${resultData.tasks.length} tasks`\n\t\t\t);\n\n\t\t\t// Restore normal logging\n\t\t\tdisableSilentMode();\n\n\t\t\treturn { success: true, data: resultData };\n\t\t} catch (error) {\n\t\t\t// Make sure to restore normal logging even if there's an error\n\t\t\tdisableSilentMode();\n\n\t\t\tlog.error(`Core listTasks function failed: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'LIST_TASKS_CORE_ERROR',\n\t\t\t\t\tmessage: error.message || 'Failed to list tasks'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t};\n\n\ttry {\n\t\tconst result = await coreListTasksAction();\n\t\tlog.info('listTasksDirect completed');\n\t\treturn result;\n\t} catch (error) {\n\t\tlog.error(`Unexpected error during listTasks: ${error.message}`);\n\t\tconsole.error(error.stack);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'UNEXPECTED_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/prompt-manager.js", "import fs from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\nimport { log } from './utils.js';\nimport Ajv from 'ajv';\nimport addFormats from 'ajv-formats';\n\n/**\n * Manages prompt templates for AI interactions\n */\nexport class PromptManager {\n\tconstructor() {\n\t\tconst __filename = fileURLToPath(import.meta.url);\n\t\tconst 
__dirname = path.dirname(__filename);\n\t\tthis.promptsDir = path.join(__dirname, '..', '..', 'src', 'prompts');\n\t\tthis.cache = new Map();\n\t\tthis.setupValidation();\n\t}\n\n\t/**\n\t * Set up JSON schema validation\n\t * @private\n\t */\n\tsetupValidation() {\n\t\tthis.ajv = new Ajv({ allErrors: true, strict: false });\n\t\taddFormats(this.ajv);\n\n\t\ttry {\n\t\t\t// Load schema from src/prompts/schemas\n\t\t\tconst schemaPath = path.join(\n\t\t\t\tthis.promptsDir,\n\t\t\t\t'schemas',\n\t\t\t\t'prompt-template.schema.json'\n\t\t\t);\n\t\t\tconst schemaContent = fs.readFileSync(schemaPath, 'utf-8');\n\t\t\tconst schema = JSON.parse(schemaContent);\n\n\t\t\tthis.validatePrompt = this.ajv.compile(schema);\n\t\t\tlog('info', '✓ JSON schema validation enabled');\n\t\t} catch (error) {\n\t\t\tlog('warn', `⚠ Schema validation disabled: ${error.message}`);\n\t\t\tthis.validatePrompt = () => true; // Fallback to no validation\n\t\t}\n\t}\n\n\t/**\n\t * Load a prompt template and render it with variables\n\t * @param {string} promptId - The prompt template ID\n\t * @param {Object} variables - Variables to inject into the template\n\t * @param {string} [variantKey] - Optional specific variant to use\n\t * @returns {{systemPrompt: string, userPrompt: string, metadata: Object}}\n\t */\n\tloadPrompt(promptId, variables = {}, variantKey = null) {\n\t\ttry {\n\t\t\t// Check cache first\n\t\t\tconst cacheKey = `${promptId}-${JSON.stringify(variables)}-${variantKey}`;\n\t\t\tif (this.cache.has(cacheKey)) {\n\t\t\t\treturn this.cache.get(cacheKey);\n\t\t\t}\n\n\t\t\t// Load template\n\t\t\tconst template = this.loadTemplate(promptId);\n\n\t\t\t// Validate parameters if schema validation is available\n\t\t\tif (this.validatePrompt && this.validatePrompt !== true) {\n\t\t\t\tthis.validateParameters(template, variables);\n\t\t\t}\n\n\t\t\t// Select the variant - use specified key or select based on conditions\n\t\t\tconst variant = variantKey\n\t\t\t\t? 
{ ...template.prompts[variantKey], name: variantKey }\n\t\t\t\t: this.selectVariant(template, variables);\n\n\t\t\t// Render the prompts with variables\n\t\t\tconst rendered = {\n\t\t\t\tsystemPrompt: this.renderTemplate(variant.system, variables),\n\t\t\t\tuserPrompt: this.renderTemplate(variant.user, variables),\n\t\t\t\tmetadata: {\n\t\t\t\t\ttemplateId: template.id,\n\t\t\t\t\tversion: template.version,\n\t\t\t\t\tvariant: variant.name || 'default',\n\t\t\t\t\tparameters: variables\n\t\t\t\t}\n\t\t\t};\n\n\t\t\t// Cache the result\n\t\t\tthis.cache.set(cacheKey, rendered);\n\n\t\t\treturn rendered;\n\t\t} catch (error) {\n\t\t\tlog('error', `Failed to load prompt ${promptId}: ${error.message}`);\n\t\t\tthrow error;\n\t\t}\n\t}\n\n\t/**\n\t * Load a prompt template from disk\n\t * @private\n\t */\n\tloadTemplate(promptId) {\n\t\tconst templatePath = path.join(this.promptsDir, `${promptId}.json`);\n\n\t\ttry {\n\t\t\tconst content = fs.readFileSync(templatePath, 'utf-8');\n\t\t\tconst template = JSON.parse(content);\n\n\t\t\t// Schema validation if available (do this first for detailed errors)\n\t\t\tif (this.validatePrompt && this.validatePrompt !== true) {\n\t\t\t\tconst valid = this.validatePrompt(template);\n\t\t\t\tif (!valid) {\n\t\t\t\t\tconst errors = this.validatePrompt.errors\n\t\t\t\t\t\t.map((err) => `${err.instancePath || 'root'}: ${err.message}`)\n\t\t\t\t\t\t.join(', ');\n\t\t\t\t\tthrow new Error(`Schema validation failed: ${errors}`);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Fallback basic validation if no schema validation available\n\t\t\t\tif (!template.id || !template.prompts || !template.prompts.default) {\n\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t'Invalid template structure: missing required fields (id, prompts.default)'\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn template;\n\t\t} catch (error) {\n\t\t\tif (error.code === 'ENOENT') {\n\t\t\t\tthrow new Error(`Prompt template '${promptId}' not found`);\n\t\t\t}\n\t\t\tthrow 
error;\n\t\t}\n\t}\n\n\t/**\n\t * Validate parameters against template schema\n\t * @private\n\t */\n\tvalidateParameters(template, variables) {\n\t\tif (!template.parameters) return;\n\n\t\tconst errors = [];\n\n\t\tfor (const [paramName, paramConfig] of Object.entries(\n\t\t\ttemplate.parameters\n\t\t)) {\n\t\t\tconst value = variables[paramName];\n\n\t\t\t// Check required parameters\n\t\t\tif (paramConfig.required && value === undefined) {\n\t\t\t\terrors.push(`Required parameter '${paramName}' missing`);\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\t// Skip validation for undefined optional parameters\n\t\t\tif (value === undefined) continue;\n\n\t\t\t// Type validation\n\t\t\tif (!this.validateParameterType(value, paramConfig.type)) {\n\t\t\t\terrors.push(\n\t\t\t\t\t`Parameter '${paramName}' expected ${paramConfig.type}, got ${typeof value}`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Enum validation\n\t\t\tif (paramConfig.enum && !paramConfig.enum.includes(value)) {\n\t\t\t\terrors.push(\n\t\t\t\t\t`Parameter '${paramName}' must be one of: ${paramConfig.enum.join(', ')}`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Pattern validation for strings\n\t\t\tif (paramConfig.pattern && typeof value === 'string') {\n\t\t\t\tconst regex = new RegExp(paramConfig.pattern);\n\t\t\t\tif (!regex.test(value)) {\n\t\t\t\t\terrors.push(\n\t\t\t\t\t\t`Parameter '${paramName}' does not match required pattern: ${paramConfig.pattern}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Range validation for numbers\n\t\t\tif (typeof value === 'number') {\n\t\t\t\tif (paramConfig.minimum !== undefined && value < paramConfig.minimum) {\n\t\t\t\t\terrors.push(\n\t\t\t\t\t\t`Parameter '${paramName}' must be >= ${paramConfig.minimum}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tif (paramConfig.maximum !== undefined && value > paramConfig.maximum) {\n\t\t\t\t\terrors.push(\n\t\t\t\t\t\t`Parameter '${paramName}' must be <= ${paramConfig.maximum}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif (errors.length > 0) {\n\t\t\tthrow 
new Error(`Parameter validation failed: ${errors.join('; ')}`);\n\t\t}\n\t}\n\n\t/**\n\t * Validate parameter type\n\t * @private\n\t */\n\tvalidateParameterType(value, expectedType) {\n\t\tswitch (expectedType) {\n\t\t\tcase 'string':\n\t\t\t\treturn typeof value === 'string';\n\t\t\tcase 'number':\n\t\t\t\treturn typeof value === 'number';\n\t\t\tcase 'boolean':\n\t\t\t\treturn typeof value === 'boolean';\n\t\t\tcase 'array':\n\t\t\t\treturn Array.isArray(value);\n\t\t\tcase 'object':\n\t\t\t\treturn (\n\t\t\t\t\ttypeof value === 'object' && value !== null && !Array.isArray(value)\n\t\t\t\t);\n\t\t\tdefault:\n\t\t\t\treturn true;\n\t\t}\n\t}\n\n\t/**\n\t * Select the best variant based on conditions\n\t * @private\n\t */\n\tselectVariant(template, variables) {\n\t\t// Check each variant's condition\n\t\tfor (const [name, variant] of Object.entries(template.prompts)) {\n\t\t\tif (name === 'default') continue;\n\n\t\t\tif (\n\t\t\t\tvariant.condition &&\n\t\t\t\tthis.evaluateCondition(variant.condition, variables)\n\t\t\t) {\n\t\t\t\treturn { ...variant, name };\n\t\t\t}\n\t\t}\n\n\t\t// Fall back to default\n\t\treturn { ...template.prompts.default, name: 'default' };\n\t}\n\n\t/**\n\t * Evaluate a condition string\n\t * @private\n\t */\n\tevaluateCondition(condition, variables) {\n\t\ttry {\n\t\t\t// Create a safe evaluation context\n\t\t\tconst context = { ...variables };\n\n\t\t\t// Simple condition evaluation (can be enhanced)\n\t\t\t// For now, supports basic comparisons\n\t\t\tconst func = new Function(...Object.keys(context), `return ${condition}`);\n\t\t\treturn func(...Object.values(context));\n\t\t} catch (error) {\n\t\t\tlog('warn', `Failed to evaluate condition: ${condition}`);\n\t\t\treturn false;\n\t\t}\n\t}\n\n\t/**\n\t * Render a template string with variables\n\t * @private\n\t */\n\trenderTemplate(template, variables) {\n\t\tlet rendered = template;\n\n\t\t// Handle helper functions like (eq variable \"value\")\n\t\trendered = 
rendered.replace(\n\t\t\t/\\(eq\\s+(\\w+(?:\\.\\w+)*)\\s+\"([^\"]+)\"\\)/g,\n\t\t\t(match, path, compareValue) => {\n\t\t\t\tconst value = this.getNestedValue(variables, path);\n\t\t\t\treturn value === compareValue ? 'true' : 'false';\n\t\t\t}\n\t\t);\n\n\t\t// Handle not helper function like (not variable)\n\t\trendered = rendered.replace(/\\(not\\s+(\\w+(?:\\.\\w+)*)\\)/g, (match, path) => {\n\t\t\tconst value = this.getNestedValue(variables, path);\n\t\t\treturn !value ? 'true' : 'false';\n\t\t});\n\n\t\t// Handle gt (greater than) helper function like (gt variable 0)\n\t\trendered = rendered.replace(\n\t\t\t/\\(gt\\s+(\\w+(?:\\.\\w+)*)\\s+(\\d+(?:\\.\\d+)?)\\)/g,\n\t\t\t(match, path, compareValue) => {\n\t\t\t\tconst value = this.getNestedValue(variables, path);\n\t\t\t\tconst numValue = parseFloat(compareValue);\n\t\t\t\treturn typeof value === 'number' && value > numValue ? 'true' : 'false';\n\t\t\t}\n\t\t);\n\n\t\t// Handle gte (greater than or equal) helper function like (gte variable 0)\n\t\trendered = rendered.replace(\n\t\t\t/\\(gte\\s+(\\w+(?:\\.\\w+)*)\\s+(\\d+(?:\\.\\d+)?)\\)/g,\n\t\t\t(match, path, compareValue) => {\n\t\t\t\tconst value = this.getNestedValue(variables, path);\n\t\t\t\tconst numValue = parseFloat(compareValue);\n\t\t\t\treturn typeof value === 'number' && value >= numValue\n\t\t\t\t\t? 'true'\n\t\t\t\t\t: 'false';\n\t\t\t}\n\t\t);\n\n\t\t// Handle conditionals with else {{#if variable}}...{{else}}...{{/if}}\n\t\trendered = rendered.replace(\n\t\t\t/\\{\\{#if\\s+([^}]+)\\}\\}([\\s\\S]*?)(?:\\{\\{else\\}\\}([\\s\\S]*?))?\\{\\{\\/if\\}\\}/g,\n\t\t\t(match, condition, trueContent, falseContent = '') => {\n\t\t\t\t// Handle boolean values and helper function results\n\t\t\t\tlet value;\n\t\t\t\tif (condition === 'true') {\n\t\t\t\t\tvalue = true;\n\t\t\t\t} else if (condition === 'false') {\n\t\t\t\t\tvalue = false;\n\t\t\t\t} else {\n\t\t\t\t\tvalue = this.getNestedValue(variables, condition);\n\t\t\t\t}\n\t\t\t\treturn value ? 
trueContent : falseContent;\n\t\t\t}\n\t\t);\n\n\t\t// Handle each loops {{#each array}}...{{/each}}\n\t\trendered = rendered.replace(\n\t\t\t/\\{\\{#each\\s+(\\w+(?:\\.\\w+)*)\\}\\}([\\s\\S]*?)\\{\\{\\/each\\}\\}/g,\n\t\t\t(match, path, content) => {\n\t\t\t\tconst array = this.getNestedValue(variables, path);\n\t\t\t\tif (!Array.isArray(array)) return '';\n\n\t\t\t\treturn array\n\t\t\t\t\t.map((item, index) => {\n\t\t\t\t\t\t// Create a context with item properties and special variables\n\t\t\t\t\t\tconst itemContext = {\n\t\t\t\t\t\t\t...variables,\n\t\t\t\t\t\t\t...item,\n\t\t\t\t\t\t\t'@index': index,\n\t\t\t\t\t\t\t'@first': index === 0,\n\t\t\t\t\t\t\t'@last': index === array.length - 1\n\t\t\t\t\t\t};\n\n\t\t\t\t\t\t// Recursively render the content with item context\n\t\t\t\t\t\treturn this.renderTemplate(content, itemContext);\n\t\t\t\t\t})\n\t\t\t\t\t.join('');\n\t\t\t}\n\t\t);\n\n\t\t// Handle json helper {{{json variable}}} (triple braces for raw output)\n\t\trendered = rendered.replace(\n\t\t\t/\\{\\{\\{json\\s+(\\w+(?:\\.\\w+)*)\\}\\}\\}/g,\n\t\t\t(match, path) => {\n\t\t\t\tconst value = this.getNestedValue(variables, path);\n\t\t\t\treturn value !== undefined ? JSON.stringify(value, null, 2) : '';\n\t\t\t}\n\t\t);\n\n\t\t// Handle variable substitution {{variable}}\n\t\trendered = rendered.replace(/\\{\\{(\\w+(?:\\.\\w+)*)\\}\\}/g, (match, path) => {\n\t\t\tconst value = this.getNestedValue(variables, path);\n\t\t\treturn value !== undefined ? value : '';\n\t\t});\n\n\t\treturn rendered;\n\t}\n\n\t/**\n\t * Get nested value from object using dot notation\n\t * @private\n\t */\n\tgetNestedValue(obj, path) {\n\t\treturn path\n\t\t\t.split('.')\n\t\t\t.reduce(\n\t\t\t\t(current, key) =>\n\t\t\t\t\tcurrent && current[key] !== undefined ? 
current[key] : undefined,\n\t\t\t\tobj\n\t\t\t);\n\t}\n\n\t/**\n\t * Validate all prompt templates\n\t */\n\tvalidateAllPrompts() {\n\t\tconst results = { total: 0, errors: [], valid: [] };\n\n\t\ttry {\n\t\t\tconst files = fs.readdirSync(this.promptsDir);\n\t\t\tconst promptFiles = files.filter((file) => file.endsWith('.json'));\n\n\t\t\tfor (const file of promptFiles) {\n\t\t\t\tconst promptId = file.replace('.json', '');\n\t\t\t\tresults.total++;\n\n\t\t\t\ttry {\n\t\t\t\t\tthis.loadTemplate(promptId);\n\t\t\t\t\tresults.valid.push(promptId);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tresults.errors.push(`${promptId}: ${error.message}`);\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\tresults.errors.push(\n\t\t\t\t`Failed to read templates directory: ${error.message}`\n\t\t\t);\n\t\t}\n\n\t\treturn results;\n\t}\n\n\t/**\n\t * List all available prompt templates\n\t */\n\tlistPrompts() {\n\t\ttry {\n\t\t\tconst files = fs.readdirSync(this.promptsDir);\n\t\t\tconst prompts = [];\n\n\t\t\tfor (const file of files) {\n\t\t\t\tif (!file.endsWith('.json')) continue;\n\n\t\t\t\tconst promptId = file.replace('.json', '');\n\t\t\t\ttry {\n\t\t\t\t\tconst template = this.loadTemplate(promptId);\n\t\t\t\t\tprompts.push({\n\t\t\t\t\t\tid: template.id,\n\t\t\t\t\t\tdescription: template.description,\n\t\t\t\t\t\tversion: template.version,\n\t\t\t\t\t\tparameters: template.parameters,\n\t\t\t\t\t\ttags: template.metadata?.tags || []\n\t\t\t\t\t});\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog('warn', `Failed to load template ${promptId}: ${error.message}`);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn prompts;\n\t\t} catch (error) {\n\t\t\tif (error.code === 'ENOENT') {\n\t\t\t\t// Templates directory doesn't exist yet\n\t\t\t\treturn [];\n\t\t\t}\n\t\t\tthrow error;\n\t\t}\n\t}\n\n\t/**\n\t * Validate template structure\n\t */\n\tvalidateTemplate(templatePath) {\n\t\ttry {\n\t\t\tconst content = fs.readFileSync(templatePath, 'utf-8');\n\t\t\tconst template = 
JSON.parse(content);\n\n\t\t\t// Check required fields\n\t\t\tconst required = ['id', 'version', 'description', 'prompts'];\n\t\t\tfor (const field of required) {\n\t\t\t\tif (!template[field]) {\n\t\t\t\t\treturn { valid: false, error: `Missing required field: ${field}` };\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Check default prompt exists\n\t\t\tif (!template.prompts.default) {\n\t\t\t\treturn { valid: false, error: 'Missing default prompt variant' };\n\t\t\t}\n\n\t\t\t// Check each variant has required fields\n\t\t\tfor (const [name, variant] of Object.entries(template.prompts)) {\n\t\t\t\tif (!variant.system || !variant.user) {\n\t\t\t\t\treturn {\n\t\t\t\t\t\tvalid: false,\n\t\t\t\t\t\terror: `Variant '${name}' missing system or user prompt`\n\t\t\t\t\t};\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Schema validation if available\n\t\t\tif (this.validatePrompt && this.validatePrompt !== true) {\n\t\t\t\tconst valid = this.validatePrompt(template);\n\t\t\t\tif (!valid) {\n\t\t\t\t\tconst errors = this.validatePrompt.errors\n\t\t\t\t\t\t.map((err) => `${err.instancePath || 'root'}: ${err.message}`)\n\t\t\t\t\t\t.join(', ');\n\t\t\t\t\treturn { valid: false, error: `Schema validation failed: ${errors}` };\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn { valid: true };\n\t\t} catch (error) {\n\t\t\treturn { valid: false, error: error.message };\n\t\t}\n\t}\n}\n\n// Singleton instance\nlet promptManager = null;\n\n/**\n * Get or create the prompt manager instance\n * @returns {PromptManager}\n */\nexport function getPromptManager() {\n\tif (!promptManager) {\n\t\tpromptManager = new PromptManager();\n\t}\n\treturn promptManager;\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/create-tag-from-branch.js", "/**\n * create-tag-from-branch.js\n * Direct function implementation for creating tags from git branches\n */\n\nimport { createTagFromBranch } from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tgetCurrentBranch,\n\tisGitRepository\n} from 
'../../../../scripts/modules/utils/git-utils.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for creating tags from git branches with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.tasksJsonPath - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.branchName] - Git branch name (optional, uses current branch if not provided)\n * @param {boolean} [args.copyFromCurrent] - Copy tasks from current tag\n * @param {string} [args.copyFromTag] - Copy tasks from specific tag\n * @param {string} [args.description] - Custom description for the tag\n * @param {boolean} [args.autoSwitch] - Automatically switch to the new tag\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function createTagFromBranchDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst {\n\t\ttasksJsonPath,\n\t\tbranchName,\n\t\tcopyFromCurrent,\n\t\tcopyFromTag,\n\t\tdescription,\n\t\tautoSwitch,\n\t\tprojectRoot\n\t} = args;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('createTagFromBranchDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check if 
projectRoot was provided\n\t\tif (!projectRoot) {\n\t\t\tlog.error('createTagFromBranchDirect called without projectRoot');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'projectRoot is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check if we're in a git repository\n\t\tif (!(await isGitRepository(projectRoot))) {\n\t\t\tlog.error('Not in a git repository');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'NOT_GIT_REPOSITORY',\n\t\t\t\t\tmessage: 'Not in a git repository. Cannot create tag from branch.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Determine branch name\n\t\tlet targetBranch = branchName;\n\t\tif (!targetBranch) {\n\t\t\ttargetBranch = await getCurrentBranch(projectRoot);\n\t\t\tif (!targetBranch) {\n\t\t\t\tlog.error('Could not determine current git branch');\n\t\t\t\tdisableSilentMode();\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'NO_CURRENT_BRANCH',\n\t\t\t\t\t\tmessage: 'Could not determine current git branch'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\t\t}\n\n\t\tlog.info(`Creating tag from git branch: ${targetBranch}`);\n\n\t\t// Prepare options\n\t\tconst options = {\n\t\t\tcopyFromCurrent: copyFromCurrent || false,\n\t\t\tcopyFromTag,\n\t\t\tdescription:\n\t\t\t\tdescription || `Tag created from git branch \"${targetBranch}\"`,\n\t\t\tautoSwitch: autoSwitch || false\n\t\t};\n\n\t\t// Call the createTagFromBranch function\n\t\tconst result = await createTagFromBranch(\n\t\t\ttasksJsonPath,\n\t\t\ttargetBranch,\n\t\t\toptions,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tbranchName: result.branchName,\n\t\t\t\ttagName: 
result.tagName,\n\t\t\t\tcreated: result.created,\n\t\t\t\tmappingUpdated: result.mappingUpdated,\n\t\t\t\tautoSwitched: result.autoSwitched,\n\t\t\t\tmessage: `Successfully created tag \"${result.tagName}\" from branch \"${result.branchName}\"`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in createTagFromBranchDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'CREATE_TAG_FROM_BRANCH_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/initialize-project.js", "import { initializeProject } from '../../../../scripts/init.js'; // Import core function and its logger if needed separately\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n\t// isSilentMode // Not used directly here\n} from '../../../../scripts/modules/utils.js';\nimport os from 'os'; // Import os module for home directory check\nimport { RULE_PROFILES } from '../../../../src/constants/profiles.js';\nimport { convertAllRulesToProfileRules } from '../../../../src/utils/rule-transformer.js';\n\n/**\n * Direct function wrapper for initializing a project.\n * Derives target directory from session, sets CWD, and calls core init logic.\n * @param {object} args - Arguments containing initialization options (addAliases, initGit, storeTasksInGit, skipInstall, yes, projectRoot, rules)\n * @param {object} log - The FastMCP logger instance.\n * @param {object} context - The context object, must contain { session }.\n * @returns {Promise<{success: boolean, data?: any, error?: {code: string, message: string}}>} - Standard result object.\n */\nexport async function initializeProjectDirect(args, log, context = {}) {\n\tconst { session } = context; // Keep session if core logic needs it\n\tconst homeDir = os.homedir();\n\n\tlog.info(`Args received in direct function: 
${JSON.stringify(args)}`);\n\n\t// --- Determine Target Directory ---\n\t// TRUST the projectRoot passed from the tool layer via args\n\t// The HOF in the tool layer already normalized and validated it came from a reliable source (args or session)\n\tconst targetDirectory = args.projectRoot;\n\n\t// --- Validate the targetDirectory (basic sanity checks) ---\n\tif (\n\t\t!targetDirectory ||\n\t\ttypeof targetDirectory !== 'string' || // Ensure it's a string\n\t\ttargetDirectory === '/' ||\n\t\ttargetDirectory === homeDir\n\t) {\n\t\tlog.error(\n\t\t\t`Invalid target directory received from tool layer: '${targetDirectory}'`\n\t\t);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'INVALID_TARGET_DIRECTORY',\n\t\t\t\tmessage: `Cannot initialize project: Invalid target directory '${targetDirectory}' received. Please ensure a valid workspace/folder is open or specified.`,\n\t\t\t\tdetails: `Received args.projectRoot: ${args.projectRoot}` // Show what was received\n\t\t\t}\n\t\t};\n\t}\n\n\t// --- Proceed with validated targetDirectory ---\n\tlog.info(`Validated target directory for initialization: ${targetDirectory}`);\n\n\tconst originalCwd = process.cwd();\n\tlet resultData;\n\tlet success = false;\n\tlet errorResult = null;\n\n\tlog.info(\n\t\t`Temporarily changing CWD to ${targetDirectory} for initialization.`\n\t);\n\tprocess.chdir(targetDirectory); // Change CWD to the HOF-provided root\n\n\tenableSilentMode();\n\ttry {\n\t\t// Construct options ONLY from the relevant flags in args\n\t\t// The core initializeProject operates in the current CWD, which we just set\n\t\tconst options = {\n\t\t\taddAliases: args.addAliases,\n\t\t\tinitGit: args.initGit,\n\t\t\tstoreTasksInGit: args.storeTasksInGit,\n\t\t\tskipInstall: args.skipInstall,\n\t\t\tyes: true // Force yes mode\n\t\t};\n\n\t\t// Handle rules option with MCP-specific defaults\n\t\tif (Array.isArray(args.rules) && args.rules.length > 0) {\n\t\t\toptions.rules = 
args.rules;\n\t\t\toptions.rulesExplicitlyProvided = true;\n\t\t\tlog.info(`Including rules: ${args.rules.join(', ')}`);\n\t\t} else {\n\t\t\t// For MCP initialization, default to Cursor profile only\n\t\t\toptions.rules = ['cursor'];\n\t\t\toptions.rulesExplicitlyProvided = true;\n\t\t\tlog.info(`No rule profiles specified, defaulting to: Cursor`);\n\t\t}\n\n\t\tlog.info(`Initializing project with options: ${JSON.stringify(options)}`);\n\t\tconst result = await initializeProject(options); // Call core logic\n\n\t\tresultData = {\n\t\t\tmessage: 'Project initialized successfully.',\n\t\t\tnext_step:\n\t\t\t\t'Now that the project is initialized, the next step is to create the tasks by parsing a PRD. This will create the tasks folder and the initial task files (tasks folder will be created when parse-prd is run). The parse-prd tool will require a prd.txt file as input (typically found in .taskmaster/docs/ directory). You can create a prd.txt file by asking the user about their idea, and then using the .taskmaster/templates/example_prd.txt file as a template to generate a prd.txt file in .taskmaster/docs/. You may skip all of this if the user already has a prd.txt file. You can THEN use the parse-prd tool to create the tasks. So: step 1 after initialization is to create a prd.txt file in .taskmaster/docs/prd.txt or confirm the user already has one. Step 2 is to use the parse-prd tool to create the tasks. Do not bother looking for tasks after initialization, just use the parse-prd tool to create the tasks after creating a prd.txt from which to parse the tasks. 
You do NOT need to reinitialize the project to parse-prd.',\n\t\t\t...result\n\t\t};\n\t\tsuccess = true;\n\t\tlog.info(\n\t\t\t`Project initialization completed successfully in ${targetDirectory}.`\n\t\t);\n\t} catch (error) {\n\t\tlog.error(`Core initializeProject failed: ${error.message}`);\n\t\terrorResult = {\n\t\t\tcode: 'INITIALIZATION_FAILED',\n\t\t\tmessage: `Core project initialization failed: ${error.message}`,\n\t\t\tdetails: error.stack\n\t\t};\n\t\tsuccess = false;\n\t} finally {\n\t\tdisableSilentMode();\n\t\tlog.info(`Restoring original CWD: ${originalCwd}`);\n\t\tprocess.chdir(originalCwd);\n\t}\n\n\tif (success) {\n\t\treturn { success: true, data: resultData };\n\t} else {\n\t\treturn { success: false, error: errorResult };\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/copy-tag.js", "/**\n * copy-tag.js\n * Direct function implementation for copying a tag\n */\n\nimport { copyTag } from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for copying a tag with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.sourceName - Name of the source tag to copy from\n * @param {string} args.targetName - Name of the new tag to create\n * @param {string} [args.description] - Optional description for the new tag\n * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function copyTagDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { 
tasksJsonPath, sourceName, targetName, description, projectRoot } =\n\t\targs;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('copyTagDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check required parameters\n\t\tif (!sourceName || typeof sourceName !== 'string') {\n\t\t\tlog.error('Missing required parameter: sourceName');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage: 'Source tag name is required and must be a string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (!targetName || typeof targetName !== 'string') {\n\t\t\tlog.error('Missing required parameter: targetName');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage: 'Target tag name is required and must be a string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlog.info(`Copying tag from \"${sourceName}\" to \"${targetName}\"`);\n\n\t\t// Prepare options\n\t\tconst options = {\n\t\t\tdescription\n\t\t};\n\n\t\t// Call the copyTag function\n\t\tconst result = await copyTag(\n\t\t\ttasksJsonPath,\n\t\t\tsourceName,\n\t\t\ttargetName,\n\t\t\toptions,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tsourceName: result.sourceName,\n\t\t\t\ttargetName: 
result.targetName,\n\t\t\t\tcopied: result.copied,\n\t\t\t\ttasksCopied: result.tasksCopied,\n\t\t\t\tdescription: result.description,\n\t\t\t\tmessage: `Successfully copied tag from \"${result.sourceName}\" to \"${result.targetName}\"`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in copyTagDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'COPY_TAG_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/src/utils/manage-gitignore.js", "// Utility to manage .gitignore files with task file preferences and template merging\nimport fs from 'fs';\nimport path from 'path';\n\n// Constants\nconst TASK_FILES_COMMENT = '# Task files';\nconst TASK_JSON_PATTERN = 'tasks.json';\nconst TASK_DIR_PATTERN = 'tasks/';\n\n/**\n * Normalizes a line by removing comments and trimming whitespace\n * @param {string} line - Line to normalize\n * @returns {string} Normalized line\n */\nfunction normalizeLine(line) {\n\treturn line.trim().replace(/^#/, '').trim();\n}\n\n/**\n * Checks if a line is task-related (tasks.json or tasks/)\n * @param {string} line - Line to check\n * @returns {boolean} True if line is task-related\n */\nfunction isTaskLine(line) {\n\tconst normalized = normalizeLine(line);\n\treturn normalized === TASK_JSON_PATTERN || normalized === TASK_DIR_PATTERN;\n}\n\n/**\n * Adjusts task-related lines in template based on storage preference\n * @param {string[]} templateLines - Array of template lines\n * @param {boolean} storeTasksInGit - Whether to comment out task lines\n * @returns {string[]} Adjusted template lines\n */\nfunction adjustTaskLinesInTemplate(templateLines, storeTasksInGit) {\n\treturn templateLines.map((line) => {\n\t\tif (isTaskLine(line)) {\n\t\t\tconst normalized = normalizeLine(line);\n\t\t\t// Preserve original trailing whitespace from the 
line\n\t\t\tconst originalTrailingSpace = line.match(/\\s*$/)[0];\n\t\t\treturn storeTasksInGit\n\t\t\t\t? `# ${normalized}${originalTrailingSpace}`\n\t\t\t\t: `${normalized}${originalTrailingSpace}`;\n\t\t}\n\t\treturn line;\n\t});\n}\n\n/**\n * Removes existing task files section from content\n * @param {string[]} existingLines - Existing file lines\n * @returns {string[]} Lines with task section removed\n */\nfunction removeExistingTaskSection(existingLines) {\n\tconst cleanedLines = [];\n\tlet inTaskSection = false;\n\n\tfor (const line of existingLines) {\n\t\t// Start of task files section\n\t\tif (line.trim() === TASK_FILES_COMMENT) {\n\t\t\tinTaskSection = true;\n\t\t\tcontinue;\n\t\t}\n\n\t\t// Task lines (commented or not)\n\t\tif (isTaskLine(line)) {\n\t\t\tcontinue;\n\t\t}\n\n\t\t// Empty lines within task section\n\t\tif (inTaskSection && !line.trim()) {\n\t\t\tcontinue;\n\t\t}\n\n\t\t// End of task section (any non-empty, non-task line)\n\t\tif (inTaskSection && line.trim() && !isTaskLine(line)) {\n\t\t\tinTaskSection = false;\n\t\t}\n\n\t\t// Keep all other lines\n\t\tif (!inTaskSection) {\n\t\t\tcleanedLines.push(line);\n\t\t}\n\t}\n\n\treturn cleanedLines;\n}\n\n/**\n * Filters template lines to only include new content not already present\n * @param {string[]} templateLines - Template lines\n * @param {Set<string>} existingLinesSet - Set of existing trimmed lines\n * @returns {string[]} New lines to add\n */\nfunction filterNewTemplateLines(templateLines, existingLinesSet) {\n\treturn templateLines.filter((line) => {\n\t\tconst trimmed = line.trim();\n\t\tif (!trimmed) return false;\n\n\t\t// Skip task-related lines (handled separately)\n\t\tif (isTaskLine(line) || trimmed === TASK_FILES_COMMENT) {\n\t\t\treturn false;\n\t\t}\n\n\t\t// Include only if not already present\n\t\treturn !existingLinesSet.has(trimmed);\n\t});\n}\n\n/**\n * Builds the task files section based on storage preference\n * @param {boolean} storeTasksInGit - Whether to 
comment out task lines\n * @returns {string[]} Task files section lines\n */\nfunction buildTaskFilesSection(storeTasksInGit) {\n\tconst section = [TASK_FILES_COMMENT];\n\n\tif (storeTasksInGit) {\n\t\tsection.push(`# ${TASK_JSON_PATTERN}`, `# ${TASK_DIR_PATTERN} `);\n\t} else {\n\t\tsection.push(TASK_JSON_PATTERN, `${TASK_DIR_PATTERN} `);\n\t}\n\n\treturn section;\n}\n\n/**\n * Adds a separator line if needed (avoids double spacing)\n * @param {string[]} lines - Current lines array\n */\nfunction addSeparatorIfNeeded(lines) {\n\tif (lines.some((line) => line.trim())) {\n\t\tconst lastLine = lines[lines.length - 1];\n\t\tif (lastLine && lastLine.trim()) {\n\t\t\tlines.push('');\n\t\t}\n\t}\n}\n\n/**\n * Validates input parameters\n * @param {string} targetPath - Path to .gitignore file\n * @param {string} content - Template content\n * @param {boolean} storeTasksInGit - Storage preference\n * @throws {Error} If validation fails\n */\nfunction validateInputs(targetPath, content, storeTasksInGit) {\n\tif (!targetPath || typeof targetPath !== 'string') {\n\t\tthrow new Error('targetPath must be a non-empty string');\n\t}\n\n\tif (!targetPath.endsWith('.gitignore')) {\n\t\tthrow new Error('targetPath must end with .gitignore');\n\t}\n\n\tif (!content || typeof content !== 'string') {\n\t\tthrow new Error('content must be a non-empty string');\n\t}\n\n\tif (typeof storeTasksInGit !== 'boolean') {\n\t\tthrow new Error('storeTasksInGit must be a boolean');\n\t}\n}\n\n/**\n * Creates a new .gitignore file from template\n * @param {string} targetPath - Path to create file at\n * @param {string[]} templateLines - Adjusted template lines\n * @param {function} log - Logging function\n */\nfunction createNewGitignoreFile(targetPath, templateLines, log) {\n\ttry {\n\t\tfs.writeFileSync(targetPath, templateLines.join('\\n') + '\\n');\n\t\tif (typeof log === 'function') {\n\t\t\tlog('success', `Created ${targetPath} with full template`);\n\t\t}\n\t} catch (error) {\n\t\tif (typeof 
log === 'function') {\n\t\t\tlog('error', `Failed to create ${targetPath}: ${error.message}`);\n\t\t}\n\t\tthrow error;\n\t}\n}\n\n/**\n * Merges template content with existing .gitignore file\n * @param {string} targetPath - Path to existing file\n * @param {string[]} templateLines - Adjusted template lines\n * @param {boolean} storeTasksInGit - Storage preference\n * @param {function} log - Logging function\n */\nfunction mergeWithExistingFile(\n\ttargetPath,\n\ttemplateLines,\n\tstoreTasksInGit,\n\tlog\n) {\n\ttry {\n\t\t// Read and process existing file\n\t\tconst existingContent = fs.readFileSync(targetPath, 'utf8');\n\t\tconst existingLines = existingContent.split('\\n');\n\n\t\t// Remove existing task section\n\t\tconst cleanedExistingLines = removeExistingTaskSection(existingLines);\n\n\t\t// Find new template lines to add\n\t\tconst existingLinesSet = new Set(\n\t\t\tcleanedExistingLines.map((line) => line.trim()).filter((line) => line)\n\t\t);\n\t\tconst newLines = filterNewTemplateLines(templateLines, existingLinesSet);\n\n\t\t// Build final content\n\t\tconst finalLines = [...cleanedExistingLines];\n\n\t\t// Add new template content\n\t\tif (newLines.length > 0) {\n\t\t\taddSeparatorIfNeeded(finalLines);\n\t\t\tfinalLines.push(...newLines);\n\t\t}\n\n\t\t// Add task files section\n\t\taddSeparatorIfNeeded(finalLines);\n\t\tfinalLines.push(...buildTaskFilesSection(storeTasksInGit));\n\n\t\t// Write result\n\t\tfs.writeFileSync(targetPath, finalLines.join('\\n') + '\\n');\n\n\t\tif (typeof log === 'function') {\n\t\t\tconst hasNewContent =\n\t\t\t\tnewLines.length > 0 ? 
' and merged new content' : '';\n\t\t\tlog(\n\t\t\t\t'success',\n\t\t\t\t`Updated ${targetPath} according to user preference${hasNewContent}`\n\t\t\t);\n\t\t}\n\t} catch (error) {\n\t\tif (typeof log === 'function') {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`Failed to merge content with ${targetPath}: ${error.message}`\n\t\t\t);\n\t\t}\n\t\tthrow error;\n\t}\n}\n\n/**\n * Manages .gitignore file creation and updates with task file preferences\n * @param {string} targetPath - Path to the .gitignore file\n * @param {string} content - Template content for .gitignore\n * @param {boolean} storeTasksInGit - Whether to store tasks in git or not\n * @param {function} log - Logging function (level, message)\n * @throws {Error} If validation or file operations fail\n */\nfunction manageGitignoreFile(\n\ttargetPath,\n\tcontent,\n\tstoreTasksInGit = true,\n\tlog = null\n) {\n\t// Validate inputs\n\tvalidateInputs(targetPath, content, storeTasksInGit);\n\n\t// Process template with task preference\n\tconst templateLines = content.split('\\n');\n\tconst adjustedTemplateLines = adjustTaskLinesInTemplate(\n\t\ttemplateLines,\n\t\tstoreTasksInGit\n\t);\n\n\t// Handle file creation or merging\n\tif (!fs.existsSync(targetPath)) {\n\t\tcreateNewGitignoreFile(targetPath, adjustedTemplateLines, log);\n\t} else {\n\t\tmergeWithExistingFile(\n\t\t\ttargetPath,\n\t\t\tadjustedTemplateLines,\n\t\t\tstoreTasksInGit,\n\t\t\tlog\n\t\t);\n\t}\n}\n\nexport default manageGitignoreFile;\nexport {\n\tmanageGitignoreFile,\n\tnormalizeLine,\n\tisTaskLine,\n\tbuildTaskFilesSection,\n\tTASK_FILES_COMMENT,\n\tTASK_JSON_PATTERN,\n\tTASK_DIR_PATTERN\n};\n"], ["/claude-task-master/scripts/modules/utils/git-utils.js", "/**\n * git-utils.js\n * Git integration utilities for Task Master\n * Uses raw git commands and gh CLI for operations\n * MCP-friendly: All functions require projectRoot parameter\n */\n\nimport { exec, execSync } from 'child_process';\nimport { promisify } from 'util';\nimport path from 
'path';\nimport fs from 'fs';\n\nconst execAsync = promisify(exec);\n\n/**\n * Check if the specified directory is inside a git repository\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<boolean>} True if inside a git repository\n */\nasync function isGitRepository(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for isGitRepository');\n\t}\n\n\ttry {\n\t\tawait execAsync('git rev-parse --git-dir', { cwd: projectRoot });\n\t\treturn true;\n\t} catch (error) {\n\t\treturn false;\n\t}\n}\n\n/**\n * Get the current git branch name\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<string|null>} Current branch name or null if not in git repo\n */\nasync function getCurrentBranch(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getCurrentBranch');\n\t}\n\n\ttry {\n\t\tconst { stdout } = await execAsync('git rev-parse --abbrev-ref HEAD', {\n\t\t\tcwd: projectRoot\n\t\t});\n\t\treturn stdout.trim();\n\t} catch (error) {\n\t\treturn null;\n\t}\n}\n\n/**\n * Get list of all local git branches\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<string[]>} Array of branch names\n */\nasync function getLocalBranches(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getLocalBranches');\n\t}\n\n\ttry {\n\t\tconst { stdout } = await execAsync(\n\t\t\t'git branch --format=\"%(refname:short)\"',\n\t\t\t{ cwd: projectRoot }\n\t\t);\n\t\treturn stdout\n\t\t\t.trim()\n\t\t\t.split('\\n')\n\t\t\t.filter((branch) => branch.length > 0)\n\t\t\t.map((branch) => branch.trim());\n\t} catch (error) {\n\t\treturn [];\n\t}\n}\n\n/**\n * Get list of all remote branches\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<string[]>} Array of remote branch names (without remote prefix)\n */\nasync function getRemoteBranches(projectRoot) {\n\tif 
(!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getRemoteBranches');\n\t}\n\n\ttry {\n\t\tconst { stdout } = await execAsync(\n\t\t\t'git branch -r --format=\"%(refname:short)\"',\n\t\t\t{ cwd: projectRoot }\n\t\t);\n\t\treturn stdout\n\t\t\t.trim()\n\t\t\t.split('\\n')\n\t\t\t.filter((branch) => branch.length > 0 && !branch.includes('HEAD'))\n\t\t\t.map((branch) => branch.replace(/^origin\\//, '').trim());\n\t} catch (error) {\n\t\treturn [];\n\t}\n}\n\n/**\n * Check if gh CLI is available and authenticated\n * @param {string} [projectRoot] - Directory context (optional for this check)\n * @returns {Promise<boolean>} True if gh CLI is available and authenticated\n */\nasync function isGhCliAvailable(projectRoot = null) {\n\ttry {\n\t\tconst options = projectRoot ? { cwd: projectRoot } : {};\n\t\tawait execAsync('gh auth status', options);\n\t\treturn true;\n\t} catch (error) {\n\t\treturn false;\n\t}\n}\n\n/**\n * Get GitHub repository information using gh CLI\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<Object|null>} Repository info or null if not available\n */\nasync function getGitHubRepoInfo(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getGitHubRepoInfo');\n\t}\n\n\ttry {\n\t\tconst { stdout } = await execAsync(\n\t\t\t'gh repo view --json name,owner,defaultBranchRef',\n\t\t\t{ cwd: projectRoot }\n\t\t);\n\t\treturn JSON.parse(stdout);\n\t} catch (error) {\n\t\treturn null;\n\t}\n}\n\n/**\n * Sanitize branch name to be a valid tag name\n * @param {string} branchName - Git branch name\n * @returns {string} Sanitized tag name\n */\nfunction sanitizeBranchNameForTag(branchName) {\n\tif (!branchName || typeof branchName !== 'string') {\n\t\treturn 'unknown-branch';\n\t}\n\n\t// Replace invalid characters with hyphens and clean up\n\treturn branchName\n\t\t.replace(/[^a-zA-Z0-9_-]/g, '-') // Replace invalid chars with hyphens\n\t\t.replace(/^-+|-+$/g, '') // 
Remove leading/trailing hyphens\n\t\t.replace(/-+/g, '-') // Collapse multiple hyphens\n\t\t.toLowerCase() // Convert to lowercase\n\t\t.substring(0, 50); // Limit length\n}\n\n/**\n * Check if a branch name would create a valid tag name\n * @param {string} branchName - Git branch name\n * @returns {boolean} True if branch name can be converted to valid tag\n */\nfunction isValidBranchForTag(branchName) {\n\tif (!branchName || typeof branchName !== 'string') {\n\t\treturn false;\n\t}\n\n\t// Check if it's a reserved branch name that shouldn't become tags\n\tconst reservedBranches = ['main', 'master', 'develop', 'dev', 'HEAD'];\n\tif (reservedBranches.includes(branchName.toLowerCase())) {\n\t\treturn false;\n\t}\n\n\t// Check if sanitized name would be meaningful\n\tconst sanitized = sanitizeBranchNameForTag(branchName);\n\treturn sanitized.length > 0 && sanitized !== 'unknown-branch';\n}\n\n/**\n * Get git repository root directory\n * @param {string} projectRoot - Directory to start search from (required)\n * @returns {Promise<string|null>} Git repository root path or null\n */\nasync function getGitRepositoryRoot(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getGitRepositoryRoot');\n\t}\n\n\ttry {\n\t\tconst { stdout } = await execAsync('git rev-parse --show-toplevel', {\n\t\t\tcwd: projectRoot\n\t\t});\n\t\treturn stdout.trim();\n\t} catch (error) {\n\t\treturn null;\n\t}\n}\n\n/**\n * Check if specified directory is the git repository root\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<boolean>} True if directory is git root\n */\nasync function isGitRepositoryRoot(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for isGitRepositoryRoot');\n\t}\n\n\ttry {\n\t\tconst gitRoot = await getGitRepositoryRoot(projectRoot);\n\t\treturn gitRoot && path.resolve(gitRoot) === path.resolve(projectRoot);\n\t} catch (error) {\n\t\treturn false;\n\t}\n}\n\n/**\n 
* Get the default branch name for the repository\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<string|null>} Default branch name or null\n */\nasync function getDefaultBranch(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getDefaultBranch');\n\t}\n\n\ttry {\n\t\t// Try to get from GitHub first (if gh CLI is available)\n\t\tif (await isGhCliAvailable(projectRoot)) {\n\t\t\tconst repoInfo = await getGitHubRepoInfo(projectRoot);\n\t\t\tif (repoInfo && repoInfo.defaultBranchRef) {\n\t\t\t\treturn repoInfo.defaultBranchRef.name;\n\t\t\t}\n\t\t}\n\n\t\t// Fallback to git remote info\n\t\tconst { stdout } = await execAsync(\n\t\t\t'git symbolic-ref refs/remotes/origin/HEAD',\n\t\t\t{ cwd: projectRoot }\n\t\t);\n\t\treturn stdout.replace('refs/remotes/origin/', '').trim();\n\t} catch (error) {\n\t\t// Final fallback - common default branch names\n\t\tconst commonDefaults = ['main', 'master'];\n\t\tconst branches = await getLocalBranches(projectRoot);\n\n\t\tfor (const defaultName of commonDefaults) {\n\t\t\tif (branches.includes(defaultName)) {\n\t\t\t\treturn defaultName;\n\t\t\t}\n\t\t}\n\n\t\treturn null;\n\t}\n}\n\n/**\n * Check if we're currently on the default branch\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<boolean>} True if on default branch\n */\nasync function isOnDefaultBranch(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for isOnDefaultBranch');\n\t}\n\n\ttry {\n\t\tconst currentBranch = await getCurrentBranch(projectRoot);\n\t\tconst defaultBranch = await getDefaultBranch(projectRoot);\n\t\treturn currentBranch && defaultBranch && currentBranch === defaultBranch;\n\t} catch (error) {\n\t\treturn false;\n\t}\n}\n\n/**\n * Check and automatically switch tags based on git branch if enabled\n * This runs automatically during task operations, similar to migration\n * @param {string} projectRoot - Project 
root directory (required)\n * @param {string} tasksPath - Path to tasks.json file\n * @returns {Promise<void>}\n */\nasync function checkAndAutoSwitchGitTag(projectRoot, tasksPath) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for checkAndAutoSwitchGitTag');\n\t}\n\n\t// DISABLED: Automatic git workflow is too rigid and opinionated\n\t// Users should explicitly use git-tag commands if they want integration\n\treturn;\n}\n\n/**\n * Synchronous version of git tag checking and switching\n * This runs during readJSON to ensure git integration happens BEFORE tag resolution\n * @param {string} projectRoot - Project root directory (required)\n * @param {string} tasksPath - Path to tasks.json file\n * @returns {void}\n */\nfunction checkAndAutoSwitchGitTagSync(projectRoot, tasksPath) {\n\tif (!projectRoot) {\n\t\treturn; // Can't proceed without project root\n\t}\n\n\t// DISABLED: Automatic git workflow is too rigid and opinionated\n\t// Users should explicitly use git-tag commands if they want integration\n\treturn;\n}\n\n/**\n * Synchronous check if directory is in a git repository\n * @param {string} projectRoot - Directory to check (required)\n * @returns {boolean} True if inside a git repository\n */\nfunction isGitRepositorySync(projectRoot) {\n\tif (!projectRoot) {\n\t\treturn false;\n\t}\n\n\ttry {\n\t\texecSync('git rev-parse --git-dir', {\n\t\t\tcwd: projectRoot,\n\t\t\tstdio: 'ignore' // Suppress output\n\t\t});\n\t\treturn true;\n\t} catch (error) {\n\t\treturn false;\n\t}\n}\n\n/**\n * Synchronous get current git branch name\n * @param {string} projectRoot - Directory to check (required)\n * @returns {string|null} Current branch name or null if not in git repo\n */\nfunction getCurrentBranchSync(projectRoot) {\n\tif (!projectRoot) {\n\t\treturn null;\n\t}\n\n\ttry {\n\t\tconst stdout = execSync('git rev-parse --abbrev-ref HEAD', {\n\t\t\tcwd: projectRoot,\n\t\t\tencoding: 'utf8'\n\t\t});\n\t\treturn stdout.trim();\n\t} catch (error) 
{\n\t\treturn null;\n\t}\n}\n\n/**\n * Check if the current working directory is inside a Git work-tree.\n * Uses `git rev-parse --is-inside-work-tree` which is more specific than --git-dir\n * for detecting work-trees (excludes bare repos and .git directories).\n * This is ideal for preventing accidental git init in existing work-trees.\n * @returns {boolean} True if inside a Git work-tree, false otherwise.\n */\nfunction insideGitWorkTree() {\n\ttry {\n\t\texecSync('git rev-parse --is-inside-work-tree', {\n\t\t\tstdio: 'ignore',\n\t\t\tcwd: process.cwd()\n\t\t});\n\t\treturn true;\n\t} catch {\n\t\treturn false;\n\t}\n}\n\n// Export all functions\nexport {\n\tisGitRepository,\n\tgetCurrentBranch,\n\tgetLocalBranches,\n\tgetRemoteBranches,\n\tisGhCliAvailable,\n\tgetGitHubRepoInfo,\n\tsanitizeBranchNameForTag,\n\tisValidBranchForTag,\n\tgetGitRepositoryRoot,\n\tisGitRepositoryRoot,\n\tgetDefaultBranch,\n\tisOnDefaultBranch,\n\tcheckAndAutoSwitchGitTag,\n\tcheckAndAutoSwitchGitTagSync,\n\tisGitRepositorySync,\n\tgetCurrentBranchSync,\n\tinsideGitWorkTree\n};\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/delete-tag.js", "/**\n * delete-tag.js\n * Direct function implementation for deleting a tag\n */\n\nimport { deleteTag } from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for deleting a tag with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.name - Name of the tag to delete\n * @param {boolean} [args.yes=false] - Skip confirmation prompts\n * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns 
{Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function deleteTagDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, name, yes = false, projectRoot } = args;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('deleteTagDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check required parameters\n\t\tif (!name || typeof name !== 'string') {\n\t\t\tlog.error('Missing required parameter: name');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage: 'Tag name is required and must be a string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlog.info(`Deleting tag: ${name}`);\n\n\t\t// Prepare options\n\t\tconst options = {\n\t\t\tyes // For MCP, we always skip confirmation prompts\n\t\t};\n\n\t\t// Call the deleteTag function\n\t\tconst result = await deleteTag(\n\t\t\ttasksJsonPath,\n\t\t\tname,\n\t\t\toptions,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\ttagName: result.tagName,\n\t\t\t\tdeleted: result.deleted,\n\t\t\t\ttasksDeleted: result.tasksDeleted,\n\t\t\t\twasCurrentTag: result.wasCurrentTag,\n\t\t\t\tswitchedToMaster: result.switchedToMaster,\n\t\t\t\tmessage: `Successfully 
deleted tag \"${result.tagName}\"`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in deleteTagDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'DELETE_TAG_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/rules.js", "/**\n * rules.js\n * Direct function implementation for adding or removing rules\n */\n\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport {\n\tconvertAllRulesToProfileRules,\n\tremoveProfileRules,\n\tgetRulesProfile,\n\tisValidProfile\n} from '../../../../src/utils/rule-transformer.js';\nimport { RULE_PROFILES } from '../../../../src/constants/profiles.js';\nimport { RULES_ACTIONS } from '../../../../src/constants/rules-actions.js';\nimport {\n\twouldRemovalLeaveNoProfiles,\n\tgetInstalledProfiles\n} from '../../../../src/utils/profiles.js';\nimport path from 'path';\nimport fs from 'fs';\n\n/**\n * Direct function wrapper for adding or removing rules.\n * @param {Object} args - Command arguments\n * @param {\"add\"|\"remove\"} args.action - Action to perform: add or remove rules\n * @param {string[]} args.profiles - List of profiles to add or remove\n * @param {string} args.projectRoot - Absolute path to the project root\n * @param {boolean} [args.yes=true] - Run non-interactively\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function rulesDirect(args, log, context = {}) {\n\tenableSilentMode();\n\ttry {\n\t\tconst { action, profiles, projectRoot, yes, force } = args;\n\t\tif (\n\t\t\t!action ||\n\t\t\t!Array.isArray(profiles) ||\n\t\t\tprofiles.length === 0 
||\n\t\t\t!projectRoot\n\t\t) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'action, profiles, and projectRoot are required.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tconst removalResults = [];\n\t\tconst addResults = [];\n\n\t\tif (action === RULES_ACTIONS.REMOVE) {\n\t\t\t// Safety check: Ensure this won't remove all rule profiles (unless forced)\n\t\t\tif (!force && wouldRemovalLeaveNoProfiles(projectRoot, profiles)) {\n\t\t\t\tconst installedProfiles = getInstalledProfiles(projectRoot);\n\t\t\t\tconst remainingProfiles = installedProfiles.filter(\n\t\t\t\t\t(profile) => !profiles.includes(profile)\n\t\t\t\t);\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'CRITICAL_REMOVAL_BLOCKED',\n\t\t\t\t\t\tmessage: `CRITICAL: This operation would remove ALL remaining rule profiles (${profiles.join(', ')}), leaving your project with no rules configurations. This could significantly impact functionality. Currently installed profiles: ${installedProfiles.join(', ')}. If you're certain you want to proceed, set force: true or use the CLI with --force flag.`\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tfor (const profile of profiles) {\n\t\t\t\tif (!isValidProfile(profile)) {\n\t\t\t\t\tremovalResults.push({\n\t\t\t\t\t\tprofileName: profile,\n\t\t\t\t\t\tsuccess: false,\n\t\t\t\t\t\terror: `The requested rule profile for '${profile}' is unavailable. 
Supported profiles are: ${RULE_PROFILES.join(', ')}.`\n\t\t\t\t\t});\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tconst profileConfig = getRulesProfile(profile);\n\t\t\t\tconst result = removeProfileRules(projectRoot, profileConfig);\n\t\t\t\tremovalResults.push(result);\n\t\t\t}\n\t\t\tconst successes = removalResults\n\t\t\t\t.filter((r) => r.success)\n\t\t\t\t.map((r) => r.profileName);\n\t\t\tconst skipped = removalResults\n\t\t\t\t.filter((r) => r.skipped)\n\t\t\t\t.map((r) => r.profileName);\n\t\t\tconst errors = removalResults.filter(\n\t\t\t\t(r) => r.error && !r.success && !r.skipped\n\t\t\t);\n\t\t\tconst withNotices = removalResults.filter((r) => r.notice);\n\n\t\t\tlet summary = '';\n\t\t\tif (successes.length > 0) {\n\t\t\t\tsummary += `Successfully removed Task Master rules: ${successes.join(', ')}.`;\n\t\t\t}\n\t\t\tif (skipped.length > 0) {\n\t\t\t\tsummary += `Skipped (default or protected): ${skipped.join(', ')}.`;\n\t\t\t}\n\t\t\tif (errors.length > 0) {\n\t\t\t\tsummary += errors\n\t\t\t\t\t.map((r) => `Error removing ${r.profileName}: ${r.error}`)\n\t\t\t\t\t.join(' ');\n\t\t\t}\n\t\t\tif (withNotices.length > 0) {\n\t\t\t\tsummary += ` Notices: ${withNotices.map((r) => `${r.profileName} - ${r.notice}`).join('; ')}.`;\n\t\t\t}\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: errors.length === 0,\n\t\t\t\tdata: { summary, results: removalResults }\n\t\t\t};\n\t\t} else if (action === RULES_ACTIONS.ADD) {\n\t\t\tfor (const profile of profiles) {\n\t\t\t\tif (!isValidProfile(profile)) {\n\t\t\t\t\taddResults.push({\n\t\t\t\t\t\tprofileName: profile,\n\t\t\t\t\t\tsuccess: false,\n\t\t\t\t\t\terror: `Profile not found: static import missing for '${profile}'. 
Valid profiles: ${RULE_PROFILES.join(', ')}`\n\t\t\t\t\t});\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tconst profileConfig = getRulesProfile(profile);\n\t\t\t\tconst { success, failed } = convertAllRulesToProfileRules(\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tprofileConfig\n\t\t\t\t);\n\n\t\t\t\t// Determine paths\n\t\t\t\tconst rulesDir = profileConfig.rulesDir;\n\t\t\t\tconst profileRulesDir = path.join(projectRoot, rulesDir);\n\t\t\t\tconst profileDir = profileConfig.profileDir;\n\t\t\t\tconst mcpConfig = profileConfig.mcpConfig !== false;\n\t\t\t\tconst mcpPath =\n\t\t\t\t\tmcpConfig && profileConfig.mcpConfigPath\n\t\t\t\t\t\t? path.join(projectRoot, profileConfig.mcpConfigPath)\n\t\t\t\t\t\t: null;\n\n\t\t\t\t// Check what was created\n\t\t\t\tconst mcpConfigCreated =\n\t\t\t\t\tmcpConfig && mcpPath ? fs.existsSync(mcpPath) : undefined;\n\t\t\t\tconst rulesDirCreated = fs.existsSync(profileRulesDir);\n\t\t\t\tconst profileFolderCreated = fs.existsSync(\n\t\t\t\t\tpath.join(projectRoot, profileDir)\n\t\t\t\t);\n\n\t\t\t\tconst error =\n\t\t\t\t\tfailed > 0 ? `${failed} rule files failed to convert.` : null;\n\t\t\t\tconst resultObj = {\n\t\t\t\t\tprofileName: profile,\n\t\t\t\t\tmcpConfigCreated,\n\t\t\t\t\trulesDirCreated,\n\t\t\t\t\tprofileFolderCreated,\n\t\t\t\t\tskipped: false,\n\t\t\t\t\terror,\n\t\t\t\t\tsuccess:\n\t\t\t\t\t\t(mcpConfig ? 
mcpConfigCreated : true) &&\n\t\t\t\t\t\trulesDirCreated &&\n\t\t\t\t\t\tsuccess > 0 &&\n\t\t\t\t\t\t!error\n\t\t\t\t};\n\t\t\t\taddResults.push(resultObj);\n\t\t\t}\n\n\t\t\tconst successes = addResults\n\t\t\t\t.filter((r) => r.success)\n\t\t\t\t.map((r) => r.profileName);\n\t\t\tconst errors = addResults.filter((r) => r.error && !r.success);\n\n\t\t\tlet summary = '';\n\t\t\tif (successes.length > 0) {\n\t\t\t\tsummary += `Successfully added rules: ${successes.join(', ')}.`;\n\t\t\t}\n\t\t\tif (errors.length > 0) {\n\t\t\t\tsummary += errors\n\t\t\t\t\t.map((r) => ` Error adding ${r.profileName}: ${r.error}`)\n\t\t\t\t\t.join(' ');\n\t\t\t}\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: errors.length === 0,\n\t\t\t\tdata: { summary, results: addResults }\n\t\t\t};\n\t\t} else {\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_ACTION',\n\t\t\t\t\tmessage: `Unknown action. Use \"${RULES_ACTIONS.ADD}\" or \"${RULES_ACTIONS.REMOVE}\".`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\tdisableSilentMode();\n\t\tlog.error(`[rulesDirect] Error: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'RULES_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/use-tag.js", "/**\n * use-tag.js\n * Direct function implementation for switching to a tag\n */\n\nimport { useTag } from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for switching to a tag with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.name - Name of the tag to switch to\n * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by 
tool)\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function useTagDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, name, projectRoot } = args;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('useTagDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check required parameters\n\t\tif (!name || typeof name !== 'string') {\n\t\t\tlog.error('Missing required parameter: name');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage: 'Tag name is required and must be a string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlog.info(`Switching to tag: ${name}`);\n\n\t\t// Call the useTag function\n\t\tconst result = await useTag(\n\t\t\ttasksJsonPath,\n\t\t\tname,\n\t\t\t{}, // options (empty for now)\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\ttagName: result.currentTag,\n\t\t\t\tswitched: result.switched,\n\t\t\t\tpreviousTag: result.previousTag,\n\t\t\t\ttaskCount: result.taskCount,\n\t\t\t\tmessage: `Successfully 
switched to tag \"${result.currentTag}\"`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in useTagDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'USE_TAG_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/tools/get-tasks.js", "/**\n * tools/get-tasks.js\n * Tool to get all tasks from Task Master\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { listTasksDirect } from '../core/task-master-core.js';\nimport {\n\tresolveTasksPath,\n\tresolveComplexityReportPath\n} from '../core/utils/path-utils.js';\n\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the getTasks tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerListTasksTool(server) {\n\tserver.addTool({\n\t\tname: 'get_tasks',\n\t\tdescription:\n\t\t\t'Get all tasks from Task Master, optionally filtering by status and including subtasks.',\n\t\tparameters: z.object({\n\t\t\tstatus: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t\"Filter tasks by status (e.g., 'pending', 'done') or multiple statuses separated by commas (e.g., 'blocked,deferred')\"\n\t\t\t\t),\n\t\t\twithSubtasks: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Include subtasks nested within their parent tasks in the response'\n\t\t\t\t),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the tasks file (relative to project root or absolute)'\n\t\t\t\t),\n\t\t\tcomplexityReport: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the complexity report file (relative to project root or 
absolute)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Getting tasks with filters: ${JSON.stringify(args)}`);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\t// Resolve the path to tasks.json using new path utilities\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = resolveTasksPath(args, log);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Resolve the path to complexity report\n\t\t\t\tlet complexityReportPath;\n\t\t\t\ttry {\n\t\t\t\t\tcomplexityReportPath = resolveComplexityReportPath(\n\t\t\t\t\t\t{ ...args, tag: resolvedTag },\n\t\t\t\t\t\tsession\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding complexity report: ${error.message}`);\n\t\t\t\t\t// This is optional, so we don't fail the operation\n\t\t\t\t\tcomplexityReportPath = null;\n\t\t\t\t}\n\n\t\t\t\tconst result = await listTasksDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tstatus: args.status,\n\t\t\t\t\t\twithSubtasks: args.withSubtasks,\n\t\t\t\t\t\treportPath: complexityReportPath,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tlog.info(\n\t\t\t\t\t`Retrieved ${result.success ? 
result.data?.tasks?.length || 0 : 0} tasks`\n\t\t\t\t);\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error getting tasks',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error getting tasks: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n\n// We no longer need the formatTasksResponse function as we're returning raw JSON data\n"], ["/claude-task-master/src/profiles/amp.js", "// Amp profile for rule-transformer\nimport path from 'path';\nimport fs from 'fs';\nimport { isSilentMode, log } from '../../scripts/modules/utils.js';\nimport { createProfile } from './base-profile.js';\n\n/**\n * Transform standard MCP config format to Amp format\n * @param {Object} mcpConfig - Standard MCP configuration object\n * @returns {Object} - Transformed Amp configuration object\n */\nfunction transformToAmpFormat(mcpConfig) {\n\tconst ampConfig = {};\n\n\t// Transform mcpServers to amp.mcpServers\n\tif (mcpConfig.mcpServers) {\n\t\tampConfig['amp.mcpServers'] = mcpConfig.mcpServers;\n\t}\n\n\t// Preserve any other existing settings\n\tfor (const [key, value] of Object.entries(mcpConfig)) {\n\t\tif (key !== 'mcpServers') {\n\t\t\tampConfig[key] = value;\n\t\t}\n\t}\n\n\treturn ampConfig;\n}\n\n// Lifecycle functions for Amp profile\nfunction onAddRulesProfile(targetDir, assetsDir) {\n\t// Handle AGENT.md import for non-destructive integration (Amp uses AGENT.md, copies from AGENTS.md)\n\tconst sourceFile = path.join(assetsDir, 'AGENTS.md');\n\tconst userAgentFile = path.join(targetDir, 'AGENT.md');\n\tconst taskMasterAgentFile = path.join(targetDir, '.taskmaster', 'AGENT.md');\n\tconst importLine = '@./.taskmaster/AGENT.md';\n\tconst importSection = `\\n## Task Master AI Instructions\\n**Import Task Master's development workflow commands and guidelines, treat as if import is in the main AGENT.md file.**\\n${importLine}`;\n\n\tif 
(fs.existsSync(sourceFile)) {\n\t\ttry {\n\t\t\t// Ensure .taskmaster directory exists\n\t\t\tconst taskMasterDir = path.join(targetDir, '.taskmaster');\n\t\t\tif (!fs.existsSync(taskMasterDir)) {\n\t\t\t\tfs.mkdirSync(taskMasterDir, { recursive: true });\n\t\t\t}\n\n\t\t\t// Copy Task Master instructions to .taskmaster/AGENT.md\n\t\t\tfs.copyFileSync(sourceFile, taskMasterAgentFile);\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[Amp] Created Task Master instructions at ${taskMasterAgentFile}`\n\t\t\t);\n\n\t\t\t// Handle user's AGENT.md\n\t\t\tif (fs.existsSync(userAgentFile)) {\n\t\t\t\t// Check if import already exists\n\t\t\t\tconst content = fs.readFileSync(userAgentFile, 'utf8');\n\t\t\t\tif (!content.includes(importLine)) {\n\t\t\t\t\t// Append import section at the end\n\t\t\t\t\tconst updatedContent = content.trim() + '\\n' + importSection + '\\n';\n\t\t\t\t\tfs.writeFileSync(userAgentFile, updatedContent);\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'info',\n\t\t\t\t\t\t`[Amp] Added Task Master import to existing ${userAgentFile}`\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'info',\n\t\t\t\t\t\t`[Amp] Task Master import already present in ${userAgentFile}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Create minimal AGENT.md with the import section\n\t\t\t\tconst minimalContent = `# Amp Instructions\\n${importSection}\\n`;\n\t\t\t\tfs.writeFileSync(userAgentFile, minimalContent);\n\t\t\t\tlog('info', `[Amp] Created ${userAgentFile} with Task Master import`);\n\t\t\t}\n\t\t} catch (err) {\n\t\t\tlog('error', `[Amp] Failed to set up Amp instructions: ${err.message}`);\n\t\t}\n\t}\n\n\t// MCP transformation will be handled in onPostConvertRulesProfile\n}\n\nfunction onRemoveRulesProfile(targetDir) {\n\t// Clean up AGENT.md import (Amp uses AGENT.md, not AGENTS.md)\n\tconst userAgentFile = path.join(targetDir, 'AGENT.md');\n\tconst taskMasterAgentFile = path.join(targetDir, '.taskmaster', 'AGENT.md');\n\tconst importLine = 
'@./.taskmaster/AGENT.md';\n\n\ttry {\n\t\t// Remove Task Master AGENT.md from .taskmaster\n\t\tif (fs.existsSync(taskMasterAgentFile)) {\n\t\t\tfs.rmSync(taskMasterAgentFile, { force: true });\n\t\t\tlog('debug', `[Amp] Removed ${taskMasterAgentFile}`);\n\t\t}\n\n\t\t// Clean up import from user's AGENT.md\n\t\tif (fs.existsSync(userAgentFile)) {\n\t\t\tconst content = fs.readFileSync(userAgentFile, 'utf8');\n\t\t\tconst lines = content.split('\\n');\n\t\t\tconst filteredLines = [];\n\t\t\tlet skipNextLines = 0;\n\n\t\t\t// Remove the Task Master section\n\t\t\tfor (let i = 0; i < lines.length; i++) {\n\t\t\t\tif (skipNextLines > 0) {\n\t\t\t\t\tskipNextLines--;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t// Check if this is the start of our Task Master section\n\t\t\t\tif (lines[i].includes('## Task Master AI Instructions')) {\n\t\t\t\t\t// Skip this line and the next two lines (bold text and import)\n\t\t\t\t\tskipNextLines = 2;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t// Also remove standalone import lines (for backward compatibility)\n\t\t\t\tif (lines[i].trim() === importLine) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tfilteredLines.push(lines[i]);\n\t\t\t}\n\n\t\t\t// Join back and clean up excessive newlines\n\t\t\tlet updatedContent = filteredLines\n\t\t\t\t.join('\\n')\n\t\t\t\t.replace(/\\n{3,}/g, '\\n\\n')\n\t\t\t\t.trim();\n\n\t\t\t// Check if file only contained our minimal template\n\t\t\tif (updatedContent === '# Amp Instructions' || updatedContent === '') {\n\t\t\t\t// File only contained our import, remove it\n\t\t\t\tfs.rmSync(userAgentFile, { force: true });\n\t\t\t\tlog('debug', `[Amp] Removed empty ${userAgentFile}`);\n\t\t\t} else {\n\t\t\t\t// Write back without the import\n\t\t\t\tfs.writeFileSync(userAgentFile, updatedContent + '\\n');\n\t\t\t\tlog('debug', `[Amp] Removed Task Master import from ${userAgentFile}`);\n\t\t\t}\n\t\t}\n\t} catch (err) {\n\t\tlog('error', `[Amp] Failed to remove Amp instructions: 
${err.message}`);\n\t}\n\n\t// MCP Removal: Remove amp.mcpServers section\n\tconst mcpConfigPath = path.join(targetDir, '.vscode', 'settings.json');\n\n\tif (!fs.existsSync(mcpConfigPath)) {\n\t\tlog('debug', '[Amp] No .vscode/settings.json found to clean up');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the current config\n\t\tconst configContent = fs.readFileSync(mcpConfigPath, 'utf8');\n\t\tconst config = JSON.parse(configContent);\n\n\t\t// Check if it has the amp.mcpServers section and task-master-ai server\n\t\tif (\n\t\t\tconfig['amp.mcpServers'] &&\n\t\t\tconfig['amp.mcpServers']['task-master-ai']\n\t\t) {\n\t\t\t// Remove task-master-ai server\n\t\t\tdelete config['amp.mcpServers']['task-master-ai'];\n\n\t\t\t// Check if there are other MCP servers in amp.mcpServers\n\t\t\tconst remainingServers = Object.keys(config['amp.mcpServers']);\n\n\t\t\tif (remainingServers.length === 0) {\n\t\t\t\t// No other servers, remove entire amp.mcpServers section\n\t\t\t\tdelete config['amp.mcpServers'];\n\t\t\t\tlog('debug', '[Amp] Removed empty amp.mcpServers section');\n\t\t\t}\n\n\t\t\t// Check if config is now empty\n\t\t\tconst remainingKeys = Object.keys(config);\n\n\t\t\tif (remainingKeys.length === 0) {\n\t\t\t\t// Config is empty, remove entire file\n\t\t\t\tfs.rmSync(mcpConfigPath, { force: true });\n\t\t\t\tlog('info', '[Amp] Removed empty settings.json file');\n\n\t\t\t\t// Check if .vscode directory is empty\n\t\t\t\tconst vscodeDirPath = path.join(targetDir, '.vscode');\n\t\t\t\tif (fs.existsSync(vscodeDirPath)) {\n\t\t\t\t\tconst remainingContents = fs.readdirSync(vscodeDirPath);\n\t\t\t\t\tif (remainingContents.length === 0) {\n\t\t\t\t\t\tfs.rmSync(vscodeDirPath, { recursive: true, force: true });\n\t\t\t\t\t\tlog('debug', '[Amp] Removed empty .vscode directory');\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Write back the modified config\n\t\t\t\tfs.writeFileSync(\n\t\t\t\t\tmcpConfigPath,\n\t\t\t\t\tJSON.stringify(config, null, '\\t') + 
'\\n'\n\t\t\t\t);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'[Amp] Removed TaskMaster from settings.json, preserved other configurations'\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\tlog('debug', '[Amp] TaskMaster not found in amp.mcpServers');\n\t\t}\n\t} catch (error) {\n\t\tlog('error', `[Amp] Failed to clean up settings.json: ${error.message}`);\n\t}\n}\n\nfunction onPostConvertRulesProfile(targetDir, assetsDir) {\n\t// Handle AGENT.md setup (same as onAddRulesProfile)\n\tonAddRulesProfile(targetDir, assetsDir);\n\n\t// Transform MCP config to Amp format\n\tconst mcpConfigPath = path.join(targetDir, '.vscode', 'settings.json');\n\n\tif (!fs.existsSync(mcpConfigPath)) {\n\t\tlog('debug', '[Amp] No .vscode/settings.json found to transform');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the generated standard MCP config\n\t\tconst mcpConfigContent = fs.readFileSync(mcpConfigPath, 'utf8');\n\t\tconst mcpConfig = JSON.parse(mcpConfigContent);\n\n\t\t// Check if it's already in Amp format (has amp.mcpServers)\n\t\tif (mcpConfig['amp.mcpServers']) {\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t'[Amp] settings.json already in Amp format, skipping transformation'\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\t// Transform to Amp format\n\t\tconst ampConfig = transformToAmpFormat(mcpConfig);\n\n\t\t// Write back the transformed config with proper formatting\n\t\tfs.writeFileSync(\n\t\t\tmcpConfigPath,\n\t\t\tJSON.stringify(ampConfig, null, '\\t') + '\\n'\n\t\t);\n\n\t\tlog('info', '[Amp] Transformed settings.json to Amp format');\n\t\tlog('debug', '[Amp] Renamed mcpServers to amp.mcpServers');\n\t} catch (error) {\n\t\tlog('error', `[Amp] Failed to transform settings.json: ${error.message}`);\n\t}\n}\n\n// Create and export amp profile using the base factory\nexport const ampProfile = createProfile({\n\tname: 'amp',\n\tdisplayName: 'Amp',\n\turl: 'ampcode.com',\n\tdocsUrl: 'ampcode.com/manual',\n\tprofileDir: '.vscode',\n\trulesDir: '.',\n\tmcpConfig: true,\n\tmcpConfigName: 
'settings.json',\n\tincludeDefaultRules: false,\n\tfileMap: {\n\t\t'AGENTS.md': '.taskmaster/AGENT.md'\n\t},\n\tonAdd: onAddRulesProfile,\n\tonRemove: onRemoveRulesProfile,\n\tonPostConvert: onPostConvertRulesProfile\n});\n\n// Export lifecycle functions separately to avoid naming conflicts\nexport { onAddRulesProfile, onRemoveRulesProfile, onPostConvertRulesProfile };\n"], ["/claude-task-master/src/profiles/claude.js", "// Claude Code profile for rule-transformer\nimport path from 'path';\nimport fs from 'fs';\nimport { isSilentMode, log } from '../../scripts/modules/utils.js';\nimport { createProfile } from './base-profile.js';\n\n// Helper function to recursively copy directory (adopted from Roo profile)\nfunction copyRecursiveSync(src, dest) {\n\tconst exists = fs.existsSync(src);\n\tconst stats = exists && fs.statSync(src);\n\tconst isDirectory = exists && stats.isDirectory();\n\tif (isDirectory) {\n\t\tif (!fs.existsSync(dest)) fs.mkdirSync(dest, { recursive: true });\n\t\tfs.readdirSync(src).forEach((childItemName) => {\n\t\t\tcopyRecursiveSync(\n\t\t\t\tpath.join(src, childItemName),\n\t\t\t\tpath.join(dest, childItemName)\n\t\t\t);\n\t\t});\n\t} else {\n\t\tfs.copyFileSync(src, dest);\n\t}\n}\n\n// Helper function to recursively remove directory\nfunction removeDirectoryRecursive(dirPath) {\n\tif (fs.existsSync(dirPath)) {\n\t\ttry {\n\t\t\tfs.rmSync(dirPath, { recursive: true, force: true });\n\t\t\treturn true;\n\t\t} catch (err) {\n\t\t\tlog('error', `Failed to remove directory ${dirPath}: ${err.message}`);\n\t\t\treturn false;\n\t\t}\n\t}\n\treturn true;\n}\n\n// Lifecycle functions for Claude Code profile\nfunction onAddRulesProfile(targetDir, assetsDir) {\n\t// Copy .claude directory recursively\n\tconst claudeSourceDir = path.join(assetsDir, 'claude');\n\tconst claudeDestDir = path.join(targetDir, '.claude');\n\n\tif (!fs.existsSync(claudeSourceDir)) {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[Claude] Source directory does not exist: 
${claudeSourceDir}`\n\t\t);\n\t\treturn;\n\t}\n\n\ttry {\n\t\tcopyRecursiveSync(claudeSourceDir, claudeDestDir);\n\t\tlog('debug', `[Claude] Copied .claude directory to ${claudeDestDir}`);\n\t} catch (err) {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[Claude] An error occurred during directory copy: ${err.message}`\n\t\t);\n\t}\n\n\t// Handle CLAUDE.md import for non-destructive integration\n\tconst sourceFile = path.join(assetsDir, 'AGENTS.md');\n\tconst userClaudeFile = path.join(targetDir, 'CLAUDE.md');\n\tconst taskMasterClaudeFile = path.join(targetDir, '.taskmaster', 'CLAUDE.md');\n\tconst importLine = '@./.taskmaster/CLAUDE.md';\n\tconst importSection = `\\n## Task Master AI Instructions\\n**Import Task Master's development workflow commands and guidelines, treat as if import is in the main CLAUDE.md file.**\\n${importLine}`;\n\n\tif (fs.existsSync(sourceFile)) {\n\t\ttry {\n\t\t\t// Ensure .taskmaster directory exists\n\t\t\tconst taskMasterDir = path.join(targetDir, '.taskmaster');\n\t\t\tif (!fs.existsSync(taskMasterDir)) {\n\t\t\t\tfs.mkdirSync(taskMasterDir, { recursive: true });\n\t\t\t}\n\n\t\t\t// Copy Task Master instructions to .taskmaster/CLAUDE.md\n\t\t\tfs.copyFileSync(sourceFile, taskMasterClaudeFile);\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[Claude] Created Task Master instructions at ${taskMasterClaudeFile}`\n\t\t\t);\n\n\t\t\t// Handle user's CLAUDE.md\n\t\t\tif (fs.existsSync(userClaudeFile)) {\n\t\t\t\t// Check if import already exists\n\t\t\t\tconst content = fs.readFileSync(userClaudeFile, 'utf8');\n\t\t\t\tif (!content.includes(importLine)) {\n\t\t\t\t\t// Append import section at the end\n\t\t\t\t\tconst updatedContent = content.trim() + '\\n' + importSection + '\\n';\n\t\t\t\t\tfs.writeFileSync(userClaudeFile, updatedContent);\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'info',\n\t\t\t\t\t\t`[Claude] Added Task Master import to existing ${userClaudeFile}`\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'info',\n\t\t\t\t\t\t`[Claude] Task Master 
import already present in ${userClaudeFile}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Create minimal CLAUDE.md with the import section\n\t\t\t\tconst minimalContent = `# Claude Code Instructions\\n${importSection}\\n`;\n\t\t\t\tfs.writeFileSync(userClaudeFile, minimalContent);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`[Claude] Created ${userClaudeFile} with Task Master import`\n\t\t\t\t);\n\t\t\t}\n\t\t} catch (err) {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`[Claude] Failed to set up Claude instructions: ${err.message}`\n\t\t\t);\n\t\t}\n\t}\n}\n\nfunction onRemoveRulesProfile(targetDir) {\n\t// Remove .claude directory recursively\n\tconst claudeDir = path.join(targetDir, '.claude');\n\tif (removeDirectoryRecursive(claudeDir)) {\n\t\tlog('debug', `[Claude] Removed .claude directory from ${claudeDir}`);\n\t}\n\n\t// Clean up CLAUDE.md import\n\tconst userClaudeFile = path.join(targetDir, 'CLAUDE.md');\n\tconst taskMasterClaudeFile = path.join(targetDir, '.taskmaster', 'CLAUDE.md');\n\tconst importLine = '@./.taskmaster/CLAUDE.md';\n\n\ttry {\n\t\t// Remove Task Master CLAUDE.md from .taskmaster\n\t\tif (fs.existsSync(taskMasterClaudeFile)) {\n\t\t\tfs.rmSync(taskMasterClaudeFile, { force: true });\n\t\t\tlog('debug', `[Claude] Removed ${taskMasterClaudeFile}`);\n\t\t}\n\n\t\t// Clean up import from user's CLAUDE.md\n\t\tif (fs.existsSync(userClaudeFile)) {\n\t\t\tconst content = fs.readFileSync(userClaudeFile, 'utf8');\n\t\t\tconst lines = content.split('\\n');\n\t\t\tconst filteredLines = [];\n\t\t\tlet skipNextLines = 0;\n\n\t\t\t// Remove the Task Master section\n\t\t\tfor (let i = 0; i < lines.length; i++) {\n\t\t\t\tif (skipNextLines > 0) {\n\t\t\t\t\tskipNextLines--;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t// Check if this is the start of our Task Master section\n\t\t\t\tif (lines[i].includes('## Task Master AI Instructions')) {\n\t\t\t\t\t// Skip this line and the next two lines (bold text and import)\n\t\t\t\t\tskipNextLines = 
2;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t// Also remove standalone import lines (for backward compatibility)\n\t\t\t\tif (lines[i].trim() === importLine) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tfilteredLines.push(lines[i]);\n\t\t\t}\n\n\t\t\t// Join back and clean up excessive newlines\n\t\t\tlet updatedContent = filteredLines\n\t\t\t\t.join('\\n')\n\t\t\t\t.replace(/\\n{3,}/g, '\\n\\n')\n\t\t\t\t.trim();\n\n\t\t\t// Check if file only contained our minimal template\n\t\t\tif (\n\t\t\t\tupdatedContent === '# Claude Code Instructions' ||\n\t\t\t\tupdatedContent === ''\n\t\t\t) {\n\t\t\t\t// File only contained our import, remove it\n\t\t\t\tfs.rmSync(userClaudeFile, { force: true });\n\t\t\t\tlog('debug', `[Claude] Removed empty ${userClaudeFile}`);\n\t\t\t} else {\n\t\t\t\t// Write back without the import\n\t\t\t\tfs.writeFileSync(userClaudeFile, updatedContent + '\\n');\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`[Claude] Removed Task Master import from ${userClaudeFile}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t} catch (err) {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[Claude] Failed to remove Claude instructions: ${err.message}`\n\t\t);\n\t}\n}\n\n/**\n * Transform standard MCP config format to Claude format\n * @param {Object} mcpConfig - Standard MCP configuration object\n * @returns {Object} - Transformed Claude configuration object\n */\nfunction transformToClaudeFormat(mcpConfig) {\n\tconst claudeConfig = {};\n\n\t// Transform mcpServers to servers (keeping the same structure but adding type)\n\tif (mcpConfig.mcpServers) {\n\t\tclaudeConfig.mcpServers = {};\n\n\t\tfor (const [serverName, serverConfig] of Object.entries(\n\t\t\tmcpConfig.mcpServers\n\t\t)) {\n\t\t\t// Transform server configuration with type as first key\n\t\t\tconst reorderedServer = {};\n\n\t\t\t// Add type: \"stdio\" as the first key\n\t\t\treorderedServer.type = 'stdio';\n\n\t\t\t// Then add the rest of the properties in order\n\t\t\tif (serverConfig.command) reorderedServer.command = 
serverConfig.command;\n\t\t\tif (serverConfig.args) reorderedServer.args = serverConfig.args;\n\t\t\tif (serverConfig.env) reorderedServer.env = serverConfig.env;\n\n\t\t\t// Add any other properties that might exist\n\t\t\tObject.keys(serverConfig).forEach((key) => {\n\t\t\t\tif (!['command', 'args', 'env', 'type'].includes(key)) {\n\t\t\t\t\treorderedServer[key] = serverConfig[key];\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tclaudeConfig.mcpServers[serverName] = reorderedServer;\n\t\t}\n\t}\n\n\treturn claudeConfig;\n}\n\nfunction onPostConvertRulesProfile(targetDir, assetsDir) {\n\t// For Claude, post-convert is the same as add since we don't transform rules\n\tonAddRulesProfile(targetDir, assetsDir);\n\n\t// Transform MCP configuration to Claude format\n\tconst mcpConfigPath = path.join(targetDir, '.mcp.json');\n\tif (fs.existsSync(mcpConfigPath)) {\n\t\ttry {\n\t\t\tconst mcpConfig = JSON.parse(fs.readFileSync(mcpConfigPath, 'utf8'));\n\t\t\tconst claudeConfig = transformToClaudeFormat(mcpConfig);\n\n\t\t\t// Write back the transformed configuration\n\t\t\tfs.writeFileSync(\n\t\t\t\tmcpConfigPath,\n\t\t\t\tJSON.stringify(claudeConfig, null, '\\t') + '\\n'\n\t\t\t);\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[Claude] Transformed MCP configuration to Claude format at ${mcpConfigPath}`\n\t\t\t);\n\t\t} catch (err) {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`[Claude] Failed to transform MCP configuration: ${err.message}`\n\t\t\t);\n\t\t}\n\t}\n}\n\n// Create and export claude profile using the base factory\nexport const claudeProfile = createProfile({\n\tname: 'claude',\n\tdisplayName: 'Claude Code',\n\turl: 'claude.ai',\n\tdocsUrl: 'docs.anthropic.com/en/docs/claude-code',\n\tprofileDir: '.', // Root directory\n\trulesDir: '.', // No specific rules directory needed\n\tmcpConfigName: '.mcp.json', // Place MCP config in project root\n\tincludeDefaultRules: false,\n\tfileMap: {\n\t\t'AGENTS.md': '.taskmaster/CLAUDE.md'\n\t},\n\tonAdd: onAddRulesProfile,\n\tonRemove: 
onRemoveRulesProfile,\n\tonPostConvert: onPostConvertRulesProfile\n});\n\n// Export lifecycle functions separately to avoid naming conflicts\nexport { onAddRulesProfile, onRemoveRulesProfile, onPostConvertRulesProfile };\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/move-task.js", "/**\n * Direct function wrapper for moveTask\n */\n\nimport { moveTask } from '../../../../scripts/modules/task-manager.js';\nimport { findTasksPath } from '../utils/path-utils.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Move a task or subtask to a new position\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file\n * @param {string} args.sourceId - ID of the task/subtask to move (e.g., '5' or '5.2' or '5,6,7')\n * @param {string} args.destinationId - ID of the destination (e.g., '7' or '7.3' or '7,8,9')\n * @param {string} args.file - Alternative path to the tasks.json file\n * @param {string} args.projectRoot - Project root directory\n * @param {string} args.tag - Tag for the task (optional)\n * @param {boolean} args.generateFiles - Whether to regenerate task files after moving (default: true)\n * @param {Object} log - Logger object\n * @returns {Promise<{success: boolean, data?: Object, error?: Object}>}\n */\nexport async function moveTaskDirect(args, log, context = {}) {\n\tconst { session } = context;\n\tconst { projectRoot, tag } = args;\n\n\t// Validate required parameters\n\tif (!args.sourceId) {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tmessage: 'Source ID is required',\n\t\t\t\tcode: 'MISSING_SOURCE_ID'\n\t\t\t}\n\t\t};\n\t}\n\n\tif (!args.destinationId) {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tmessage: 'Destination ID is required',\n\t\t\t\tcode: 'MISSING_DESTINATION_ID'\n\t\t\t}\n\t\t};\n\t}\n\n\ttry {\n\t\t// Find tasks.json path if not provided\n\t\tlet tasksPath = 
args.tasksJsonPath || args.file;\n\t\tif (!tasksPath) {\n\t\t\tif (!args.projectRoot) {\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tmessage:\n\t\t\t\t\t\t\t'Project root is required if tasksJsonPath is not provided',\n\t\t\t\t\t\tcode: 'MISSING_PROJECT_ROOT'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\t\t\ttasksPath = findTasksPath(args, log);\n\t\t}\n\n\t\t// Enable silent mode to prevent console output during MCP operation\n\t\tenableSilentMode();\n\n\t\t// Call the core moveTask function with file generation control\n\t\tconst generateFiles = args.generateFiles !== false; // Default to true\n\t\tconst result = await moveTask(\n\t\t\ttasksPath,\n\t\t\targs.sourceId,\n\t\t\targs.destinationId,\n\t\t\tgenerateFiles,\n\t\t\t{\n\t\t\t\tprojectRoot,\n\t\t\t\ttag\n\t\t\t}\n\t\t);\n\n\t\t// Restore console output\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\t...result,\n\t\t\t\tmessage: `Successfully moved task/subtask ${args.sourceId} to ${args.destinationId}`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Restore console output in case of error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Failed to move task: ${error.message}`);\n\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tmessage: error.message,\n\t\t\t\tcode: 'MOVE_TASK_ERROR'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/models.js", "/**\n * models.js\n * Direct function for managing AI model configurations via MCP\n */\n\nimport {\n\tgetModelConfiguration,\n\tgetAvailableModelsList,\n\tsetModel\n} from '../../../../scripts/modules/task-manager/models.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\nimport { CUSTOM_PROVIDERS_ARRAY } from '../../../../src/constants/providers.js';\n\n// Define supported roles for model setting\nconst MODEL_ROLES = ['main', 'research', 
'fallback'];\n\n/**\n * Determine provider hint from custom provider flags\n * @param {Object} args - Arguments containing provider flags\n * @returns {string|undefined} Provider hint or undefined if no custom provider flag is set\n */\nfunction getProviderHint(args) {\n\treturn CUSTOM_PROVIDERS_ARRAY.find((provider) => args[provider]);\n}\n\n/**\n * Handle setting models for different roles\n * @param {Object} args - Arguments containing role-specific model IDs\n * @param {Object} context - Context object with session, mcpLog, projectRoot\n * @returns {Object|null} Result if a model was set, null if no model setting was requested\n */\nasync function handleModelSetting(args, context) {\n\tfor (const role of MODEL_ROLES) {\n\t\tconst roleKey = `set${role.charAt(0).toUpperCase() + role.slice(1)}`; // setMain, setResearch, setFallback\n\n\t\tif (args[roleKey]) {\n\t\t\tconst providerHint = getProviderHint(args);\n\n\t\t\treturn await setModel(role, args[roleKey], {\n\t\t\t\t...context,\n\t\t\t\tproviderHint\n\t\t\t});\n\t\t}\n\t}\n\treturn null; // No model setting was requested\n}\n\n/**\n * Get or update model configuration\n * @param {Object} args - Arguments passed by the MCP tool\n * @param {Object} log - MCP logger\n * @param {Object} context - MCP context (contains session)\n * @returns {Object} Result object with success, data/error fields\n */\nexport async function modelsDirect(args, log, context = {}) {\n\tconst { session } = context;\n\tconst { projectRoot } = args; // Extract projectRoot from args\n\n\t// Create a logger wrapper that the core functions can use\n\tconst mcpLog = createLogWrapper(log);\n\n\tlog.info(`Executing models_direct with args: ${JSON.stringify(args)}`);\n\tlog.info(`Using project root: ${projectRoot}`);\n\n\t// Validate flags: only one custom provider flag can be used simultaneously\n\tconst customProviderFlags = CUSTOM_PROVIDERS_ARRAY.filter(\n\t\t(provider) => args[provider]\n\t);\n\n\tif (customProviderFlags.length > 1) 
{\n\t\tlog.error(\n\t\t\t'Error: Cannot use multiple custom provider flags simultaneously.'\n\t\t);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'INVALID_ARGS',\n\t\t\t\tmessage:\n\t\t\t\t\t'Cannot use multiple custom provider flags simultaneously. Choose only one: openrouter, ollama, bedrock, azure, or vertex.'\n\t\t\t}\n\t\t};\n\t}\n\n\ttry {\n\t\tenableSilentMode();\n\n\t\ttry {\n\t\t\t// Check for the listAvailableModels flag\n\t\t\tif (args.listAvailableModels === true) {\n\t\t\t\treturn await getAvailableModelsList({\n\t\t\t\t\tsession,\n\t\t\t\t\tmcpLog,\n\t\t\t\t\tprojectRoot\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Handle setting any model role using unified function\n\t\t\tconst modelContext = { session, mcpLog, projectRoot };\n\t\t\tconst modelSetResult = await handleModelSetting(args, modelContext);\n\t\t\tif (modelSetResult) {\n\t\t\t\treturn modelSetResult;\n\t\t\t}\n\n\t\t\t// Default action: get current configuration\n\t\t\treturn await getModelConfiguration({\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t});\n\t\t} finally {\n\t\t\tdisableSilentMode();\n\t\t}\n\t} catch (error) {\n\t\tlog.error(`Error in models_direct: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'DIRECT_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message,\n\t\t\t\tdetails: error.stack\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/custom-sdk/language-model.js", "/**\n * src/ai-providers/custom-sdk/mcp/language-model.js\n *\n * MCP Language Model implementation following AI SDK LanguageModelV1 interface.\n * Uses MCP session.requestSampling() for AI operations.\n */\n\nimport {\n\tconvertToMCPFormat,\n\tconvertFromMCPFormat\n} from './message-converter.js';\nimport { MCPError, mapMCPError } from './errors.js';\nimport { extractJson } from './json-extractor.js';\nimport {\n\tconvertSchemaToInstructions,\n\tenhancePromptForJSON\n} from './schema-converter.js';\n\n/**\n * MCP Language Model 
implementing AI SDK LanguageModelV1 interface\n */\nexport class MCPLanguageModel {\n\tspecificationVersion = 'v1';\n\tdefaultObjectGenerationMode = 'json';\n\tsupportsImageUrls = false;\n\tsupportsStructuredOutputs = true;\n\n\tconstructor(options) {\n\t\tthis.session = options.session; // MCP session object\n\t\tthis.modelId = options.modelId;\n\t\tthis.settings = options.settings || {};\n\t\tthis.provider = 'mcp-ai-sdk';\n\t\tthis.maxTokens = this.settings.maxTokens;\n\t\tthis.temperature = this.settings.temperature;\n\n\t\tthis.validateSession();\n\t}\n\n\t/**\n\t * Validate that the MCP session has required capabilities\n\t */\n\tvalidateSession() {\n\t\tif (!this.session?.clientCapabilities?.sampling) {\n\t\t\tthrow new MCPError('MCP session must have client sampling capabilities');\n\t\t}\n\t}\n\n\t/**\n\t * Generate text using MCP session sampling\n\t * @param {object} options - Generation options\n\t * @param {Array} options.prompt - AI SDK prompt format\n\t * @param {AbortSignal} options.abortSignal - Abort signal\n\t * @returns {Promise<object>} Generation result in AI SDK format\n\t */\n\tasync doGenerate(options) {\n\t\ttry {\n\t\t\t// Convert AI SDK prompt to MCP format\n\t\t\tconst { messages, systemPrompt } = convertToMCPFormat(options.prompt);\n\n\t\t\t// Use MCP session.requestSampling (same as MCPRemoteProvider)\n\t\t\tconst response = await this.session.requestSampling(\n\t\t\t\t{\n\t\t\t\t\tmessages,\n\t\t\t\t\tsystemPrompt,\n\t\t\t\t\ttemperature: this.settings.temperature,\n\t\t\t\t\tmaxTokens: this.settings.maxTokens,\n\t\t\t\t\tincludeContext: 'thisServer'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t// signal: options.abortSignal,\n\t\t\t\t\ttimeout: 240000 // 4 minutes timeout\n\t\t\t\t}\n\t\t\t);\n\n\t\t\t// Convert MCP response back to AI SDK format\n\t\t\tconst result = convertFromMCPFormat(response);\n\n\t\t\treturn {\n\t\t\t\ttext: result.text,\n\t\t\t\tfinishReason: result.finishReason || 'stop',\n\t\t\t\tusage: {\n\t\t\t\t\tpromptTokens: 
result.usage?.inputTokens || 0,\n\t\t\t\t\tcompletionTokens: result.usage?.outputTokens || 0,\n\t\t\t\t\ttotalTokens:\n\t\t\t\t\t\t(result.usage?.inputTokens || 0) + (result.usage?.outputTokens || 0)\n\t\t\t\t},\n\t\t\t\trawResponse: response,\n\t\t\t\twarnings: result.warnings\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tthrow mapMCPError(error);\n\t\t}\n\t}\n\n\t/**\n\t * Generate structured object using MCP session sampling\n\t * @param {object} options - Generation options\n\t * @param {Array} options.prompt - AI SDK prompt format\n\t * @param {import('zod').ZodSchema} options.schema - Zod schema for validation\n\t * @param {string} [options.mode='json'] - Generation mode ('json' or 'tool')\n\t * @param {AbortSignal} options.abortSignal - Abort signal\n\t * @returns {Promise<object>} Generation result with structured object\n\t */\n\tasync doGenerateObject(options) {\n\t\ttry {\n\t\t\tconst { schema, mode = 'json', ...restOptions } = options;\n\n\t\t\tif (!schema) {\n\t\t\t\tthrow new MCPError('Schema is required for object generation');\n\t\t\t}\n\n\t\t\t// Convert schema to JSON instructions\n\t\t\tconst objectName = restOptions.objectName || 'generated_object';\n\t\t\tconst jsonInstructions = convertSchemaToInstructions(schema, objectName);\n\n\t\t\t// Enhance prompt with JSON generation instructions\n\t\t\tconst enhancedPrompt = enhancePromptForJSON(\n\t\t\t\toptions.prompt,\n\t\t\t\tjsonInstructions\n\t\t\t);\n\n\t\t\t// Convert enhanced prompt to MCP format\n\t\t\tconst { messages, systemPrompt } = convertToMCPFormat(enhancedPrompt);\n\n\t\t\t// Use MCP session.requestSampling with enhanced prompt\n\t\t\tconst response = await this.session.requestSampling(\n\t\t\t\t{\n\t\t\t\t\tmessages,\n\t\t\t\t\tsystemPrompt,\n\t\t\t\t\ttemperature: this.settings.temperature,\n\t\t\t\t\tmaxTokens: this.settings.maxTokens,\n\t\t\t\t\tincludeContext: 'thisServer'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\ttimeout: 240000 // 4 minutes timeout\n\t\t\t\t}\n\t\t\t);\n\n\t\t\t// Convert 
MCP response back to AI SDK format\n\t\t\tconst result = convertFromMCPFormat(response);\n\n\t\t\t// Extract JSON from the response text\n\t\t\tconst jsonText = extractJson(result.text);\n\n\t\t\t// Parse and validate JSON\n\t\t\tlet parsedObject;\n\t\t\ttry {\n\t\t\t\tparsedObject = JSON.parse(jsonText);\n\t\t\t} catch (parseError) {\n\t\t\t\tthrow new MCPError(\n\t\t\t\t\t`Failed to parse JSON response: ${parseError.message}. Response: ${result.text.substring(0, 200)}...`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Validate against schema\n\t\t\ttry {\n\t\t\t\tconst validatedObject = schema.parse(parsedObject);\n\n\t\t\t\treturn {\n\t\t\t\t\tobject: validatedObject,\n\t\t\t\t\tfinishReason: result.finishReason || 'stop',\n\t\t\t\t\tusage: {\n\t\t\t\t\t\tpromptTokens: result.usage?.inputTokens || 0,\n\t\t\t\t\t\tcompletionTokens: result.usage?.outputTokens || 0,\n\t\t\t\t\t\ttotalTokens:\n\t\t\t\t\t\t\t(result.usage?.inputTokens || 0) +\n\t\t\t\t\t\t\t(result.usage?.outputTokens || 0)\n\t\t\t\t\t},\n\t\t\t\t\trawResponse: response,\n\t\t\t\t\twarnings: result.warnings\n\t\t\t\t};\n\t\t\t} catch (validationError) {\n\t\t\t\tthrow new MCPError(\n\t\t\t\t\t`Generated object does not match schema: ${validationError.message}. 
Generated: ${JSON.stringify(parsedObject, null, 2)}`\n\t\t\t\t);\n\t\t\t}\n\t\t} catch (error) {\n\t\t\tthrow mapMCPError(error);\n\t\t}\n\t}\n\n\t/**\n\t * Stream text generation using MCP session sampling\n\t * Note: MCP may not support native streaming, so this may simulate streaming\n\t * @param {object} options - Generation options\n\t * @returns {AsyncIterable} Stream of generation chunks\n\t */\n\tasync doStream(options) {\n\t\ttry {\n\t\t\t// For now, simulate streaming by chunking the complete response\n\t\t\t// TODO: Implement native streaming if MCP supports it\n\t\t\tconst result = await this.doGenerate(options);\n\n\t\t\t// Create async generator that yields chunks\n\t\t\treturn this.simulateStreaming(result);\n\t\t} catch (error) {\n\t\t\tthrow mapMCPError(error);\n\t\t}\n\t}\n\n\t/**\n\t * Simulate streaming by chunking a complete response\n\t * @param {object} result - Complete generation result\n\t * @returns {AsyncIterable} Simulated stream chunks\n\t */\n\tasync *simulateStreaming(result) {\n\t\tconst text = result.text;\n\t\tconst chunkSize = Math.max(1, Math.floor(text.length / 10)); // 10 chunks\n\n\t\tfor (let i = 0; i < text.length; i += chunkSize) {\n\t\t\tconst chunk = text.slice(i, i + chunkSize);\n\t\t\tconst isLast = i + chunkSize >= text.length;\n\n\t\t\tyield {\n\t\t\t\ttype: 'text-delta',\n\t\t\t\ttextDelta: chunk\n\t\t\t};\n\n\t\t\t// Small delay to simulate streaming\n\t\t\tawait new Promise((resolve) => setTimeout(resolve, 50));\n\t\t}\n\n\t\t// Final chunk with finish reason and usage\n\t\tyield {\n\t\t\ttype: 'finish',\n\t\t\tfinishReason: result.finishReason,\n\t\t\tusage: result.usage\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/tools/add-subtask.js", "/**\n * tools/add-subtask.js\n * Tool for adding subtasks to existing tasks\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { addSubtaskDirect } from 
'../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the addSubtask tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerAddSubtaskTool(server) {\n\tserver.addTool({\n\t\tname: 'add_subtask',\n\t\tdescription: 'Add a subtask to an existing task',\n\t\tparameters: z.object({\n\t\t\tid: z.string().describe('Parent task ID (required)'),\n\t\t\ttaskId: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Existing task ID to convert to subtask'),\n\t\t\ttitle: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Title for the new subtask (when creating a new subtask)'),\n\t\t\tdescription: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Description for the new subtask'),\n\t\t\tdetails: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Implementation details for the new subtask'),\n\t\t\tstatus: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\"Status for the new subtask (default: 'pending')\"),\n\t\t\tdependencies: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Comma-separated list of dependency IDs for the new subtask'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the tasks file (default: tasks/tasks.json)'\n\t\t\t\t),\n\t\t\tskipGenerate: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Skip regenerating task files'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(`Adding subtask with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await addSubtaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\ttaskId: args.taskId,\n\t\t\t\t\t\ttitle: args.title,\n\t\t\t\t\t\tdescription: args.description,\n\t\t\t\t\t\tdetails: args.details,\n\t\t\t\t\t\tstatus: args.status,\n\t\t\t\t\t\tdependencies: args.dependencies,\n\t\t\t\t\t\tskipGenerate: args.skipGenerate,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Subtask added successfully: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to add subtask: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error adding subtask',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in addSubtask tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], 
["/claude-task-master/mcp-server/src/tools/update-subtask.js", "/**\n * tools/update-subtask.js\n * Tool to append additional information to a specific subtask\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { updateSubtaskByIdDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the update-subtask tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerUpdateSubtaskTool(server) {\n\tserver.addTool({\n\t\tname: 'update_subtask',\n\t\tdescription:\n\t\t\t'Appends timestamped information to a specific subtask without replacing existing content. If you just want to update the subtask status, use set_task_status instead.',\n\t\tparameters: z.object({\n\t\t\tid: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'ID of the subtask to update in format \"parentId.subtaskId\" (e.g., \"5.2\"). Parent ID is the ID of the task that contains the subtask.'\n\t\t\t\t),\n\t\t\tprompt: z.string().describe('Information to add to the subtask'),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Use Perplexity AI for research-backed updates'),\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\tconst toolName = 'update_subtask';\n\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(`Updating subtask with args: ${JSON.stringify(args)}`);\n\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`${toolName}: Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await updateSubtaskByIdDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tprompt: args.prompt,\n\t\t\t\t\t\tresearch: args.research,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully updated subtask with ID ${args.id}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(\n\t\t\t\t\t\t`Failed to update subtask: ${result.error?.message || 'Unknown error'}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error updating subtask',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(\n\t\t\t\t\t`Critical error in ${toolName} tool execute: ${error.message}`\n\t\t\t\t);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Internal tool error (${toolName}): ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/validate-dependencies.js", "/**\n * 
Direct function wrapper for validateDependenciesCommand\n */\n\nimport { validateDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport fs from 'fs';\n\n/**\n * Validate dependencies in tasks.json\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function validateDependenciesDirect(args, log) {\n\t// Destructure the explicit tasksJsonPath\n\tconst { tasksJsonPath, projectRoot, tag } = args;\n\n\tif (!tasksJsonPath) {\n\t\tlog.error('validateDependenciesDirect called without tasksJsonPath');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t}\n\t\t};\n\t}\n\n\ttry {\n\t\tlog.info(`Validating dependencies in tasks: ${tasksJsonPath}`);\n\n\t\t// Use the provided tasksJsonPath\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Verify the file exists\n\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'FILE_NOT_FOUND',\n\t\t\t\t\tmessage: `Tasks file not found at ${tasksPath}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\tconst options = { projectRoot, tag };\n\t\t// Call the original command function using the provided tasksPath\n\t\tawait validateDependenciesCommand(tasksPath, options);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: 
'Dependencies validated successfully',\n\t\t\t\ttasksPath\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error validating dependencies: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'VALIDATION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/tools/update-task.js", "/**\n * tools/update-task.js\n * Tool to update a single task by ID with new information\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { updateTaskByIdDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the update-task tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerUpdateTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'update_task',\n\t\tdescription:\n\t\t\t'Updates a single task by ID with new information or context provided in the prompt.',\n\t\tparameters: z.object({\n\t\t\tid: z\n\t\t\t\t.string() // ID can be number or string like \"1.2\"\n\t\t\t\t.describe(\n\t\t\t\t\t\"ID of the task (e.g., '15') to update. 
Subtasks are supported using the update-subtask tool.\"\n\t\t\t\t),\n\t\t\tprompt: z\n\t\t\t\t.string()\n\t\t\t\t.describe('New information or context to incorporate into the task'),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Use Perplexity AI for research-backed updates'),\n\t\t\tappend: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Append timestamped information to task details instead of full update'\n\t\t\t\t),\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\tconst toolName = 'update_task';\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(\n\t\t\t\t\t`Executing ${toolName} tool with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t\tlog.info(`${toolName}: Resolved tasks path: ${tasksJsonPath}`);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`${toolName}: Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// 3. 
Call Direct Function - Include projectRoot\n\t\t\t\tconst result = await updateTaskByIdDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tprompt: args.prompt,\n\t\t\t\t\t\tresearch: args.research,\n\t\t\t\t\t\tappend: args.append,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\t// 4. Handle Result\n\t\t\t\tlog.info(\n\t\t\t\t\t`${toolName}: Direct function result: success=${result.success}`\n\t\t\t\t);\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error updating task',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(\n\t\t\t\t\t`Critical error in ${toolName} tool execute: ${error.message}`\n\t\t\t\t);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Internal tool error (${toolName}): ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/remove-task.js", "/**\n * tools/remove-task.js\n * Tool to remove a task by ID\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { removeTaskDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the remove-task tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerRemoveTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'remove_task',\n\t\tdescription: 'Remove a task or subtask permanently from the tasks list',\n\t\tparameters: z.object({\n\t\t\tid: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t\"ID of the task or subtask to remove (e.g., '5' or '5.2'). 
Can be comma-separated to update multiple tasks/subtasks at once.\"\n\t\t\t\t),\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\tconfirm: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Whether to skip confirmation prompt (default: false)'),\n\t\t\ttag: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Specify which tag context to operate on. Defaults to the current active tag.'\n\t\t\t\t)\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Removing task(s) with ID(s): ${args.id}`);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tlog.info(`Using tasks file path: ${tasksJsonPath}`);\n\n\t\t\t\tconst result = await removeTaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully removed task: ${args.id}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to remove task: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error removing 
task',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in remove-task tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(`Failed to remove task: ${error.message}`);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/clear-subtasks.js", "/**\n * tools/clear-subtasks.js\n * Tool for clearing subtasks from parent tasks\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { clearSubtasksDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the clearSubtasks tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerClearSubtasksTool(server) {\n\tserver.addTool({\n\t\tname: 'clear_subtasks',\n\t\tdescription: 'Clear subtasks from specified tasks',\n\t\tparameters: z\n\t\t\t.object({\n\t\t\t\tid: z\n\t\t\t\t\t.string()\n\t\t\t\t\t.optional()\n\t\t\t\t\t.describe('Task IDs (comma-separated) to clear subtasks from'),\n\t\t\t\tall: z.boolean().optional().describe('Clear subtasks from all tasks'),\n\t\t\t\tfile: z\n\t\t\t\t\t.string()\n\t\t\t\t\t.optional()\n\t\t\t\t\t.describe(\n\t\t\t\t\t\t'Absolute path to the tasks file (default: tasks/tasks.json)'\n\t\t\t\t\t),\n\t\t\t\tprojectRoot: z\n\t\t\t\t\t.string()\n\t\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t\t})\n\t\t\t.refine((data) => data.id || data.all, {\n\t\t\t\tmessage: \"Either 'id' or 'all' parameter must be provided\",\n\t\t\t\tpath: ['id', 'all']\n\t\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await clearSubtasksDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tall: args.all,\n\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Subtasks cleared successfully: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to clear subtasks: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error clearing subtasks',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in clearSubtasks tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/update.js", "/**\n * tools/update.js\n * 
Tool to update tasks based on new context/prompt\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { updateTasksDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the update tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerUpdateTool(server) {\n\tserver.addTool({\n\t\tname: 'update',\n\t\tdescription:\n\t\t\t\"Update multiple upcoming tasks (with ID >= 'from' ID) based on new context or changes provided in the prompt. Use 'update_task' instead for a single specific task or 'update_subtask' for subtasks.\",\n\t\tparameters: z.object({\n\t\t\tfrom: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t\"Task ID from which to start updating (inclusive). IMPORTANT: This tool uses 'from', not 'id'\"\n\t\t\t\t),\n\t\t\tprompt: z\n\t\t\t\t.string()\n\t\t\t\t.describe('Explanation of changes or new context to apply'),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Use Perplexity AI for research-backed updates'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file relative to project root'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'The directory of the project. 
(Optional, usually from session)'\n\t\t\t\t),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\tconst toolName = 'update';\n\t\t\tconst { from, prompt, research, file, projectRoot, tag } = args;\n\n\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\ttag: args.tag\n\t\t\t});\n\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Executing ${toolName} tool with normalized root: ${projectRoot}`\n\t\t\t\t);\n\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath({ projectRoot, file }, log);\n\t\t\t\t\tlog.info(`${toolName}: Resolved tasks path: ${tasksJsonPath}`);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`${toolName}: Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json within project root '${projectRoot}': ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await updateTasksDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tfrom: from,\n\t\t\t\t\t\tprompt: prompt,\n\t\t\t\t\t\tresearch: research,\n\t\t\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tlog.info(\n\t\t\t\t\t`${toolName}: Direct function result: success=${result.success}`\n\t\t\t\t);\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error updating tasks',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(\n\t\t\t\t\t`Critical error in ${toolName} tool execute: ${error.message}`\n\t\t\t\t);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Internal tool error (${toolName}): ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/get-operation-status.js", "// 
mcp-server/src/tools/get-operation-status.js\nimport { z } from 'zod';\nimport { createErrorResponse, createContentResponse } from './utils.js'; // Assuming these utils exist\n\n/**\n * Register the get_operation_status tool.\n * @param {FastMCP} server - FastMCP server instance.\n * @param {AsyncOperationManager} asyncManager - The async operation manager.\n */\nexport function registerGetOperationStatusTool(server, asyncManager) {\n\tserver.addTool({\n\t\tname: 'get_operation_status',\n\t\tdescription:\n\t\t\t'Retrieves the status and result/error of a background operation.',\n\t\tparameters: z.object({\n\t\t\toperationId: z.string().describe('The ID of the operation to check.')\n\t\t}),\n\t\texecute: async (args, { log }) => {\n\t\t\ttry {\n\t\t\t\tconst { operationId } = args;\n\t\t\t\tlog.info(`Checking status for operation ID: ${operationId}`);\n\n\t\t\t\tconst status = asyncManager.getStatus(operationId);\n\n\t\t\t\t// Status will now always return an object, but it might have status='not_found'\n\t\t\t\tif (status.status === 'not_found') {\n\t\t\t\t\tlog.warn(`Operation ID not found: ${operationId}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\tstatus.error?.message || `Operation ID not found: ${operationId}`,\n\t\t\t\t\t\tstatus.error?.code || 'OPERATION_NOT_FOUND'\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tlog.info(`Status for ${operationId}: ${status.status}`);\n\t\t\t\treturn createContentResponse(status);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in get_operation_status tool: ${error.message}`, {\n\t\t\t\t\tstack: error.stack\n\t\t\t\t});\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Failed to get operation status: ${error.message}`,\n\t\t\t\t\t'GET_STATUS_ERROR'\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/fix-dependencies.js", "/**\n * Direct function wrapper for fixDependenciesCommand\n */\n\nimport { fixDependenciesCommand } from 
'../../../../scripts/modules/dependency-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport fs from 'fs';\n\n/**\n * Fix invalid dependencies in tasks.json automatically\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.projectRoot - Project root directory\n * @param {string} args.tag - Tag for the project\n * @param {Object} log - Logger object\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function fixDependenciesDirect(args, log) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, projectRoot, tag } = args;\n\ttry {\n\t\tlog.info(`Fixing invalid dependencies in tasks: ${tasksJsonPath}`);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('fixDependenciesDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Verify the file exists\n\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'FILE_NOT_FOUND',\n\t\t\t\t\tmessage: `Tasks file not found at ${tasksPath}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\tconst options = { projectRoot, tag };\n\t\t// Call the original command function using the provided path and proper context\n\t\tawait fixDependenciesCommand(tasksPath, options);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: 'Dependencies fixed successfully',\n\t\t\t\ttasksPath,\n\t\t\t\ttag: tag || 
'master'\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error fixing dependencies: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'FIX_DEPENDENCIES_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/tools/analyze.js", "/**\n * tools/analyze.js\n * Tool for analyzing task complexity and generating recommendations\n */\n\nimport { z } from 'zod';\nimport path from 'path';\nimport fs from 'fs'; // Import fs for directory check/creation\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { analyzeTaskComplexityDirect } from '../core/task-master-core.js'; // Assuming core functions are exported via task-master-core.js\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\nimport { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';\nimport { resolveComplexityReportOutputPath } from '../../../src/utils/path-utils.js';\n\n/**\n * Register the analyze_project_complexity tool\n * @param {Object} server - FastMCP server instance\n */\nexport function registerAnalyzeProjectComplexityTool(server) {\n\tserver.addTool({\n\t\tname: 'analyze_project_complexity',\n\t\tdescription:\n\t\t\t'Analyze task complexity and generate expansion recommendations.',\n\t\tparameters: z.object({\n\t\t\tthreshold: z.coerce // Use coerce for number conversion from string if needed\n\t\t\t\t.number()\n\t\t\t\t.int()\n\t\t\t\t.min(1)\n\t\t\t\t.max(10)\n\t\t\t\t.optional()\n\t\t\t\t.default(5) // Default threshold\n\t\t\t\t.describe('Complexity score threshold (1-10) to recommend expansion.'),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(false)\n\t\t\t\t.describe('Use Perplexity AI for research-backed 
analysis.'),\n\t\t\toutput: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t`Output file path relative to project root (default: ${COMPLEXITY_REPORT_FILE}).`\n\t\t\t\t),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the tasks file relative to project root (default: tasks/tasks.json).'\n\t\t\t\t),\n\t\t\tids: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Comma-separated list of task IDs to analyze specifically (e.g., \"1,3,5\").'\n\t\t\t\t),\n\t\t\tfrom: z.coerce\n\t\t\t\t.number()\n\t\t\t\t.int()\n\t\t\t\t.positive()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Starting task ID in a range to analyze.'),\n\t\t\tto: z.coerce\n\t\t\t\t.number()\n\t\t\t\t.int()\n\t\t\t\t.positive()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Ending task ID in a range to analyze.'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\tconst toolName = 'analyze_project_complexity'; // Define tool name for logging\n\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Executing ${toolName} tool with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t\tlog.info(`${toolName}: Resolved tasks path: ${tasksJsonPath}`);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`${toolName}: Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json within project root '${args.projectRoot}': 
${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst outputPath = resolveComplexityReportOutputPath(\n\t\t\t\t\targs.output,\n\t\t\t\t\t{\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog\n\t\t\t\t);\n\n\t\t\t\tlog.info(`${toolName}: Report output path: ${outputPath}`);\n\n\t\t\t\t// Ensure output directory exists\n\t\t\t\tconst outputDir = path.dirname(outputPath);\n\t\t\t\ttry {\n\t\t\t\t\tif (!fs.existsSync(outputDir)) {\n\t\t\t\t\t\tfs.mkdirSync(outputDir, { recursive: true });\n\t\t\t\t\t\tlog.info(`${toolName}: Created output directory: ${outputDir}`);\n\t\t\t\t\t}\n\t\t\t\t} catch (dirError) {\n\t\t\t\t\tlog.error(\n\t\t\t\t\t\t`${toolName}: Failed to create output directory ${outputDir}: ${dirError.message}`\n\t\t\t\t\t);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to create output directory: ${dirError.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// 3. Call Direct Function - Pass projectRoot in first arg object\n\t\t\t\tconst result = await analyzeTaskComplexityDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\toutputPath: outputPath,\n\t\t\t\t\t\tthreshold: args.threshold,\n\t\t\t\t\t\tresearch: args.research,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag,\n\t\t\t\t\t\tids: args.ids,\n\t\t\t\t\t\tfrom: args.from,\n\t\t\t\t\t\tto: args.to\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\t// 4. 
Handle Result\n\t\t\t\tlog.info(\n\t\t\t\t\t`${toolName}: Direct function result: success=${result.success}`\n\t\t\t\t);\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error analyzing task complexity',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(\n\t\t\t\t\t`Critical error in ${toolName} tool execute: ${error.message}`\n\t\t\t\t);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Internal tool error (${toolName}): ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/generate.js", "/**\n * tools/generate.js\n * Tool to generate individual task files from tasks.json\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { generateTaskFilesDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\nimport path from 'path';\n\n/**\n * Register the generate tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerGenerateTool(server) {\n\tserver.addTool({\n\t\tname: 'generate',\n\t\tdescription:\n\t\t\t'Generates individual task files in tasks/ directory based on tasks.json',\n\t\tparameters: z.object({\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\toutput: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Output directory (default: same directory as tasks file)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Generating task files with args: ${JSON.stringify(args)}`);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst outputDir = args.output\n\t\t\t\t\t? path.resolve(args.projectRoot, args.output)\n\t\t\t\t\t: path.dirname(tasksJsonPath);\n\n\t\t\t\tconst result = await generateTaskFilesDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\toutputDir: outputDir,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully generated task files: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(\n\t\t\t\t\t\t`Failed to generate task files: ${result.error?.message || 'Unknown error'}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error generating task files',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in generate tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/profiles/opencode.js", "// Opencode profile 
for rule-transformer\nimport path from 'path';\nimport fs from 'fs';\nimport { log } from '../../scripts/modules/utils.js';\nimport { createProfile } from './base-profile.js';\n\n/**\n * Transform standard MCP config format to OpenCode format\n * @param {Object} mcpConfig - Standard MCP configuration object\n * @returns {Object} - Transformed OpenCode configuration object\n */\nfunction transformToOpenCodeFormat(mcpConfig) {\n\tconst openCodeConfig = {\n\t\t$schema: 'https://opencode.ai/config.json'\n\t};\n\n\t// Transform mcpServers to mcp\n\tif (mcpConfig.mcpServers) {\n\t\topenCodeConfig.mcp = {};\n\n\t\tfor (const [serverName, serverConfig] of Object.entries(\n\t\t\tmcpConfig.mcpServers\n\t\t)) {\n\t\t\t// Transform server configuration\n\t\t\tconst transformedServer = {\n\t\t\t\ttype: 'local'\n\t\t\t};\n\n\t\t\t// Combine command and args into single command array\n\t\t\tif (serverConfig.command && serverConfig.args) {\n\t\t\t\ttransformedServer.command = [\n\t\t\t\t\tserverConfig.command,\n\t\t\t\t\t...serverConfig.args\n\t\t\t\t];\n\t\t\t} else if (serverConfig.command) {\n\t\t\t\ttransformedServer.command = [serverConfig.command];\n\t\t\t}\n\n\t\t\t// Add enabled flag\n\t\t\ttransformedServer.enabled = true;\n\n\t\t\t// Transform env to environment\n\t\t\tif (serverConfig.env) {\n\t\t\t\ttransformedServer.environment = serverConfig.env;\n\t\t\t}\n\n\t\t\t// update with transformed config\n\t\t\topenCodeConfig.mcp[serverName] = transformedServer;\n\t\t}\n\t}\n\n\treturn openCodeConfig;\n}\n\n/**\n * Lifecycle function called after MCP config generation to transform to OpenCode format\n * @param {string} targetDir - Target project directory\n * @param {string} assetsDir - Assets directory (unused for OpenCode)\n */\nfunction onPostConvertRulesProfile(targetDir, assetsDir) {\n\tconst openCodeConfigPath = path.join(targetDir, 'opencode.json');\n\n\tif (!fs.existsSync(openCodeConfigPath)) {\n\t\tlog('debug', '[OpenCode] No opencode.json found to 
transform');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the generated standard MCP config\n\t\tconst mcpConfigContent = fs.readFileSync(openCodeConfigPath, 'utf8');\n\t\tconst mcpConfig = JSON.parse(mcpConfigContent);\n\n\t\t// Check if it's already in OpenCode format (has $schema)\n\t\tif (mcpConfig.$schema) {\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t'[OpenCode] opencode.json already in OpenCode format, skipping transformation'\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\t// Transform to OpenCode format\n\t\tconst openCodeConfig = transformToOpenCodeFormat(mcpConfig);\n\n\t\t// Write back the transformed config with proper formatting\n\t\tfs.writeFileSync(\n\t\t\topenCodeConfigPath,\n\t\t\tJSON.stringify(openCodeConfig, null, 2) + '\\n'\n\t\t);\n\n\t\tlog('info', '[OpenCode] Transformed opencode.json to OpenCode format');\n\t\tlog(\n\t\t\t'debug',\n\t\t\t`[OpenCode] Added schema, renamed mcpServers->mcp, combined command+args, added type/enabled, renamed env->environment`\n\t\t);\n\t} catch (error) {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[OpenCode] Failed to transform opencode.json: ${error.message}`\n\t\t);\n\t}\n}\n\n/**\n * Lifecycle function called when removing OpenCode profile\n * @param {string} targetDir - Target project directory\n */\nfunction onRemoveRulesProfile(targetDir) {\n\tconst openCodeConfigPath = path.join(targetDir, 'opencode.json');\n\n\tif (!fs.existsSync(openCodeConfigPath)) {\n\t\tlog('debug', '[OpenCode] No opencode.json found to clean up');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the current config\n\t\tconst configContent = fs.readFileSync(openCodeConfigPath, 'utf8');\n\t\tconst config = JSON.parse(configContent);\n\n\t\t// Check if it has the mcp section and taskmaster-ai server\n\t\tif (config.mcp && config.mcp['taskmaster-ai']) {\n\t\t\t// Remove taskmaster-ai server\n\t\t\tdelete config.mcp['taskmaster-ai'];\n\n\t\t\t// Check if there are other MCP servers\n\t\t\tconst remainingServers = Object.keys(config.mcp);\n\n\t\t\tif 
(remainingServers.length === 0) {\n\t\t\t\t// No other servers, remove entire mcp section\n\t\t\t\tdelete config.mcp;\n\t\t\t}\n\n\t\t\t// Check if config is now empty (only has $schema)\n\t\t\tconst remainingKeys = Object.keys(config).filter(\n\t\t\t\t(key) => key !== '$schema'\n\t\t\t);\n\n\t\t\tif (remainingKeys.length === 0) {\n\t\t\t\t// Config only has schema left, remove entire file\n\t\t\t\tfs.rmSync(openCodeConfigPath, { force: true });\n\t\t\t\tlog('info', '[OpenCode] Removed empty opencode.json file');\n\t\t\t} else {\n\t\t\t\t// Write back the modified config\n\t\t\t\tfs.writeFileSync(\n\t\t\t\t\topenCodeConfigPath,\n\t\t\t\t\tJSON.stringify(config, null, 2) + '\\n'\n\t\t\t\t);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'[OpenCode] Removed TaskMaster from opencode.json, preserved other configurations'\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\tlog('debug', '[OpenCode] TaskMaster not found in opencode.json');\n\t\t}\n\t} catch (error) {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[OpenCode] Failed to clean up opencode.json: ${error.message}`\n\t\t);\n\t}\n}\n\n// Create and export opencode profile using the base factory\nexport const opencodeProfile = createProfile({\n\tname: 'opencode',\n\tdisplayName: 'OpenCode',\n\turl: 'opencode.ai',\n\tdocsUrl: 'opencode.ai/docs/',\n\tprofileDir: '.', // Root directory\n\trulesDir: '.', // Root directory for AGENTS.md\n\tmcpConfigName: 'opencode.json', // Override default 'mcp.json'\n\tincludeDefaultRules: false,\n\tfileMap: {\n\t\t'AGENTS.md': 'AGENTS.md'\n\t},\n\tonPostConvert: onPostConvertRulesProfile,\n\tonRemove: onRemoveRulesProfile\n});\n\n// Export lifecycle functions separately to avoid naming conflicts\nexport { onPostConvertRulesProfile, onRemoveRulesProfile };\n"], ["/claude-task-master/src/profiles/vscode.js", "// VS Code conversion profile for rule-transformer\nimport path from 'path';\nimport fs from 'fs';\nimport { log } from '../../scripts/modules/utils.js';\nimport { createProfile, COMMON_TOOL_MAPPINGS } 
from './base-profile.js';\n\n/**\n * Transform standard MCP config format to VS Code format\n * @param {Object} mcpConfig - Standard MCP configuration object\n * @returns {Object} - Transformed VS Code configuration object\n */\nfunction transformToVSCodeFormat(mcpConfig) {\n\tconst vscodeConfig = {};\n\n\t// Transform mcpServers to servers\n\tif (mcpConfig.mcpServers) {\n\t\tvscodeConfig.servers = {};\n\n\t\tfor (const [serverName, serverConfig] of Object.entries(\n\t\t\tmcpConfig.mcpServers\n\t\t)) {\n\t\t\t// Transform server configuration\n\t\t\tconst transformedServer = {\n\t\t\t\t...serverConfig\n\t\t\t};\n\n\t\t\t// Add type: \"stdio\" after the env block\n\t\t\tif (transformedServer.env) {\n\t\t\t\t// Reorder properties: keep command, args, env, then add type\n\t\t\t\tconst reorderedServer = {};\n\t\t\t\tif (transformedServer.command)\n\t\t\t\t\treorderedServer.command = transformedServer.command;\n\t\t\t\tif (transformedServer.args)\n\t\t\t\t\treorderedServer.args = transformedServer.args;\n\t\t\t\tif (transformedServer.env) reorderedServer.env = transformedServer.env;\n\t\t\t\treorderedServer.type = 'stdio';\n\n\t\t\t\t// Add any other properties that might exist\n\t\t\t\tObject.keys(transformedServer).forEach((key) => {\n\t\t\t\t\tif (!['command', 'args', 'env', 'type'].includes(key)) {\n\t\t\t\t\t\treorderedServer[key] = transformedServer[key];\n\t\t\t\t\t}\n\t\t\t\t});\n\n\t\t\t\tvscodeConfig.servers[serverName] = reorderedServer;\n\t\t\t} else {\n\t\t\t\t// If no env block, just add type at the end\n\t\t\t\ttransformedServer.type = 'stdio';\n\t\t\t\tvscodeConfig.servers[serverName] = transformedServer;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn vscodeConfig;\n}\n\n/**\n * Lifecycle function called after MCP config generation to transform to VS Code format\n * @param {string} targetDir - Target project directory\n * @param {string} assetsDir - Assets directory (unused for VS Code)\n */\nfunction onPostConvertRulesProfile(targetDir, assetsDir) {\n\tconst 
vscodeConfigPath = path.join(targetDir, '.vscode', 'mcp.json');\n\n\tif (!fs.existsSync(vscodeConfigPath)) {\n\t\tlog('debug', '[VS Code] No .vscode/mcp.json found to transform');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the generated standard MCP config\n\t\tconst mcpConfigContent = fs.readFileSync(vscodeConfigPath, 'utf8');\n\t\tconst mcpConfig = JSON.parse(mcpConfigContent);\n\n\t\t// Check if it's already in VS Code format (has servers instead of mcpServers)\n\t\tif (mcpConfig.servers) {\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t'[VS Code] mcp.json already in VS Code format, skipping transformation'\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\t// Transform to VS Code format\n\t\tconst vscodeConfig = transformToVSCodeFormat(mcpConfig);\n\n\t\t// Write back the transformed config with proper formatting\n\t\tfs.writeFileSync(\n\t\t\tvscodeConfigPath,\n\t\t\tJSON.stringify(vscodeConfig, null, 2) + '\\n'\n\t\t);\n\n\t\tlog('info', '[VS Code] Transformed mcp.json to VS Code format');\n\t\tlog('debug', `[VS Code] Renamed mcpServers->servers, added type: \"stdio\"`);\n\t} catch (error) {\n\t\tlog('error', `[VS Code] Failed to transform mcp.json: ${error.message}`);\n\t}\n}\n\n/**\n * Lifecycle function called when removing VS Code profile\n * @param {string} targetDir - Target project directory\n */\nfunction onRemoveRulesProfile(targetDir) {\n\tconst vscodeConfigPath = path.join(targetDir, '.vscode', 'mcp.json');\n\n\tif (!fs.existsSync(vscodeConfigPath)) {\n\t\tlog('debug', '[VS Code] No .vscode/mcp.json found to clean up');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the current config\n\t\tconst configContent = fs.readFileSync(vscodeConfigPath, 'utf8');\n\t\tconst config = JSON.parse(configContent);\n\n\t\t// Check if it has the servers section and task-master-ai server\n\t\tif (config.servers && config.servers['task-master-ai']) {\n\t\t\t// Remove task-master-ai server\n\t\t\tdelete config.servers['task-master-ai'];\n\n\t\t\t// Check if there are other MCP 
servers\n\t\t\tconst remainingServers = Object.keys(config.servers);\n\n\t\t\tif (remainingServers.length === 0) {\n\t\t\t\t// No other servers, remove entire file\n\t\t\t\tfs.rmSync(vscodeConfigPath, { force: true });\n\t\t\t\tlog('info', '[VS Code] Removed empty mcp.json file');\n\n\t\t\t\t// Also remove .vscode directory if it's empty\n\t\t\t\tconst vscodeDir = path.dirname(vscodeConfigPath);\n\t\t\t\ttry {\n\t\t\t\t\tconst dirContents = fs.readdirSync(vscodeDir);\n\t\t\t\t\tif (dirContents.length === 0) {\n\t\t\t\t\t\tfs.rmSync(vscodeDir, { recursive: true, force: true });\n\t\t\t\t\t\tlog('debug', '[VS Code] Removed empty .vscode directory');\n\t\t\t\t\t}\n\t\t\t\t} catch (err) {\n\t\t\t\t\t// Directory might not be empty or might not exist, that's fine\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Write back the modified config\n\t\t\t\tfs.writeFileSync(\n\t\t\t\t\tvscodeConfigPath,\n\t\t\t\t\tJSON.stringify(config, null, 2) + '\\n'\n\t\t\t\t);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'[VS Code] Removed TaskMaster from mcp.json, preserved other configurations'\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\tlog('debug', '[VS Code] TaskMaster not found in mcp.json');\n\t\t}\n\t} catch (error) {\n\t\tlog('error', `[VS Code] Failed to clean up mcp.json: ${error.message}`);\n\t}\n}\n\n// Create and export vscode profile using the base factory\nexport const vscodeProfile = createProfile({\n\tname: 'vscode',\n\tdisplayName: 'VS Code',\n\turl: 'code.visualstudio.com',\n\tdocsUrl: 'code.visualstudio.com/docs',\n\trulesDir: '.github/instructions', // VS Code instructions location\n\tprofileDir: '.vscode', // VS Code configuration directory\n\tmcpConfigName: 'mcp.json', // VS Code uses mcp.json in .vscode directory\n\ttargetExtension: '.instructions.md',\n\tcustomReplacements: [\n\t\t// Core VS Code directory structure changes\n\t\t{ from: /\\.cursor\\/rules/g, to: '.github/instructions' },\n\t\t{ from: /\\.cursor\\/mcp\\.json/g, to: '.vscode/mcp.json' },\n\n\t\t// Fix any 
remaining vscode/rules references that might be created during transformation\n\t\t{ from: /\\.vscode\\/rules/g, to: '.github/instructions' },\n\n\t\t// VS Code custom instructions format - use applyTo with quoted patterns instead of globs\n\t\t{ from: /^globs:\\s*(.+)$/gm, to: 'applyTo: \"$1\"' },\n\n\t\t// Remove unsupported property - alwaysApply\n\t\t{ from: /^alwaysApply:\\s*(true|false)\\s*\\n?/gm, to: '' },\n\n\t\t// Essential markdown link transformations for VS Code structure\n\t\t{\n\t\t\tfrom: /\\[(.+?)\\]\\(mdc:\\.cursor\\/rules\\/(.+?)\\.mdc\\)/g,\n\t\t\tto: '[$1](.github/instructions/$2.instructions.md)'\n\t\t},\n\n\t\t// VS Code specific terminology\n\t\t{ from: /rules directory/g, to: 'instructions directory' },\n\t\t{ from: /cursor rules/gi, to: 'VS Code instructions' }\n\t],\n\tonPostConvert: onPostConvertRulesProfile,\n\tonRemove: onRemoveRulesProfile\n});\n\n// Export lifecycle functions separately to avoid naming conflicts\nexport { onPostConvertRulesProfile, onRemoveRulesProfile };\n"], ["/claude-task-master/mcp-server/src/tools/expand-task.js", "/**\n * tools/expand-task.js\n * Tool to expand a task into subtasks\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { expandTaskDirect } from '../core/task-master-core.js';\nimport {\n\tfindTasksPath,\n\tfindComplexityReportPath\n} from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the expand-task tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerExpandTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'expand_task',\n\t\tdescription: 'Expand a task into subtasks for detailed implementation',\n\t\tparameters: z.object({\n\t\t\tid: z.string().describe('ID of task to expand'),\n\t\t\tnum: z.string().optional().describe('Number of subtasks to generate'),\n\t\t\tresearch: 
z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(false)\n\t\t\t\t.describe('Use research role for generation'),\n\t\t\tprompt: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Additional context for subtask generation'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the tasks file relative to project root (e.g., tasks/tasks.json)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\tforce: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(false)\n\t\t\t\t.describe('Force expansion even if subtasks exist'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting expand-task with args: ${JSON.stringify(args)}`);\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst complexityReportPath = findComplexityReportPath(\n\t\t\t\t\t{ ...args, tag: resolvedTag },\n\t\t\t\t\tlog\n\t\t\t\t);\n\n\t\t\t\tconst result = await expandTaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tnum: args.num,\n\t\t\t\t\t\tresearch: args.research,\n\t\t\t\t\t\tprompt: args.prompt,\n\t\t\t\t\t\tforce: args.force,\n\t\t\t\t\t\tcomplexityReportPath,\n\t\t\t\t\t\tprojectRoot: 
args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error expanding task',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in expand-task tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/scripts/test-claude-errors.js", "#!/usr/bin/env node\n\n/**\n * test-claude-errors.js\n *\n * A test script to verify the error handling and retry logic in the callClaude function.\n * This script creates a modified version of dev.js that simulates different error scenarios.\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport dotenv from 'dotenv';\nimport { fileURLToPath } from 'url';\nimport { dirname } from 'path';\nimport { execSync, spawn } from 'child_process';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = dirname(__filename);\n\n// Load environment variables from .env file\ndotenv.config();\n\n// Create a simple PRD for testing\nconst createTestPRD = () => {\n\treturn `# Test PRD for Error Handling\n\n## Overview\nThis is a simple test PRD to verify the error handling in the callClaude function.\n\n## Requirements\n1. Create a simple web application\n2. Implement user authentication\n3. 
Add a dashboard for users\n`;\n};\n\n// Create a modified version of dev.js that simulates errors\nfunction createErrorSimulationScript(errorType, failureCount = 2) {\n\t// Read the original dev.js file\n\tconst devJsPath = path.join(__dirname, 'dev.js');\n\tconst devJsContent = fs.readFileSync(devJsPath, 'utf8');\n\n\t// Create a modified version that simulates errors\n\tlet modifiedContent = devJsContent;\n\n\t// Find the anthropic.messages.create call and replace it with our mock\n\tconst anthropicCallRegex =\n\t\t/const response = await anthropic\\.messages\\.create\\(/;\n\n\tlet mockCode = '';\n\n\tswitch (errorType) {\n\t\tcase 'network':\n\t\t\tmockCode = `\n // Mock for network error simulation\n let currentAttempt = 0;\n const failureCount = ${failureCount};\n \n // Simulate network error for the first few attempts\n currentAttempt++;\n console.log(\\`[Mock] API call attempt \\${currentAttempt}\\`);\n \n if (currentAttempt <= failureCount) {\n console.log(\\`[Mock] Simulating network error (attempt \\${currentAttempt}/\\${failureCount})\\`);\n throw new Error('Network error: Connection refused');\n }\n \n const response = await anthropic.messages.create(`;\n\t\t\tbreak;\n\n\t\tcase 'timeout':\n\t\t\tmockCode = `\n // Mock for timeout error simulation\n let currentAttempt = 0;\n const failureCount = ${failureCount};\n \n // Simulate timeout error for the first few attempts\n currentAttempt++;\n console.log(\\`[Mock] API call attempt \\${currentAttempt}\\`);\n \n if (currentAttempt <= failureCount) {\n console.log(\\`[Mock] Simulating timeout error (attempt \\${currentAttempt}/\\${failureCount})\\`);\n throw new Error('Request timed out after 60000ms');\n }\n \n const response = await anthropic.messages.create(`;\n\t\t\tbreak;\n\n\t\tcase 'invalid-json':\n\t\t\tmockCode = `\n // Mock for invalid JSON response\n let currentAttempt = 0;\n const failureCount = ${failureCount};\n \n // Simulate invalid JSON for the first few attempts\n currentAttempt++;\n 
console.log(\\`[Mock] API call attempt \\${currentAttempt}\\`);\n \n if (currentAttempt <= failureCount) {\n console.log(\\`[Mock] Simulating invalid JSON response (attempt \\${currentAttempt}/\\${failureCount})\\`);\n return {\n content: [\n {\n text: \\`\\`\\`json\\\\n{\"meta\": {\"projectName\": \"Test Project\"}, \"tasks\": [{\"id\": 1, \"title\": \"Task 1\"\\`\n }\n ]\n };\n }\n \n const response = await anthropic.messages.create(`;\n\t\t\tbreak;\n\n\t\tcase 'empty-tasks':\n\t\t\tmockCode = `\n // Mock for empty tasks array\n let currentAttempt = 0;\n const failureCount = ${failureCount};\n \n // Simulate empty tasks array for the first few attempts\n currentAttempt++;\n console.log(\\`[Mock] API call attempt \\${currentAttempt}\\`);\n \n if (currentAttempt <= failureCount) {\n console.log(\\`[Mock] Simulating empty tasks array (attempt \\${currentAttempt}/\\${failureCount})\\`);\n return {\n content: [\n {\n text: \\`\\`\\`json\\\\n{\"meta\": {\"projectName\": \"Test Project\"}, \"tasks\": []}\\\\n\\`\\`\\`\n }\n ]\n };\n }\n \n const response = await anthropic.messages.create(`;\n\t\t\tbreak;\n\n\t\tdefault:\n\t\t\t// No modification\n\t\t\tmockCode = `const response = await anthropic.messages.create(`;\n\t}\n\n\t// Replace the anthropic call with our mock\n\tmodifiedContent = modifiedContent.replace(anthropicCallRegex, mockCode);\n\n\t// Write the modified script to a temporary file\n\tconst tempScriptPath = path.join(__dirname, `temp-dev-${errorType}.js`);\n\tfs.writeFileSync(tempScriptPath, modifiedContent, 'utf8');\n\n\treturn tempScriptPath;\n}\n\n// Function to run a test with a specific error type\nasync function runErrorTest(errorType, numTasks = 5, failureCount = 2) {\n\tconsole.log(`\\n=== Test: ${errorType.toUpperCase()} Error Simulation ===`);\n\n\t// Create a test PRD\n\tconst testPRD = createTestPRD();\n\tconst testPRDPath = path.join(__dirname, `test-prd-${errorType}.txt`);\n\tfs.writeFileSync(testPRDPath, testPRD, 'utf8');\n\n\t// Create a 
modified dev.js that simulates the specified error\n\tconst tempScriptPath = createErrorSimulationScript(errorType, failureCount);\n\n\tconsole.log(`Created test PRD at ${testPRDPath}`);\n\tconsole.log(`Created error simulation script at ${tempScriptPath}`);\n\tconsole.log(\n\t\t`Running with error type: ${errorType}, failure count: ${failureCount}, tasks: ${numTasks}`\n\t);\n\n\ttry {\n\t\t// Run the modified script\n\t\texecSync(\n\t\t\t`node ${tempScriptPath} parse-prd --input=${testPRDPath} --tasks=${numTasks}`,\n\t\t\t{\n\t\t\t\tstdio: 'inherit'\n\t\t\t}\n\t\t);\n\t\tconsole.log(`${errorType} error test completed successfully`);\n\t} catch (error) {\n\t\tconsole.error(`${errorType} error test failed:`, error.message);\n\t} finally {\n\t\t// Clean up temporary files\n\t\tif (fs.existsSync(tempScriptPath)) {\n\t\t\tfs.unlinkSync(tempScriptPath);\n\t\t}\n\t\tif (fs.existsSync(testPRDPath)) {\n\t\t\tfs.unlinkSync(testPRDPath);\n\t\t}\n\t}\n}\n\n// Function to run all error tests\nasync function runAllErrorTests() {\n\tconsole.log('Starting error handling tests for callClaude function...');\n\n\t// Test 1: Network error with automatic retry\n\tawait runErrorTest('network', 5, 2);\n\n\t// Test 2: Timeout error with automatic retry\n\tawait runErrorTest('timeout', 5, 2);\n\n\t// Test 3: Invalid JSON response with task reduction\n\tawait runErrorTest('invalid-json', 10, 2);\n\n\t// Test 4: Empty tasks array with task reduction\n\tawait runErrorTest('empty-tasks', 15, 2);\n\n\t// Test 5: Exhausted retries (more failures than MAX_RETRIES)\n\tawait runErrorTest('network', 5, 4);\n\n\tconsole.log('\\nAll error tests completed!');\n}\n\n// Run the tests\nrunAllErrorTests().catch((error) => {\n\tconsole.error('Error running tests:', error);\n\tprocess.exit(1);\n});\n"], ["/claude-task-master/src/ui/confirm.js", "import chalk from 'chalk';\nimport boxen from 'boxen';\n\n/**\n * Confirm removing profile rules (destructive operation)\n * @param {string[]} profiles - Array of 
profile names to remove\n * @returns {Promise<boolean>} - Promise resolving to true if user confirms, false otherwise\n */\nasync function confirmProfilesRemove(profiles) {\n\tconst profileList = profiles\n\t\t.map((b) => b.charAt(0).toUpperCase() + b.slice(1))\n\t\t.join(', ');\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.yellow(\n\t\t\t\t`WARNING: This will selectively remove Task Master components for: ${profileList}.\n\nWhat will be removed:\n• Task Master specific rule files (e.g., cursor_rules.mdc, taskmaster.mdc, etc.)\n• Task Master MCP server configuration (if no other MCP servers exist)\n\nWhat will be preserved:\n• Your existing custom rule files\n• Other MCP server configurations\n• The profile directory itself (unless completely empty after removal)\n\nThe .[profile] directory will only be removed if ALL of the following are true:\n• All rules in the directory were Task Master rules (no custom rules)\n• No other files or folders exist in the profile directory\n• The MCP configuration was completely removed (no other servers)\n\nAre you sure you want to proceed?`\n\t\t\t),\n\t\t\t{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }\n\t\t)\n\t);\n\tconst inquirer = await import('inquirer');\n\tconst { confirm } = await inquirer.default.prompt([\n\t\t{\n\t\t\ttype: 'confirm',\n\t\t\tname: 'confirm',\n\t\t\tmessage: 'Type y to confirm selective removal, or n to abort:',\n\t\t\tdefault: false\n\t\t}\n\t]);\n\treturn confirm;\n}\n\n/**\n * Confirm removing ALL remaining profile rules (extremely critical operation)\n * @param {string[]} profiles - Array of profile names to remove\n * @param {string[]} remainingProfiles - Array of profiles that would be left after removal\n * @returns {Promise<boolean>} - Promise resolving to true if user confirms, false otherwise\n */\nasync function confirmRemoveAllRemainingProfiles(profiles, remainingProfiles) {\n\tconst profileList = profiles\n\t\t.map((p) => p.charAt(0).toUpperCase() + p.slice(1))\n\t\t.join(', 
');\n\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.red.bold(\n\t\t\t\t`⚠️ CRITICAL WARNING: REMOVING ALL TASK MASTER RULE PROFILES ⚠️\\n\\n` +\n\t\t\t\t\t`You are about to remove Task Master components for: ${profileList}\\n` +\n\t\t\t\t\t`This will leave your project with NO Task Master rule profiles remaining!\\n\\n` +\n\t\t\t\t\t`What will be removed:\\n` +\n\t\t\t\t\t`• All Task Master specific rule files\\n` +\n\t\t\t\t\t`• Task Master MCP server configurations\\n` +\n\t\t\t\t\t`• Profile directories (only if completely empty after removal)\\n\\n` +\n\t\t\t\t\t`What will be preserved:\\n` +\n\t\t\t\t\t`• Your existing custom rule files\\n` +\n\t\t\t\t\t`• Other MCP server configurations\\n` +\n\t\t\t\t\t`• Profile directories with custom content\\n\\n` +\n\t\t\t\t\t`This could impact Task Master functionality but will preserve your custom configurations.\\n\\n` +\n\t\t\t\t\t`Are you absolutely sure you want to proceed?`\n\t\t\t),\n\t\t\t{\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'red',\n\t\t\t\tborderStyle: 'double',\n\t\t\t\ttitle: '🚨 CRITICAL OPERATION',\n\t\t\t\ttitleAlignment: 'center'\n\t\t\t}\n\t\t)\n\t);\n\n\tconst inquirer = await import('inquirer');\n\tconst { confirm } = await inquirer.default.prompt([\n\t\t{\n\t\t\ttype: 'confirm',\n\t\t\tname: 'confirm',\n\t\t\tmessage:\n\t\t\t\t'Type y to confirm removing ALL Task Master rule profiles, or n to abort:',\n\t\t\tdefault: false\n\t\t}\n\t]);\n\treturn confirm;\n}\n\nexport { confirmProfilesRemove, confirmRemoveAllRemainingProfiles };\n"], ["/claude-task-master/mcp-server/src/tools/next-task.js", "/**\n * tools/next-task.js\n * Tool to find the next task to work on based on dependencies and status\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { nextTaskDirect } from '../core/task-master-core.js';\nimport {\n\tresolveTasksPath,\n\tresolveComplexityReportPath\n} from '../core/utils/path-utils.js';\nimport { 
resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the nextTask tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerNextTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'next_task',\n\t\tdescription:\n\t\t\t'Find the next task to work on based on dependencies and status',\n\t\tparameters: z.object({\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tcomplexityReport: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the complexity report file (relative to project root or absolute)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Finding next task with args: ${JSON.stringify(args)}`);\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\t// Resolve the path to tasks.json using new path utilities\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = resolveTasksPath(args, session);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Resolve the path to complexity report (optional)\n\t\t\t\tlet complexityReportPath;\n\t\t\t\ttry {\n\t\t\t\t\tcomplexityReportPath = resolveComplexityReportPath(\n\t\t\t\t\t\t{ ...args, tag: resolvedTag },\n\t\t\t\t\t\tsession\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding complexity report: ${error.message}`);\n\t\t\t\t\t// This is optional, so we don't fail the operation\n\t\t\t\t\tcomplexityReportPath = 
null;\n\t\t\t\t}\n\n\t\t\t\tconst result = await nextTaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\treportPath: complexityReportPath,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tlog.info(`Next task result: ${result.success ? 'found' : 'none'}`);\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error finding next task',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error finding next task: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/scripts/modules/task-manager/task-exists.js", "/**\n * Checks if a task with the given ID exists\n * @param {Array} tasks - Array of tasks to search\n * @param {string|number} taskId - ID of task or subtask to check\n * @returns {boolean} Whether the task exists\n */\nfunction taskExists(tasks, taskId) {\n\t// Handle subtask IDs (e.g., \"1.2\")\n\tif (typeof taskId === 'string' && taskId.includes('.')) {\n\t\tconst [parentIdStr, subtaskIdStr] = taskId.split('.');\n\t\tconst parentId = parseInt(parentIdStr, 10);\n\t\tconst subtaskId = parseInt(subtaskIdStr, 10);\n\n\t\t// Find the parent task\n\t\tconst parentTask = tasks.find((t) => t.id === parentId);\n\n\t\t// If parent exists, check if subtask exists\n\t\treturn (\n\t\t\tparentTask &&\n\t\t\tparentTask.subtasks &&\n\t\t\tparentTask.subtasks.some((st) => st.id === subtaskId)\n\t\t);\n\t}\n\n\t// Handle regular task IDs\n\tconst id = parseInt(taskId, 10);\n\treturn tasks.some((t) => t.id === id);\n}\n\nexport default taskExists;\n"], ["/claude-task-master/scripts/modules/task-manager/response-language.js", "import {\n\tgetConfig,\n\tisConfigFilePresent,\n\twriteConfig\n} from '../config-manager.js';\nimport { findConfigPath } from '../../../src/utils/path-utils.js';\nimport { 
log } from '../utils.js';\n\nfunction setResponseLanguage(lang, options = {}) {\n\tconst { mcpLog, projectRoot } = options;\n\n\tconst report = (level, ...args) => {\n\t\tif (mcpLog && typeof mcpLog[level] === 'function') {\n\t\t\tmcpLog[level](...args);\n\t\t}\n\t};\n\n\t// Use centralized config path finding instead of hardcoded path\n\tconst configPath = findConfigPath(null, { projectRoot });\n\tconst configExists = isConfigFilePresent(projectRoot);\n\n\tlog(\n\t\t'debug',\n\t\t`Checking for config file using findConfigPath, found: ${configPath}`\n\t);\n\tlog(\n\t\t'debug',\n\t\t`Checking config file using isConfigFilePresent(), exists: ${configExists}`\n\t);\n\n\tif (!configExists) {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CONFIG_MISSING',\n\t\t\t\tmessage:\n\t\t\t\t\t'The configuration file is missing. Run \"task-master init\" to create it.'\n\t\t\t}\n\t\t};\n\t}\n\n\t// Validate response language\n\tif (typeof lang !== 'string' || lang.trim() === '') {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'INVALID_RESPONSE_LANGUAGE',\n\t\t\t\tmessage: `Invalid response language: ${lang}. 
Must be a non-empty string.`\n\t\t\t}\n\t\t};\n\t}\n\n\ttry {\n\t\tconst currentConfig = getConfig(projectRoot);\n\t\tcurrentConfig.global.responseLanguage = lang;\n\t\tconst writeResult = writeConfig(currentConfig, projectRoot);\n\n\t\tif (!writeResult) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'WRITE_ERROR',\n\t\t\t\t\tmessage: 'Error writing updated configuration to configuration file'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tconst successMessage = `Successfully set response language to: ${lang}`;\n\t\treport('info', successMessage);\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tresponseLanguage: lang,\n\t\t\t\tmessage: successMessage\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\treport('error', `Error setting response language: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'SET_RESPONSE_LANGUAGE_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n\nexport default setResponseLanguage;\n"], ["/claude-task-master/src/profiles/base-profile.js", "// Base profile factory for rule-transformer\nimport path from 'path';\n\n/**\n * Creates a standardized profile configuration for different editors\n * @param {Object} editorConfig - Editor-specific configuration\n * @param {string} editorConfig.name - Profile name (e.g., 'cursor', 'vscode')\n * @param {string} [editorConfig.displayName] - Display name for the editor (defaults to name)\n * @param {string} editorConfig.url - Editor website URL\n * @param {string} editorConfig.docsUrl - Editor documentation URL\n * @param {string} editorConfig.profileDir - Directory for profile configuration\n * @param {string} [editorConfig.rulesDir] - Directory for rules files (defaults to profileDir/rules)\n * @param {boolean} [editorConfig.mcpConfig=true] - Whether to create MCP configuration\n * @param {string} [editorConfig.mcpConfigName='mcp.json'] - Name of MCP config file\n * @param {string} [editorConfig.fileExtension='.mdc'] - Source 
file extension\n * @param {string} [editorConfig.targetExtension='.md'] - Target file extension\n * @param {Object} [editorConfig.toolMappings={}] - Tool name mappings\n * @param {Array} [editorConfig.customReplacements=[]] - Custom text replacements\n * @param {Object} [editorConfig.fileMap={}] - Custom file name mappings\n * @param {boolean} [editorConfig.supportsRulesSubdirectories=false] - Whether to use taskmaster/ subdirectory for taskmaster-specific rules (only Cursor uses this by default)\n * @param {boolean} [editorConfig.includeDefaultRules=true] - Whether to include default rule files\n * @param {Function} [editorConfig.onAdd] - Lifecycle hook for profile addition\n * @param {Function} [editorConfig.onRemove] - Lifecycle hook for profile removal\n * @param {Function} [editorConfig.onPostConvert] - Lifecycle hook for post-conversion\n * @returns {Object} - Complete profile configuration\n */\nexport function createProfile(editorConfig) {\n\tconst {\n\t\tname,\n\t\tdisplayName = name,\n\t\turl,\n\t\tdocsUrl,\n\t\tprofileDir = `.${name.toLowerCase()}`,\n\t\trulesDir = `${profileDir}/rules`,\n\t\tmcpConfig = true,\n\t\tmcpConfigName = mcpConfig ? 'mcp.json' : null,\n\t\tfileExtension = '.mdc',\n\t\ttargetExtension = '.md',\n\t\ttoolMappings = {},\n\t\tcustomReplacements = [],\n\t\tfileMap = {},\n\t\tsupportsRulesSubdirectories = false,\n\t\tincludeDefaultRules = true,\n\t\tonAdd,\n\t\tonRemove,\n\t\tonPostConvert\n\t} = editorConfig;\n\n\tconst mcpConfigPath = mcpConfigName\n\t\t? path.join(profileDir, mcpConfigName)\n\t\t: null;\n\n\t// Standard file mapping with custom overrides\n\t// Use taskmaster subdirectory only if profile supports it\n\tconst taskmasterPrefix = supportsRulesSubdirectories ? 
'taskmaster/' : '';\n\tconst defaultFileMap = {\n\t\t'rules/cursor_rules.mdc': `${name.toLowerCase()}_rules${targetExtension}`,\n\t\t'rules/dev_workflow.mdc': `${taskmasterPrefix}dev_workflow${targetExtension}`,\n\t\t'rules/self_improve.mdc': `self_improve${targetExtension}`,\n\t\t'rules/taskmaster.mdc': `${taskmasterPrefix}taskmaster${targetExtension}`\n\t};\n\n\t// Build final fileMap - merge defaults with custom entries when includeDefaultRules is true\n\tconst finalFileMap = includeDefaultRules\n\t\t? { ...defaultFileMap, ...fileMap }\n\t\t: fileMap;\n\n\t// Base global replacements that work for all editors\n\tconst baseGlobalReplacements = [\n\t\t// Handle URLs in any context\n\t\t{ from: /cursor\\.so/gi, to: url },\n\t\t{ from: /cursor\\s*\\.\\s*so/gi, to: url },\n\t\t{ from: /https?:\\/\\/cursor\\.so/gi, to: `https://${url}` },\n\t\t{ from: /https?:\\/\\/www\\.cursor\\.so/gi, to: `https://www.${url}` },\n\n\t\t// Handle tool references\n\t\t{ from: /\\bedit_file\\b/gi, to: toolMappings.edit_file || 'edit_file' },\n\t\t{\n\t\t\tfrom: /\\bsearch tool\\b/gi,\n\t\t\tto: `${toolMappings.search || 'search'} tool`\n\t\t},\n\t\t{ from: /\\bSearch Tool\\b/g, to: `${toolMappings.search || 'Search'} Tool` },\n\n\t\t// Handle basic terms with proper case handling\n\t\t{\n\t\t\tfrom: /\\bcursor\\b/gi,\n\t\t\tto: (match) =>\n\t\t\t\tmatch.charAt(0) === 'C' ? displayName : name.toLowerCase()\n\t\t},\n\t\t{ from: /Cursor/g, to: displayName },\n\t\t{ from: /CURSOR/g, to: displayName.toUpperCase() },\n\n\t\t// Handle file extensions if different\n\t\t...(targetExtension !== fileExtension\n\t\t\t? 
[\n\t\t\t\t\t{\n\t\t\t\t\t\tfrom: new RegExp(`\\\\${fileExtension}(?!\\\\])\\\\b`, 'g'),\n\t\t\t\t\t\tto: targetExtension\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t: []),\n\n\t\t// Handle documentation URLs\n\t\t{ from: /docs\\.cursor\\.com/gi, to: docsUrl },\n\n\t\t// Custom editor-specific replacements\n\t\t...customReplacements\n\t];\n\n\t// Standard tool mappings\n\tconst defaultToolMappings = {\n\t\tsearch: 'search',\n\t\tread_file: 'read_file',\n\t\tedit_file: 'edit_file',\n\t\tcreate_file: 'create_file',\n\t\trun_command: 'run_command',\n\t\tterminal_command: 'terminal_command',\n\t\tuse_mcp: 'use_mcp',\n\t\tswitch_mode: 'switch_mode',\n\t\t...toolMappings\n\t};\n\n\t// Create conversion config\n\tconst conversionConfig = {\n\t\t// Profile name replacements\n\t\tprofileTerms: [\n\t\t\t{ from: /cursor\\.so/g, to: url },\n\t\t\t{ from: /\\[cursor\\.so\\]/g, to: `[${url}]` },\n\t\t\t{ from: /href=\"https:\\/\\/cursor\\.so/g, to: `href=\"https://${url}` },\n\t\t\t{ from: /\\(https:\\/\\/cursor\\.so/g, to: `(https://${url}` },\n\t\t\t{\n\t\t\t\tfrom: /\\bcursor\\b/gi,\n\t\t\t\tto: (match) => (match === 'Cursor' ? displayName : name.toLowerCase())\n\t\t\t},\n\t\t\t{ from: /Cursor/g, to: displayName }\n\t\t],\n\n\t\t// File extension replacements\n\t\tfileExtensions:\n\t\t\ttargetExtension !== fileExtension\n\t\t\t\t? 
[\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfrom: new RegExp(`\\\\${fileExtension}\\\\b`, 'g'),\n\t\t\t\t\t\t\tto: targetExtension\n\t\t\t\t\t\t}\n\t\t\t\t\t]\n\t\t\t\t: [],\n\n\t\t// Documentation URL replacements\n\t\tdocUrls: [\n\t\t\t{\n\t\t\t\tfrom: new RegExp(`https:\\\\/\\\\/docs\\\\.cursor\\\\.com\\\\/[^\\\\s)'\\\"]+`, 'g'),\n\t\t\t\tto: (match) => match.replace('docs.cursor.com', docsUrl)\n\t\t\t},\n\t\t\t{\n\t\t\t\tfrom: new RegExp(`https:\\\\/\\\\/${docsUrl}\\\\/`, 'g'),\n\t\t\t\tto: `https://${docsUrl}/`\n\t\t\t}\n\t\t],\n\n\t\t// Tool references - direct replacements\n\t\ttoolNames: defaultToolMappings,\n\n\t\t// Tool references in context - more specific replacements\n\t\ttoolContexts: Object.entries(defaultToolMappings).flatMap(\n\t\t\t([original, mapped]) => [\n\t\t\t\t{\n\t\t\t\t\tfrom: new RegExp(`\\\\b${original} tool\\\\b`, 'g'),\n\t\t\t\t\tto: `${mapped} tool`\n\t\t\t\t},\n\t\t\t\t{ from: new RegExp(`\\\\bthe ${original}\\\\b`, 'g'), to: `the ${mapped}` },\n\t\t\t\t{ from: new RegExp(`\\\\bThe ${original}\\\\b`, 'g'), to: `The ${mapped}` },\n\t\t\t\t{\n\t\t\t\t\tfrom: new RegExp(`\\\\bCursor ${original}\\\\b`, 'g'),\n\t\t\t\t\tto: `${displayName} ${mapped}`\n\t\t\t\t}\n\t\t\t]\n\t\t),\n\n\t\t// Tool group and category names\n\t\ttoolGroups: [\n\t\t\t{ from: /\\bSearch tools\\b/g, to: 'Read Group tools' },\n\t\t\t{ from: /\\bEdit tools\\b/g, to: 'Edit Group tools' },\n\t\t\t{ from: /\\bRun tools\\b/g, to: 'Command Group tools' },\n\t\t\t{ from: /\\bMCP servers\\b/g, to: 'MCP Group tools' },\n\t\t\t{ from: /\\bSearch Group\\b/g, to: 'Read Group' },\n\t\t\t{ from: /\\bEdit Group\\b/g, to: 'Edit Group' },\n\t\t\t{ from: /\\bRun Group\\b/g, to: 'Command Group' }\n\t\t],\n\n\t\t// File references in markdown links\n\t\tfileReferences: {\n\t\t\tpathPattern: /\\[(.+?)\\]\\(mdc:\\.cursor\\/rules\\/(.+?)\\.mdc\\)/g,\n\t\t\treplacement: (match, text, filePath) => {\n\t\t\t\tconst baseName = path.basename(filePath, '.mdc');\n\t\t\t\tconst newFileName 
=\n\t\t\t\t\tfinalFileMap[`rules/${baseName}.mdc`] ||\n\t\t\t\t\t`${baseName}${targetExtension}`;\n\t\t\t\t// Update the link text to match the new filename (strip directory path for display)\n\t\t\t\tconst newLinkText = path.basename(newFileName);\n\t\t\t\t// For Cursor, keep the mdc: protocol; for others, use standard relative paths\n\t\t\t\tif (name.toLowerCase() === 'cursor') {\n\t\t\t\t\treturn `[${newLinkText}](mdc:${rulesDir}/${newFileName})`;\n\t\t\t\t} else {\n\t\t\t\t\treturn `[${newLinkText}](${rulesDir}/${newFileName})`;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t};\n\n\tfunction getTargetRuleFilename(sourceFilename) {\n\t\tif (finalFileMap[sourceFilename]) {\n\t\t\treturn finalFileMap[sourceFilename];\n\t\t}\n\t\treturn targetExtension !== fileExtension\n\t\t\t? sourceFilename.replace(\n\t\t\t\t\tnew RegExp(`\\\\${fileExtension}$`),\n\t\t\t\t\ttargetExtension\n\t\t\t\t)\n\t\t\t: sourceFilename;\n\t}\n\n\treturn {\n\t\tprofileName: name, // Use name for programmatic access (tests expect this)\n\t\tdisplayName: displayName, // Keep displayName for UI purposes\n\t\tprofileDir,\n\t\trulesDir,\n\t\tmcpConfig,\n\t\tmcpConfigName,\n\t\tmcpConfigPath,\n\t\tsupportsRulesSubdirectories,\n\t\tincludeDefaultRules,\n\t\tfileMap: finalFileMap,\n\t\tglobalReplacements: baseGlobalReplacements,\n\t\tconversionConfig,\n\t\tgetTargetRuleFilename,\n\t\ttargetExtension,\n\t\t// Optional lifecycle hooks\n\t\t...(onAdd && { onAddRulesProfile: onAdd }),\n\t\t...(onRemove && { onRemoveRulesProfile: onRemove }),\n\t\t...(onPostConvert && { onPostConvertRulesProfile: onPostConvert })\n\t};\n}\n\n// Common tool mappings for editors that share similar tool sets\nexport const COMMON_TOOL_MAPPINGS = {\n\t// Most editors (Cursor, Cline, Windsurf) keep original tool names\n\tSTANDARD: {},\n\n\t// Roo Code uses different tool names\n\tROO_STYLE: {\n\t\tedit_file: 'apply_diff',\n\t\tsearch: 'search_files',\n\t\tcreate_file: 'write_to_file',\n\t\trun_command: 'execute_command',\n\t\tterminal_command: 
'execute_command',\n\t\tuse_mcp: 'use_mcp_tool'\n\t}\n};\n"], ["/claude-task-master/mcp-server/src/logger.js", "import chalk from 'chalk';\nimport { isSilentMode } from '../../scripts/modules/utils.js';\nimport { getLogLevel } from '../../scripts/modules/config-manager.js';\n\n// Define log levels\nconst LOG_LEVELS = {\n\tdebug: 0,\n\tinfo: 1,\n\twarn: 2,\n\terror: 3,\n\tsuccess: 4\n};\n\n// Get log level from config manager or default to info\nconst LOG_LEVEL = LOG_LEVELS[getLogLevel().toLowerCase()] ?? LOG_LEVELS.info;\n\n/**\n * Logs a message with the specified level\n * @param {string} level - The log level (debug, info, warn, error, success)\n * @param {...any} args - Arguments to log\n */\nfunction log(level, ...args) {\n\t// Skip logging if silent mode is enabled\n\tif (isSilentMode()) {\n\t\treturn;\n\t}\n\n\t// Use text prefixes instead of emojis\n\tconst prefixes = {\n\t\tdebug: chalk.gray('[DEBUG]'),\n\t\tinfo: chalk.blue('[INFO]'),\n\t\twarn: chalk.yellow('[WARN]'),\n\t\terror: chalk.red('[ERROR]'),\n\t\tsuccess: chalk.green('[SUCCESS]')\n\t};\n\n\tif (LOG_LEVELS[level] !== undefined && LOG_LEVELS[level] >= LOG_LEVEL) {\n\t\tconst prefix = prefixes[level] || '';\n\t\tlet coloredArgs = args;\n\n\t\ttry {\n\t\t\tswitch (level) {\n\t\t\t\tcase 'error':\n\t\t\t\t\tcoloredArgs = args.map((arg) =>\n\t\t\t\t\t\ttypeof arg === 'string' ? chalk.red(arg) : arg\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase 'warn':\n\t\t\t\t\tcoloredArgs = args.map((arg) =>\n\t\t\t\t\t\ttypeof arg === 'string' ? chalk.yellow(arg) : arg\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase 'success':\n\t\t\t\t\tcoloredArgs = args.map((arg) =>\n\t\t\t\t\t\ttypeof arg === 'string' ? chalk.green(arg) : arg\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase 'info':\n\t\t\t\t\tcoloredArgs = args.map((arg) =>\n\t\t\t\t\t\ttypeof arg === 'string' ? 
chalk.blue(arg) : arg\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase 'debug':\n\t\t\t\t\tcoloredArgs = args.map((arg) =>\n\t\t\t\t\t\ttypeof arg === 'string' ? chalk.gray(arg) : arg\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\t// default: use original args (no color)\n\t\t\t}\n\t\t} catch (colorError) {\n\t\t\t// Fallback if chalk fails on an argument\n\t\t\t// Use console.error here for internal logger errors, separate from normal logging\n\t\t\tconsole.error('Internal Logger Error applying chalk color:', colorError);\n\t\t\tcoloredArgs = args;\n\t\t}\n\n\t\t// Revert to console.log - FastMCP's context logger (context.log)\n\t\t// is responsible for directing logs correctly (e.g., to stderr)\n\t\t// during tool execution without upsetting the client connection.\n\t\t// Logs outside of tool execution (like startup) will go to stdout.\n\t\tconsole.log(prefix, ...coloredArgs);\n\t}\n}\n\n/**\n * Create a logger object with methods for different log levels\n * @returns {Object} Logger object with info, error, debug, warn, and success methods\n */\nexport function createLogger() {\n\tconst createLogMethod =\n\t\t(level) =>\n\t\t(...args) =>\n\t\t\tlog(level, ...args);\n\n\treturn {\n\t\tdebug: createLogMethod('debug'),\n\t\tinfo: createLogMethod('info'),\n\t\twarn: createLogMethod('warn'),\n\t\terror: createLogMethod('error'),\n\t\tsuccess: createLogMethod('success'),\n\t\tlog: log // Also expose the raw log function\n\t};\n}\n\n// Export a default logger instance\nconst logger = createLogger();\n\nexport default logger;\nexport { log, LOG_LEVELS };\n"], ["/claude-task-master/src/profiles/zed.js", "// Zed profile for rule-transformer\nimport path from 'path';\nimport fs from 'fs';\nimport { isSilentMode, log } from '../../scripts/modules/utils.js';\nimport { createProfile } from './base-profile.js';\n\n/**\n * Transform standard MCP config format to Zed format\n * @param {Object} mcpConfig - Standard MCP configuration object\n * @returns {Object} - Transformed Zed 
configuration object\n */\nfunction transformToZedFormat(mcpConfig) {\n\tconst zedConfig = {};\n\n\t// Transform mcpServers to context_servers\n\tif (mcpConfig.mcpServers) {\n\t\tzedConfig['context_servers'] = mcpConfig.mcpServers;\n\t}\n\n\t// Preserve any other existing settings\n\tfor (const [key, value] of Object.entries(mcpConfig)) {\n\t\tif (key !== 'mcpServers') {\n\t\t\tzedConfig[key] = value;\n\t\t}\n\t}\n\n\treturn zedConfig;\n}\n\n// Lifecycle functions for Zed profile\nfunction onAddRulesProfile(targetDir, assetsDir) {\n\t// MCP transformation will be handled in onPostConvertRulesProfile\n\t// File copying is handled by the base profile via fileMap\n}\n\nfunction onRemoveRulesProfile(targetDir) {\n\t// Clean up .rules (Zed uses .rules directly in root)\n\tconst userRulesFile = path.join(targetDir, '.rules');\n\n\ttry {\n\t\t// Remove Task Master .rules\n\t\tif (fs.existsSync(userRulesFile)) {\n\t\t\tfs.rmSync(userRulesFile, { force: true });\n\t\t\tlog('debug', `[Zed] Removed ${userRulesFile}`);\n\t\t}\n\t} catch (err) {\n\t\tlog('error', `[Zed] Failed to remove Zed instructions: ${err.message}`);\n\t}\n\n\t// MCP Removal: Remove context_servers section\n\tconst mcpConfigPath = path.join(targetDir, '.zed', 'settings.json');\n\n\tif (!fs.existsSync(mcpConfigPath)) {\n\t\tlog('debug', '[Zed] No .zed/settings.json found to clean up');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the current config\n\t\tconst configContent = fs.readFileSync(mcpConfigPath, 'utf8');\n\t\tconst config = JSON.parse(configContent);\n\n\t\t// Check if it has the context_servers section and task-master-ai server\n\t\tif (\n\t\t\tconfig['context_servers'] &&\n\t\t\tconfig['context_servers']['task-master-ai']\n\t\t) {\n\t\t\t// Remove task-master-ai server\n\t\t\tdelete config['context_servers']['task-master-ai'];\n\n\t\t\t// Check if there are other MCP servers in context_servers\n\t\t\tconst remainingServers = Object.keys(config['context_servers']);\n\n\t\t\tif 
(remainingServers.length === 0) {\n\t\t\t\t// No other servers, remove entire context_servers section\n\t\t\t\tdelete config['context_servers'];\n\t\t\t\tlog('debug', '[Zed] Removed empty context_servers section');\n\t\t\t}\n\n\t\t\t// Check if config is now empty\n\t\t\tconst remainingKeys = Object.keys(config);\n\n\t\t\tif (remainingKeys.length === 0) {\n\t\t\t\t// Config is empty, remove entire file\n\t\t\t\tfs.rmSync(mcpConfigPath, { force: true });\n\t\t\t\tlog('info', '[Zed] Removed empty settings.json file');\n\n\t\t\t\t// Check if .zed directory is empty\n\t\t\t\tconst zedDirPath = path.join(targetDir, '.zed');\n\t\t\t\tif (fs.existsSync(zedDirPath)) {\n\t\t\t\t\tconst remainingContents = fs.readdirSync(zedDirPath);\n\t\t\t\t\tif (remainingContents.length === 0) {\n\t\t\t\t\t\tfs.rmSync(zedDirPath, { recursive: true, force: true });\n\t\t\t\t\t\tlog('debug', '[Zed] Removed empty .zed directory');\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Write back the modified config\n\t\t\t\tfs.writeFileSync(\n\t\t\t\t\tmcpConfigPath,\n\t\t\t\t\tJSON.stringify(config, null, '\\t') + '\\n'\n\t\t\t\t);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'[Zed] Removed TaskMaster from settings.json, preserved other configurations'\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\tlog('debug', '[Zed] TaskMaster not found in context_servers');\n\t\t}\n\t} catch (error) {\n\t\tlog('error', `[Zed] Failed to clean up settings.json: ${error.message}`);\n\t}\n}\n\nfunction onPostConvertRulesProfile(targetDir, assetsDir) {\n\t// Handle .rules setup (same as onAddRulesProfile)\n\tonAddRulesProfile(targetDir, assetsDir);\n\n\t// Transform MCP config to Zed format\n\tconst mcpConfigPath = path.join(targetDir, '.zed', 'settings.json');\n\n\tif (!fs.existsSync(mcpConfigPath)) {\n\t\tlog('debug', '[Zed] No .zed/settings.json found to transform');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the generated standard MCP config\n\t\tconst mcpConfigContent = fs.readFileSync(mcpConfigPath, 
'utf8');\n\t\tconst mcpConfig = JSON.parse(mcpConfigContent);\n\n\t\t// Check if it's already in Zed format (has context_servers)\n\t\tif (mcpConfig['context_servers']) {\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t'[Zed] settings.json already in Zed format, skipping transformation'\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\t// Transform to Zed format\n\t\tconst zedConfig = transformToZedFormat(mcpConfig);\n\n\t\t// Write back the transformed config with proper formatting\n\t\tfs.writeFileSync(\n\t\t\tmcpConfigPath,\n\t\t\tJSON.stringify(zedConfig, null, '\\t') + '\\n'\n\t\t);\n\n\t\tlog('info', '[Zed] Transformed settings.json to Zed format');\n\t\tlog('debug', '[Zed] Renamed mcpServers to context_servers');\n\t} catch (error) {\n\t\tlog('error', `[Zed] Failed to transform settings.json: ${error.message}`);\n\t}\n}\n\n// Create and export zed profile using the base factory\nexport const zedProfile = createProfile({\n\tname: 'zed',\n\tdisplayName: 'Zed',\n\turl: 'zed.dev',\n\tdocsUrl: 'zed.dev/docs',\n\tprofileDir: '.zed',\n\trulesDir: '.',\n\tmcpConfig: true,\n\tmcpConfigName: 'settings.json',\n\tincludeDefaultRules: false,\n\tfileMap: {\n\t\t'AGENTS.md': '.rules'\n\t},\n\tonAdd: onAddRulesProfile,\n\tonRemove: onRemoveRulesProfile,\n\tonPostConvert: onPostConvertRulesProfile\n});\n\n// Export lifecycle functions separately to avoid naming conflicts\nexport { onAddRulesProfile, onRemoveRulesProfile, onPostConvertRulesProfile };\n"], ["/claude-task-master/mcp-server/src/tools/remove-subtask.js", "/**\n * tools/remove-subtask.js\n * Tool for removing subtasks from parent tasks\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { removeSubtaskDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the removeSubtask tool with the MCP server\n * @param 
{Object} server - FastMCP server instance\n */\nexport function registerRemoveSubtaskTool(server) {\n\tserver.addTool({\n\t\tname: 'remove_subtask',\n\t\tdescription: 'Remove a subtask from its parent task',\n\t\tparameters: z.object({\n\t\t\tid: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t\"Subtask ID to remove in format 'parentId.subtaskId' (required)\"\n\t\t\t\t),\n\t\t\tconvert: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Convert the subtask to a standalone task instead of deleting it'\n\t\t\t\t),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the tasks file (default: tasks/tasks.json)'\n\t\t\t\t),\n\t\t\tskipGenerate: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Skip regenerating task files'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(`Removing subtask with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await removeSubtaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tconvert: args.convert,\n\t\t\t\t\t\tskipGenerate: 
args.skipGenerate,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Subtask removed successfully: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to remove subtask: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error removing subtask',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in removeSubtask tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/core/utils/path-utils.js", "import path from 'path';\nimport {\n\tfindTasksPath as coreFindTasksPath,\n\tfindPRDPath as coreFindPrdPath,\n\tfindComplexityReportPath as coreFindComplexityReportPath,\n\tfindProjectRoot as coreFindProjectRoot,\n\tnormalizeProjectRoot\n} from '../../../../src/utils/path-utils.js';\nimport { PROJECT_MARKERS } from '../../../../src/constants/paths.js';\n\n/**\n * MCP-specific path utilities that extend core path utilities with session support\n * This module handles session-specific path resolution for the MCP server\n */\n\n/**\n * Silent logger for MCP context to prevent console output\n */\nconst silentLogger = {\n\tinfo: () => {},\n\twarn: () => {},\n\terror: () => {},\n\tdebug: () => {},\n\tsuccess: () => {}\n};\n\n/**\n * Cache for last found project root to improve performance\n */\nexport const lastFoundProjectRoot = null;\n\n/**\n * Find PRD file with MCP support\n * @param {string} [explicitPath] - Explicit path to PRD file (highest priority)\n * @param {Object} [args] - Arguments object for context\n * @param {Object} [log] - Logger object to prevent console logging\n * @returns {string|null} - Resolved path to PRD file or null if not found\n */\nexport function 
findPrdPath(explicitPath, args = null, log = silentLogger) {\n\treturn coreFindPrdPath(explicitPath, args, log);\n}\n\n/**\n * Resolve tasks.json path from arguments\n * Prioritizes explicit path parameter, then uses fallback logic\n * @param {Object} args - Arguments object containing projectRoot and optional file path\n * @param {Object} [log] - Logger object to prevent console logging\n * @returns {string|null} - Resolved path to tasks.json or null if not found\n */\nexport function resolveTasksPath(args, log = silentLogger) {\n\t// Get explicit path from args.file if provided\n\tconst explicitPath = args?.file;\n\tconst rawProjectRoot = args?.projectRoot;\n\n\t// If explicit path is provided and absolute, use it directly\n\tif (explicitPath && path.isAbsolute(explicitPath)) {\n\t\treturn explicitPath;\n\t}\n\n\t// Normalize project root if provided\n\tconst projectRoot = rawProjectRoot\n\t\t? normalizeProjectRoot(rawProjectRoot)\n\t\t: null;\n\n\t// If explicit path is relative, resolve it relative to normalized projectRoot\n\tif (explicitPath && projectRoot) {\n\t\treturn path.resolve(projectRoot, explicitPath);\n\t}\n\n\t// Use core findTasksPath with explicit path and normalized projectRoot context\n\tif (projectRoot) {\n\t\treturn coreFindTasksPath(explicitPath, { projectRoot }, log);\n\t}\n\n\t// Fallback to core function without projectRoot context\n\treturn coreFindTasksPath(explicitPath, null, log);\n}\n\n/**\n * Resolve PRD path from arguments\n * @param {Object} args - Arguments object containing projectRoot and optional input path\n * @param {Object} [log] - Logger object to prevent console logging\n * @returns {string|null} - Resolved path to PRD file or null if not found\n */\nexport function resolvePrdPath(args, log = silentLogger) {\n\t// Get explicit path from args.input if provided\n\tconst explicitPath = args?.input;\n\tconst rawProjectRoot = args?.projectRoot;\n\n\t// If explicit path is provided and absolute, use it directly\n\tif 
(explicitPath && path.isAbsolute(explicitPath)) {\n\t\treturn explicitPath;\n\t}\n\n\t// Normalize project root if provided\n\tconst projectRoot = rawProjectRoot\n\t\t? normalizeProjectRoot(rawProjectRoot)\n\t\t: null;\n\n\t// If explicit path is relative, resolve it relative to normalized projectRoot\n\tif (explicitPath && projectRoot) {\n\t\treturn path.resolve(projectRoot, explicitPath);\n\t}\n\n\t// Use core findPRDPath with explicit path and normalized projectRoot context\n\tif (projectRoot) {\n\t\treturn coreFindPrdPath(explicitPath, { projectRoot }, log);\n\t}\n\n\t// Fallback to core function without projectRoot context\n\treturn coreFindPrdPath(explicitPath, null, log);\n}\n\n/**\n * Resolve complexity report path from arguments\n * @param {Object} args - Arguments object containing projectRoot and optional complexityReport path\n * @param {Object} [log] - Logger object to prevent console logging\n * @returns {string|null} - Resolved path to complexity report or null if not found\n */\nexport function resolveComplexityReportPath(args, log = silentLogger) {\n\t// Get explicit path from args.complexityReport if provided\n\tconst explicitPath = args?.complexityReport;\n\tconst rawProjectRoot = args?.projectRoot;\n\tconst tag = args?.tag;\n\n\t// If explicit path is provided and absolute, use it directly\n\tif (explicitPath && path.isAbsolute(explicitPath)) {\n\t\treturn explicitPath;\n\t}\n\n\t// Normalize project root if provided\n\tconst projectRoot = rawProjectRoot\n\t\t? 
normalizeProjectRoot(rawProjectRoot)\n\t\t: null;\n\n\t// If explicit path is relative, resolve it relative to normalized projectRoot\n\tif (explicitPath && projectRoot) {\n\t\treturn path.resolve(projectRoot, explicitPath);\n\t}\n\n\t// Use core findComplexityReportPath with explicit path and normalized projectRoot context\n\tif (projectRoot) {\n\t\treturn coreFindComplexityReportPath(\n\t\t\texplicitPath,\n\t\t\t{ projectRoot, tag },\n\t\t\tlog\n\t\t);\n\t}\n\n\t// Fallback to core function without projectRoot context\n\treturn coreFindComplexityReportPath(explicitPath, null, log);\n}\n\n/**\n * Resolve any project-relative path from arguments\n * @param {string} relativePath - Relative path to resolve\n * @param {Object} args - Arguments object containing projectRoot\n * @returns {string} - Resolved absolute path\n */\nexport function resolveProjectPath(relativePath, args) {\n\t// Ensure we have a projectRoot from args\n\tif (!args?.projectRoot) {\n\t\tthrow new Error('projectRoot is required in args to resolve project paths');\n\t}\n\n\t// Normalize the project root to prevent double .taskmaster paths\n\tconst projectRoot = normalizeProjectRoot(args.projectRoot);\n\n\t// If already absolute, return as-is\n\tif (path.isAbsolute(relativePath)) {\n\t\treturn relativePath;\n\t}\n\n\t// Resolve relative to normalized projectRoot\n\treturn path.resolve(projectRoot, relativePath);\n}\n\n/**\n * Find project root using core utility\n * @param {string} [startDir] - Directory to start searching from\n * @returns {string|null} - Project root path or null if not found\n */\nexport function findProjectRoot(startDir) {\n\treturn coreFindProjectRoot(startDir);\n}\n\n// MAIN EXPORTS FOR MCP TOOLS - these are the functions MCP tools should use\n\n/**\n * Find tasks.json path from arguments - primary MCP function\n * @param {Object} args - Arguments object containing projectRoot and optional file path\n * @param {Object} [log] - Log function to prevent console logging\n * 
@returns {string|null} - Resolved path to tasks.json or null if not found\n */\nexport function findTasksPath(args, log = silentLogger) {\n\treturn resolveTasksPath(args, log);\n}\n\n/**\n * Find complexity report path from arguments - primary MCP function\n * @param {Object} args - Arguments object containing projectRoot and optional complexityReport path\n * @param {Object} [log] - Log function to prevent console logging\n * @returns {string|null} - Resolved path to complexity report or null if not found\n */\nexport function findComplexityReportPath(args, log = silentLogger) {\n\treturn resolveComplexityReportPath(args, log);\n}\n\n/**\n * Find PRD path - primary MCP function\n * @param {string} [explicitPath] - Explicit path to PRD file\n * @param {Object} [args] - Arguments object for context (not used in current implementation)\n * @param {Object} [log] - Logger object to prevent console logging\n * @returns {string|null} - Resolved path to PRD file or null if not found\n */\nexport function findPRDPath(explicitPath, args = null, log = silentLogger) {\n\treturn findPrdPath(explicitPath, args, log);\n}\n\n// Legacy aliases for backward compatibility - DEPRECATED\nexport const findTasksJsonPath = findTasksPath;\nexport const findComplexityReportJsonPath = findComplexityReportPath;\n\n// Re-export PROJECT_MARKERS for MCP tools that import it from this module\nexport { PROJECT_MARKERS };\n"], ["/claude-task-master/mcp-server/src/core/context-manager.js", "/**\n * context-manager.js\n * Context and cache management for Task Master MCP Server\n */\n\nimport { FastMCP } from 'fastmcp';\nimport { LRUCache } from 'lru-cache';\n\n/**\n * Configuration options for the ContextManager\n * @typedef {Object} ContextManagerConfig\n * @property {number} maxCacheSize - Maximum number of items in the cache\n * @property {number} ttl - Time to live for cached items in milliseconds\n * @property {number} maxContextSize - Maximum size of context window in tokens\n */\n\nexport 
class ContextManager {\n\t/**\n\t * Create a new ContextManager instance\n\t * @param {ContextManagerConfig} config - Configuration options\n\t */\n\tconstructor(config = {}) {\n\t\tthis.config = {\n\t\t\tmaxCacheSize: config.maxCacheSize || 1000,\n\t\t\tttl: config.ttl || 1000 * 60 * 5, // 5 minutes default\n\t\t\tmaxContextSize: config.maxContextSize || 4000\n\t\t};\n\n\t\t// Initialize LRU cache for context data\n\t\tthis.cache = new LRUCache({\n\t\t\tmax: this.config.maxCacheSize,\n\t\t\tttl: this.config.ttl,\n\t\t\tupdateAgeOnGet: true\n\t\t});\n\n\t\t// Cache statistics\n\t\tthis.stats = {\n\t\t\thits: 0,\n\t\t\tmisses: 0,\n\t\t\tinvalidations: 0\n\t\t};\n\t}\n\n\t/**\n\t * Create a new context or retrieve from cache\n\t * @param {string} contextId - Unique identifier for the context\n\t * @param {Object} metadata - Additional metadata for the context\n\t * @returns {Object} Context object with metadata\n\t */\n\tasync getContext(contextId, metadata = {}) {\n\t\tconst cacheKey = this._getCacheKey(contextId, metadata);\n\n\t\t// Try to get from cache first\n\t\tconst cached = this.cache.get(cacheKey);\n\t\tif (cached) {\n\t\t\tthis.stats.hits++;\n\t\t\treturn cached;\n\t\t}\n\n\t\tthis.stats.misses++;\n\n\t\t// Create new context if not in cache\n\t\tconst context = {\n\t\t\tid: contextId,\n\t\t\tmetadata: {\n\t\t\t\t...metadata,\n\t\t\t\tcreated: new Date().toISOString()\n\t\t\t}\n\t\t};\n\n\t\t// Cache the new context\n\t\tthis.cache.set(cacheKey, context);\n\n\t\treturn context;\n\t}\n\n\t/**\n\t * Update an existing context\n\t * @param {string} contextId - Context identifier\n\t * @param {Object} updates - Updates to apply to the context\n\t * @returns {Object} Updated context\n\t */\n\tasync updateContext(contextId, updates) {\n\t\tconst context = await this.getContext(contextId);\n\n\t\t// Apply updates to context\n\t\tObject.assign(context.metadata, updates);\n\n\t\t// Update cache\n\t\tconst cacheKey = this._getCacheKey(contextId, 
context.metadata);\n\t\tthis.cache.set(cacheKey, context);\n\n\t\treturn context;\n\t}\n\n\t/**\n\t * Invalidate a context in the cache\n\t * @param {string} contextId - Context identifier\n\t * @param {Object} metadata - Metadata used in the cache key\n\t */\n\tinvalidateContext(contextId, metadata = {}) {\n\t\tconst cacheKey = this._getCacheKey(contextId, metadata);\n\t\tthis.cache.delete(cacheKey);\n\t\tthis.stats.invalidations++;\n\t}\n\n\t/**\n\t * Get cached data associated with a specific key.\n\t * Increments cache hit stats if found.\n\t * @param {string} key - The cache key.\n\t * @returns {any | undefined} The cached data or undefined if not found/expired.\n\t */\n\tgetCachedData(key) {\n\t\tconst cached = this.cache.get(key);\n\t\tif (cached !== undefined) {\n\t\t\t// Check for undefined specifically, as null/false might be valid cached values\n\t\t\tthis.stats.hits++;\n\t\t\treturn cached;\n\t\t}\n\t\tthis.stats.misses++;\n\t\treturn undefined;\n\t}\n\n\t/**\n\t * Set data in the cache with a specific key.\n\t * @param {string} key - The cache key.\n\t * @param {any} data - The data to cache.\n\t */\n\tsetCachedData(key, data) {\n\t\tthis.cache.set(key, data);\n\t}\n\n\t/**\n\t * Invalidate a specific cache key.\n\t * Increments invalidation stats.\n\t * @param {string} key - The cache key to invalidate.\n\t */\n\tinvalidateCacheKey(key) {\n\t\tthis.cache.delete(key);\n\t\tthis.stats.invalidations++;\n\t}\n\n\t/**\n\t * Get cache statistics\n\t * @returns {Object} Cache statistics\n\t */\n\tgetStats() {\n\t\treturn {\n\t\t\thits: this.stats.hits,\n\t\t\tmisses: this.stats.misses,\n\t\t\tinvalidations: this.stats.invalidations,\n\t\t\tsize: this.cache.size,\n\t\t\tmaxSize: this.config.maxCacheSize,\n\t\t\tttl: this.config.ttl\n\t\t};\n\t}\n\n\t/**\n\t * Generate a cache key from context ID and metadata\n\t * @private\n\t * @deprecated No longer used for direct cache key generation outside the manager.\n\t * Prefer generating specific keys in calling 
functions.\n\t */\n\t_getCacheKey(contextId, metadata) {\n\t\t// Kept for potential backward compatibility or internal use if needed later.\n\t\treturn `${contextId}:${JSON.stringify(metadata)}`;\n\t}\n}\n\n// Export a singleton instance with default config\nexport const contextManager = new ContextManager();\n"], ["/claude-task-master/mcp-server/src/tools/add-task.js", "/**\n * tools/add-task.js\n * Tool to add a new task using AI\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { addTaskDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the addTask tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerAddTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'add_task',\n\t\tdescription: 'Add a new task using AI',\n\t\tparameters: z.object({\n\t\t\tprompt: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Description of the task to add (required if not using manual fields)'\n\t\t\t\t),\n\t\t\ttitle: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Task title (for manual task creation)'),\n\t\t\tdescription: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Task description (for manual task creation)'),\n\t\t\tdetails: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Implementation details (for manual task creation)'),\n\t\t\ttestStrategy: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Test strategy (for manual task creation)'),\n\t\t\tdependencies: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Comma-separated list of task IDs this task depends on'),\n\t\t\tpriority: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Task priority (high, medium, low)'),\n\t\t\tfile: 
z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on'),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Whether to use research capabilities for task creation')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting add-task with args: ${JSON.stringify(args)}`);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct functionP\n\t\t\t\tconst result = await addTaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tprompt: args.prompt,\n\t\t\t\t\t\ttitle: args.title,\n\t\t\t\t\t\tdescription: args.description,\n\t\t\t\t\t\tdetails: args.details,\n\t\t\t\t\t\ttestStrategy: args.testStrategy,\n\t\t\t\t\t\tdependencies: args.dependencies,\n\t\t\t\t\t\tpriority: args.priority,\n\t\t\t\t\t\tresearch: args.research,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error adding task',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} 
catch (error) {\n\t\t\t\tlog.error(`Error in add-task tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/validate-dependencies.js", "/**\n * tools/validate-dependencies.js\n * Tool for validating task dependencies\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { validateDependenciesDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the validateDependencies tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerValidateDependenciesTool(server) {\n\tserver.addTool({\n\t\tname: 'validate_dependencies',\n\t\tdescription:\n\t\t\t'Check tasks for dependency issues (like circular references or links to non-existent tasks) without making changes.',\n\t\tparameters: z.object({\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(`Validating dependencies with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await validateDependenciesDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(\n\t\t\t\t\t\t`Successfully validated dependencies: ${result.data.message}`\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to validate dependencies: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error validating dependencies',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in validateDependencies tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/remove-dependency.js", "/**\n * tools/remove-dependency.js\n * Tool for removing a dependency from a task\n */\n\nimport { z } from 'zod';\nimport 
{\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { removeDependencyDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the removeDependency tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerRemoveDependencyTool(server) {\n\tserver.addTool({\n\t\tname: 'remove_dependency',\n\t\tdescription: 'Remove a dependency from a task',\n\t\tparameters: z.object({\n\t\t\tid: z.string().describe('Task ID to remove dependency from'),\n\t\t\tdependsOn: z.string().describe('Task ID to remove as a dependency'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the tasks file (default: tasks/tasks.json)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(\n\t\t\t\t\t`Removing dependency for task ${args.id} from ${args.dependsOn} with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await removeDependencyDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tdependsOn: args.dependsOn,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully removed dependency: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to remove dependency: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error removing dependency',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in removeDependency tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/index.js", "#!/usr/bin/env node\n\n/**\n * Task Master\n * Copyright (c) 2025 Eyal Toledano, Ralph Khreish\n *\n * This software is licensed under the MIT License 
with Commons Clause.\n * You may use this software for any purpose, including commercial applications,\n * and modify and redistribute it freely, subject to the following restrictions:\n *\n * 1. You may not sell this software or offer it as a service.\n * 2. The origin of this software must not be misrepresented.\n * 3. Altered source versions must be plainly marked as such.\n *\n * For the full license text, see the LICENSE file in the root directory.\n */\n\n/**\n * Claude Task Master\n * A task management system for AI-driven development with Claude\n */\n\n// This file serves as the main entry point for the package\n// The primary functionality is provided through the CLI commands\n\nimport { fileURLToPath } from 'url';\nimport { dirname, resolve } from 'path';\nimport { createRequire } from 'module';\nimport { spawn } from 'child_process';\nimport { Command } from 'commander';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = dirname(__filename);\nconst require = createRequire(import.meta.url);\n\n// Get package information\nconst packageJson = require('./package.json');\n\n// Export the path to the dev.js script for programmatic usage\nexport const devScriptPath = resolve(__dirname, './scripts/dev.js');\n\n// Export a function to initialize a new project programmatically\nexport const initProject = async (options = {}) => {\n\tconst init = await import('./scripts/init.js');\n\treturn init.initializeProject(options);\n};\n\n// Export a function to run init as a CLI command\nexport const runInitCLI = async (options = {}) => {\n\ttry {\n\t\tconst init = await import('./scripts/init.js');\n\t\tconst result = await init.initializeProject(options);\n\t\treturn result;\n\t} catch (error) {\n\t\tconsole.error('Initialization failed:', error.message);\n\t\tif (process.env.DEBUG === 'true') {\n\t\t\tconsole.error('Debug stack trace:', error.stack);\n\t\t}\n\t\tthrow error; // Re-throw to be handled by the command handler\n\t}\n};\n\n// Export 
version information\nexport const version = packageJson.version;\n\n// CLI implementation\nif (import.meta.url === `file://${process.argv[1]}`) {\n\tconst program = new Command();\n\n\tprogram\n\t\t.name('task-master')\n\t\t.description('Claude Task Master CLI')\n\t\t.version(version);\n\n\tprogram\n\t\t.command('init')\n\t\t.description('Initialize a new project')\n\t\t.option('-y, --yes', 'Skip prompts and use default values')\n\t\t.option('-n, --name <n>', 'Project name')\n\t\t.option('-d, --description <description>', 'Project description')\n\t\t.option('-v, --version <version>', 'Project version', '0.1.0')\n\t\t.option('-a, --author <author>', 'Author name')\n\t\t.option('--skip-install', 'Skip installing dependencies')\n\t\t.option('--dry-run', 'Show what would be done without making changes')\n\t\t.option('--aliases', 'Add shell aliases (tm, taskmaster)')\n\t\t.option('--no-aliases', 'Skip shell aliases (tm, taskmaster)')\n\t\t.option('--git', 'Initialize Git repository')\n\t\t.option('--no-git', 'Skip Git repository initialization')\n\t\t.option('--git-tasks', 'Store tasks in Git')\n\t\t.option('--no-git-tasks', 'No Git storage of tasks')\n\t\t.action(async (cmdOptions) => {\n\t\t\ttry {\n\t\t\t\tawait runInitCLI(cmdOptions);\n\t\t\t} catch (err) {\n\t\t\t\tconsole.error('Init failed:', err.message);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\tprogram\n\t\t.command('dev')\n\t\t.description('Run the dev.js script')\n\t\t.allowUnknownOption(true)\n\t\t.action(() => {\n\t\t\tconst args = process.argv.slice(process.argv.indexOf('dev') + 1);\n\t\t\tconst child = spawn('node', [devScriptPath, ...args], {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: process.cwd()\n\t\t\t});\n\n\t\t\tchild.on('close', (code) => {\n\t\t\t\tprocess.exit(code);\n\t\t\t});\n\t\t});\n\n\t// Add shortcuts for common dev.js commands\n\tprogram\n\t\t.command('list')\n\t\t.description('List all tasks')\n\t\t.action(() => {\n\t\t\tconst child = spawn('node', [devScriptPath, 'list'], 
{\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: process.cwd()\n\t\t\t});\n\n\t\t\tchild.on('close', (code) => {\n\t\t\t\tprocess.exit(code);\n\t\t\t});\n\t\t});\n\n\tprogram\n\t\t.command('next')\n\t\t.description('Show the next task to work on')\n\t\t.action(() => {\n\t\t\tconst child = spawn('node', [devScriptPath, 'next'], {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: process.cwd()\n\t\t\t});\n\n\t\t\tchild.on('close', (code) => {\n\t\t\t\tprocess.exit(code);\n\t\t\t});\n\t\t});\n\n\tprogram\n\t\t.command('generate')\n\t\t.description('Generate task files')\n\t\t.action(() => {\n\t\t\tconst child = spawn('node', [devScriptPath, 'generate'], {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: process.cwd()\n\t\t\t});\n\n\t\t\tchild.on('close', (code) => {\n\t\t\t\tprocess.exit(code);\n\t\t\t});\n\t\t});\n\n\tprogram.parse(process.argv);\n}\n"], ["/claude-task-master/src/ai-providers/custom-sdk/claude-code/message-converter.js", "/**\n * @fileoverview Converts AI SDK prompt format to Claude Code message format\n */\n\n/**\n * Convert AI SDK prompt to Claude Code messages format\n * @param {Array} prompt - AI SDK prompt array\n * @param {Object} [mode] - Generation mode\n * @param {string} mode.type - Mode type ('regular', 'object-json', 'object-tool')\n * @returns {{messagesPrompt: string, systemPrompt?: string}}\n */\nexport function convertToClaudeCodeMessages(prompt, mode) {\n\tconst messages = [];\n\tlet systemPrompt;\n\n\tfor (const message of prompt) {\n\t\tswitch (message.role) {\n\t\t\tcase 'system':\n\t\t\t\tsystemPrompt = message.content;\n\t\t\t\tbreak;\n\n\t\t\tcase 'user':\n\t\t\t\tif (typeof message.content === 'string') {\n\t\t\t\t\tmessages.push(message.content);\n\t\t\t\t} else {\n\t\t\t\t\t// Handle multi-part content\n\t\t\t\t\tconst textParts = message.content\n\t\t\t\t\t\t.filter((part) => part.type === 'text')\n\t\t\t\t\t\t.map((part) => part.text)\n\t\t\t\t\t\t.join('\\n');\n\n\t\t\t\t\tif (textParts) 
{\n\t\t\t\t\t\tmessages.push(textParts);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Note: Image parts are not supported by Claude Code CLI\n\t\t\t\t\tconst imageParts = message.content.filter(\n\t\t\t\t\t\t(part) => part.type === 'image'\n\t\t\t\t\t);\n\t\t\t\t\tif (imageParts.length > 0) {\n\t\t\t\t\t\tconsole.warn(\n\t\t\t\t\t\t\t'Claude Code CLI does not support image inputs. Images will be ignored.'\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase 'assistant':\n\t\t\t\tif (typeof message.content === 'string') {\n\t\t\t\t\tmessages.push(`Assistant: ${message.content}`);\n\t\t\t\t} else {\n\t\t\t\t\tconst textParts = message.content\n\t\t\t\t\t\t.filter((part) => part.type === 'text')\n\t\t\t\t\t\t.map((part) => part.text)\n\t\t\t\t\t\t.join('\\n');\n\n\t\t\t\t\tif (textParts) {\n\t\t\t\t\t\tmessages.push(`Assistant: ${textParts}`);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Handle tool calls if present\n\t\t\t\t\tconst toolCalls = message.content.filter(\n\t\t\t\t\t\t(part) => part.type === 'tool-call'\n\t\t\t\t\t);\n\t\t\t\t\tif (toolCalls.length > 0) {\n\t\t\t\t\t\t// For now, we'll just note that tool calls were made\n\t\t\t\t\t\tmessages.push(`Assistant: [Tool calls made]`);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase 'tool':\n\t\t\t\t// Tool results could be included in the conversation\n\t\t\t\tmessages.push(\n\t\t\t\t\t`Tool Result (${message.content[0].toolName}): ${JSON.stringify(\n\t\t\t\t\t\tmessage.content[0].result\n\t\t\t\t\t)}`\n\t\t\t\t);\n\t\t\t\tbreak;\n\t\t}\n\t}\n\n\t// For the SDK, we need to provide a single prompt string\n\t// Format the conversation history properly\n\n\t// Combine system prompt with messages\n\tlet finalPrompt = '';\n\n\t// Add system prompt at the beginning if present\n\tif (systemPrompt) {\n\t\tfinalPrompt = systemPrompt;\n\t}\n\n\tif (messages.length === 0) {\n\t\treturn { messagesPrompt: finalPrompt, systemPrompt };\n\t}\n\n\t// Format messages\n\tconst formattedMessages = [];\n\tfor (let i = 0; i < 
messages.length; i++) {\n\t\tconst msg = messages[i];\n\t\t// Check if this is a user or assistant message based on content\n\t\tif (msg.startsWith('Assistant:') || msg.startsWith('Tool Result')) {\n\t\t\tformattedMessages.push(msg);\n\t\t} else {\n\t\t\t// User messages\n\t\t\tformattedMessages.push(`Human: ${msg}`);\n\t\t}\n\t}\n\n\t// Combine system prompt with messages\n\tif (finalPrompt) {\n\t\tfinalPrompt = finalPrompt + '\\n\\n' + formattedMessages.join('\\n\\n');\n\t} else {\n\t\tfinalPrompt = formattedMessages.join('\\n\\n');\n\t}\n\n\t// For JSON mode, add explicit instruction to ensure JSON output\n\tif (mode?.type === 'object-json') {\n\t\t// Make the JSON instruction even more explicit\n\t\tfinalPrompt = `${finalPrompt}\n\nCRITICAL INSTRUCTION: You MUST respond with ONLY valid JSON. Follow these rules EXACTLY:\n1. Start your response with an opening brace {\n2. End your response with a closing brace }\n3. Do NOT include any text before the opening brace\n4. Do NOT include any text after the closing brace\n5. Do NOT use markdown code blocks or backticks\n6. Do NOT include explanations or commentary\n7. 
The ENTIRE response must be valid JSON that can be parsed with JSON.parse()\n\nBegin your response with { and end with }`;\n\t}\n\n\treturn {\n\t\tmessagesPrompt: finalPrompt,\n\t\tsystemPrompt\n\t};\n}\n"], ["/claude-task-master/mcp-server/src/core/task-master-core.js", "/**\n * task-master-core.js\n * Central module that imports and re-exports all direct function implementations\n * for improved organization and maintainability.\n */\n\n// Import direct function implementations\nimport { listTasksDirect } from './direct-functions/list-tasks.js';\nimport { getCacheStatsDirect } from './direct-functions/cache-stats.js';\nimport { parsePRDDirect } from './direct-functions/parse-prd.js';\nimport { updateTasksDirect } from './direct-functions/update-tasks.js';\nimport { updateTaskByIdDirect } from './direct-functions/update-task-by-id.js';\nimport { updateSubtaskByIdDirect } from './direct-functions/update-subtask-by-id.js';\nimport { generateTaskFilesDirect } from './direct-functions/generate-task-files.js';\nimport { setTaskStatusDirect } from './direct-functions/set-task-status.js';\nimport { showTaskDirect } from './direct-functions/show-task.js';\nimport { nextTaskDirect } from './direct-functions/next-task.js';\nimport { expandTaskDirect } from './direct-functions/expand-task.js';\nimport { addTaskDirect } from './direct-functions/add-task.js';\nimport { addSubtaskDirect } from './direct-functions/add-subtask.js';\nimport { removeSubtaskDirect } from './direct-functions/remove-subtask.js';\nimport { analyzeTaskComplexityDirect } from './direct-functions/analyze-task-complexity.js';\nimport { clearSubtasksDirect } from './direct-functions/clear-subtasks.js';\nimport { expandAllTasksDirect } from './direct-functions/expand-all-tasks.js';\nimport { removeDependencyDirect } from './direct-functions/remove-dependency.js';\nimport { validateDependenciesDirect } from './direct-functions/validate-dependencies.js';\nimport { fixDependenciesDirect } from 
'./direct-functions/fix-dependencies.js';\nimport { complexityReportDirect } from './direct-functions/complexity-report.js';\nimport { addDependencyDirect } from './direct-functions/add-dependency.js';\nimport { removeTaskDirect } from './direct-functions/remove-task.js';\nimport { initializeProjectDirect } from './direct-functions/initialize-project.js';\nimport { modelsDirect } from './direct-functions/models.js';\nimport { moveTaskDirect } from './direct-functions/move-task.js';\nimport { researchDirect } from './direct-functions/research.js';\nimport { addTagDirect } from './direct-functions/add-tag.js';\nimport { deleteTagDirect } from './direct-functions/delete-tag.js';\nimport { listTagsDirect } from './direct-functions/list-tags.js';\nimport { useTagDirect } from './direct-functions/use-tag.js';\nimport { renameTagDirect } from './direct-functions/rename-tag.js';\nimport { copyTagDirect } from './direct-functions/copy-tag.js';\n\n// Re-export utility functions\nexport { findTasksPath } from './utils/path-utils.js';\n\n// Use Map for potential future enhancements like introspection or dynamic dispatch\nexport const directFunctions = new Map([\n\t['listTasksDirect', listTasksDirect],\n\t['getCacheStatsDirect', getCacheStatsDirect],\n\t['parsePRDDirect', parsePRDDirect],\n\t['updateTasksDirect', updateTasksDirect],\n\t['updateTaskByIdDirect', updateTaskByIdDirect],\n\t['updateSubtaskByIdDirect', updateSubtaskByIdDirect],\n\t['generateTaskFilesDirect', generateTaskFilesDirect],\n\t['setTaskStatusDirect', setTaskStatusDirect],\n\t['showTaskDirect', showTaskDirect],\n\t['nextTaskDirect', nextTaskDirect],\n\t['expandTaskDirect', expandTaskDirect],\n\t['addTaskDirect', addTaskDirect],\n\t['addSubtaskDirect', addSubtaskDirect],\n\t['removeSubtaskDirect', removeSubtaskDirect],\n\t['analyzeTaskComplexityDirect', analyzeTaskComplexityDirect],\n\t['clearSubtasksDirect', clearSubtasksDirect],\n\t['expandAllTasksDirect', 
expandAllTasksDirect],\n\t['removeDependencyDirect', removeDependencyDirect],\n\t['validateDependenciesDirect', validateDependenciesDirect],\n\t['fixDependenciesDirect', fixDependenciesDirect],\n\t['complexityReportDirect', complexityReportDirect],\n\t['addDependencyDirect', addDependencyDirect],\n\t['removeTaskDirect', removeTaskDirect],\n\t['initializeProjectDirect', initializeProjectDirect],\n\t['modelsDirect', modelsDirect],\n\t['moveTaskDirect', moveTaskDirect],\n\t['researchDirect', researchDirect],\n\t['addTagDirect', addTagDirect],\n\t['deleteTagDirect', deleteTagDirect],\n\t['listTagsDirect', listTagsDirect],\n\t['useTagDirect', useTagDirect],\n\t['renameTagDirect', renameTagDirect],\n\t['copyTagDirect', copyTagDirect]\n]);\n\n// Re-export all direct function implementations\nexport {\n\tlistTasksDirect,\n\tgetCacheStatsDirect,\n\tparsePRDDirect,\n\tupdateTasksDirect,\n\tupdateTaskByIdDirect,\n\tupdateSubtaskByIdDirect,\n\tgenerateTaskFilesDirect,\n\tsetTaskStatusDirect,\n\tshowTaskDirect,\n\tnextTaskDirect,\n\texpandTaskDirect,\n\taddTaskDirect,\n\taddSubtaskDirect,\n\tremoveSubtaskDirect,\n\tanalyzeTaskComplexityDirect,\n\tclearSubtasksDirect,\n\texpandAllTasksDirect,\n\tremoveDependencyDirect,\n\tvalidateDependenciesDirect,\n\tfixDependenciesDirect,\n\tcomplexityReportDirect,\n\taddDependencyDirect,\n\tremoveTaskDirect,\n\tinitializeProjectDirect,\n\tmodelsDirect,\n\tmoveTaskDirect,\n\tresearchDirect,\n\taddTagDirect,\n\tdeleteTagDirect,\n\tlistTagsDirect,\n\tuseTagDirect,\n\trenameTagDirect,\n\tcopyTagDirect\n};\n"], ["/claude-task-master/mcp-server/src/index.js", "import { FastMCP } from 'fastmcp';\nimport path from 'path';\nimport dotenv from 'dotenv';\nimport { fileURLToPath } from 'url';\nimport fs from 'fs';\nimport logger from './logger.js';\nimport { registerTaskMasterTools } from './tools/index.js';\nimport ProviderRegistry from '../../src/provider-registry/index.js';\nimport { MCPProvider } from './providers/mcp-provider.js';\n\n// Load 
environment variables\ndotenv.config();\n\n// Constants\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = path.dirname(__filename);\n\n/**\n * Main MCP server class that integrates with Task Master\n */\nclass TaskMasterMCPServer {\n\tconstructor() {\n\t\t// Get version from package.json using synchronous fs\n\t\tconst packagePath = path.join(__dirname, '../../package.json');\n\t\tconst packageJson = JSON.parse(fs.readFileSync(packagePath, 'utf8'));\n\n\t\tthis.options = {\n\t\t\tname: 'Task Master MCP Server',\n\t\t\tversion: packageJson.version\n\t\t};\n\n\t\tthis.server = new FastMCP(this.options);\n\t\tthis.initialized = false;\n\n\t\t// Bind methods\n\t\tthis.init = this.init.bind(this);\n\t\tthis.start = this.start.bind(this);\n\t\tthis.stop = this.stop.bind(this);\n\n\t\t// Setup logging\n\t\tthis.logger = logger;\n\t}\n\n\t/**\n\t * Initialize the MCP server with necessary tools and routes\n\t */\n\tasync init() {\n\t\tif (this.initialized) return;\n\n\t\t// Pass the manager instance to the tool registration function\n\t\tregisterTaskMasterTools(this.server, this.asyncManager);\n\n\t\tthis.initialized = true;\n\n\t\treturn this;\n\t}\n\n\t/**\n\t * Start the MCP server\n\t */\n\tasync start() {\n\t\tif (!this.initialized) {\n\t\t\tawait this.init();\n\t\t}\n\n\t\tthis.server.on('connect', (event) => {\n\t\t\tevent.session.server.sendLoggingMessage({\n\t\t\t\tdata: {\n\t\t\t\t\tcontext: event.session.context,\n\t\t\t\t\tmessage: `MCP Server connected: ${event.session.name}`\n\t\t\t\t},\n\t\t\t\tlevel: 'info'\n\t\t\t});\n\t\t\tthis.registerRemoteProvider(event.session);\n\t\t});\n\n\t\t// Start the FastMCP server with increased timeout\n\t\tawait this.server.start({\n\t\t\ttransportType: 'stdio',\n\t\t\ttimeout: 120000 // 2 minutes timeout (in milliseconds)\n\t\t});\n\n\t\treturn this;\n\t}\n\n\t/**\n\t * Register both MCP providers with the provider registry\n\t */\n\tregisterRemoteProvider(session) {\n\t\t// Check if the server has at 
least one session\n\t\tif (session) {\n\t\t\t// Make sure session has required capabilities\n\t\t\tif (!session.clientCapabilities || !session.clientCapabilities.sampling) {\n\t\t\t\tsession.server.sendLoggingMessage({\n\t\t\t\t\tdata: {\n\t\t\t\t\t\tcontext: session.context,\n\t\t\t\t\t\tmessage: `MCP session missing required sampling capabilities, providers not registered`\n\t\t\t\t\t},\n\t\t\t\t\tlevel: 'info'\n\t\t\t\t});\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Register MCP provider with the Provider Registry\n\n\t\t\t// Register the unified MCP provider\n\t\t\tconst mcpProvider = new MCPProvider();\n\t\t\tmcpProvider.setSession(session);\n\n\t\t\t// Register provider with the registry\n\t\t\tconst providerRegistry = ProviderRegistry.getInstance();\n\t\t\tproviderRegistry.registerProvider('mcp', mcpProvider);\n\n\t\t\tsession.server.sendLoggingMessage({\n\t\t\t\tdata: {\n\t\t\t\t\tcontext: session.context,\n\t\t\t\t\tmessage: `MCP Server connected`\n\t\t\t\t},\n\t\t\t\tlevel: 'info'\n\t\t\t});\n\t\t} else {\n\t\t\tsession.server.sendLoggingMessage({\n\t\t\t\tdata: {\n\t\t\t\t\tcontext: session.context,\n\t\t\t\t\tmessage: `No MCP sessions available, providers not registered`\n\t\t\t\t},\n\t\t\t\tlevel: 'warn'\n\t\t\t});\n\t\t}\n\t}\n\n\t/**\n\t * Stop the MCP server\n\t */\n\tasync stop() {\n\t\tif (this.server) {\n\t\t\tawait this.server.stop();\n\t\t}\n\t}\n}\n\nexport default TaskMasterMCPServer;\n"], ["/claude-task-master/src/ai-providers/google-vertex.js", "/**\n * google-vertex.js\n * AI provider implementation for Google Vertex AI models using Vercel AI SDK.\n */\n\nimport { createVertex } from '@ai-sdk/google-vertex';\nimport { BaseAIProvider } from './base-provider.js';\nimport { resolveEnvVariable } from '../../scripts/modules/utils.js';\nimport { log } from '../../scripts/modules/utils.js';\n\n// Vertex-specific error classes\nclass VertexAuthError extends Error {\n\tconstructor(message) {\n\t\tsuper(message);\n\t\tthis.name = 
'VertexAuthError';\n\t\tthis.code = 'vertex_auth_error';\n\t}\n}\n\nclass VertexConfigError extends Error {\n\tconstructor(message) {\n\t\tsuper(message);\n\t\tthis.name = 'VertexConfigError';\n\t\tthis.code = 'vertex_config_error';\n\t}\n}\n\nclass VertexApiError extends Error {\n\tconstructor(message, statusCode) {\n\t\tsuper(message);\n\t\tthis.name = 'VertexApiError';\n\t\tthis.code = 'vertex_api_error';\n\t\tthis.statusCode = statusCode;\n\t}\n}\n\nexport class VertexAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Google Vertex AI';\n\t}\n\n\t/**\n\t * Returns the required API key environment variable name for Google Vertex AI.\n\t * @returns {string} The environment variable name\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'GOOGLE_API_KEY';\n\t}\n\n\t/**\n\t * Validates Vertex AI-specific authentication parameters\n\t * @param {object} params - Parameters to validate\n\t * @throws {Error} If required parameters are missing\n\t */\n\tvalidateAuth(params) {\n\t\tconst { apiKey, projectId, location, credentials } = params;\n\n\t\t// Check for API key OR service account credentials\n\t\tif (!apiKey && !credentials) {\n\t\t\tthrow new VertexAuthError(\n\t\t\t\t'Either Google API key (GOOGLE_API_KEY) or service account credentials (GOOGLE_APPLICATION_CREDENTIALS) is required for Vertex AI'\n\t\t\t);\n\t\t}\n\n\t\t// Project ID is required for Vertex AI\n\t\tif (!projectId) {\n\t\t\tthrow new VertexConfigError(\n\t\t\t\t'Google Cloud project ID is required for Vertex AI. Set VERTEX_PROJECT_ID environment variable.'\n\t\t\t);\n\t\t}\n\n\t\t// Location is required for Vertex AI\n\t\tif (!location) {\n\t\t\tthrow new VertexConfigError(\n\t\t\t\t'Google Cloud location is required for Vertex AI. 
Set VERTEX_LOCATION environment variable (e.g., \"us-central1\").'\n\t\t\t);\n\t\t}\n\t}\n\n\t/**\n\t * Creates and returns a Google Vertex AI client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} [params.apiKey] - Google API key\n\t * @param {string} params.projectId - Google Cloud project ID\n\t * @param {string} params.location - Google Cloud location (e.g., \"us-central1\")\n\t * @param {object} [params.credentials] - Service account credentials object\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} Google Vertex AI client function\n\t * @throws {Error} If required parameters are missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\t// Validate required parameters\n\t\t\tthis.validateAuth(params);\n\n\t\t\tconst { apiKey, projectId, location, credentials, baseURL } = params;\n\n\t\t\t// Configure auth options - either API key or service account\n\t\t\tconst authOptions = {};\n\t\t\tif (apiKey) {\n\t\t\t\tauthOptions.apiKey = apiKey;\n\t\t\t} else if (credentials) {\n\t\t\t\tauthOptions.googleAuthOptions = credentials;\n\t\t\t}\n\n\t\t\t// Return Vertex AI client\n\t\t\treturn createVertex({\n\t\t\t\t...authOptions,\n\t\t\t\tprojectId,\n\t\t\t\tlocation,\n\t\t\t\t...(baseURL && { baseURL })\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n\n\t/**\n\t * Handle errors from Vertex AI\n\t * @param {string} operation - Description of the operation that failed\n\t * @param {Error} error - The error object\n\t * @throws {Error} Rethrows the error with additional context\n\t */\n\thandleError(operation, error) {\n\t\tlog('error', `Vertex AI ${operation} error:`, error);\n\n\t\t// Handle known error types\n\t\tif (\n\t\t\terror.name === 'VertexAuthError' ||\n\t\t\terror.name === 'VertexConfigError' ||\n\t\t\terror.name === 'VertexApiError'\n\t\t) {\n\t\t\tthrow error;\n\t\t}\n\n\t\t// Handle 
network/API errors\n\t\tif (error.response) {\n\t\t\tconst statusCode = error.response.status;\n\t\t\tconst errorMessage = error.response.data?.error?.message || error.message;\n\n\t\t\t// Categorize by status code\n\t\t\tif (statusCode === 401 || statusCode === 403) {\n\t\t\t\tthrow new VertexAuthError(`Authentication failed: ${errorMessage}`);\n\t\t\t} else if (statusCode === 400) {\n\t\t\t\tthrow new VertexConfigError(`Invalid request: ${errorMessage}`);\n\t\t\t} else {\n\t\t\t\tthrow new VertexApiError(\n\t\t\t\t\t`API error (${statusCode}): ${errorMessage}`,\n\t\t\t\t\tstatusCode\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\t// Generic error handling\n\t\tthrow new Error(`Vertex AI ${operation} failed: ${error.message}`);\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/tools/fix-dependencies.js", "/**\n * tools/fix-dependencies.js\n * Tool for automatically fixing invalid task dependencies\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { fixDependenciesDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n/**\n * Register the fixDependencies tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerFixDependenciesTool(server) {\n\tserver.addTool({\n\t\tname: 'fix_dependencies',\n\t\tdescription: 'Fix invalid dependencies in tasks automatically',\n\t\tparameters: z.object({\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Fixing dependencies with args: ${JSON.stringify(args)}`);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await fixDependenciesDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully fixed dependencies: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to fix dependencies: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error fixing dependencies',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in fixDependencies tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/expand-all.js", "/**\n * tools/expand-all.js\n * Tool for expanding all pending tasks with subtasks\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { expandAllTasksDirect } 
from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the expandAll tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerExpandAllTool(server) {\n\tserver.addTool({\n\t\tname: 'expand_all',\n\t\tdescription:\n\t\t\t'Expand all pending tasks into subtasks based on complexity or defaults',\n\t\tparameters: z.object({\n\t\t\tnum: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Target number of subtasks per task (uses complexity/defaults otherwise)'\n\t\t\t\t),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Enable research-backed subtask generation (e.g., using Perplexity)'\n\t\t\t\t),\n\t\t\tprompt: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Additional context to guide subtask generation for all tasks'\n\t\t\t\t),\n\t\t\tforce: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Force regeneration of subtasks for tasks that already have them'\n\t\t\t\t),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the tasks file in the /tasks folder inside the project root (default: tasks/tasks.json)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the project root directory (derived from session if possible)'\n\t\t\t\t),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Tool expand_all execution started with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlet 
tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t\tlog.info(`Resolved tasks.json path: ${tasksJsonPath}`);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await expandAllTasksDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tnum: args.num,\n\t\t\t\t\t\tresearch: args.research,\n\t\t\t\t\t\tprompt: args.prompt,\n\t\t\t\t\t\tforce: args.force,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error expanding all tasks',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(\n\t\t\t\t\t`Unexpected error in expand_all tool execute: ${error.message}`\n\t\t\t\t);\n\t\t\t\tif (error.stack) {\n\t\t\t\t\tlog.error(error.stack);\n\t\t\t\t}\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`An unexpected error occurred: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/index.js", "/**\n * tools/index.js\n * Export all Task Master CLI tools for MCP server\n */\n\nimport { registerListTasksTool } from './get-tasks.js';\nimport logger from '../logger.js';\nimport { registerSetTaskStatusTool } from './set-task-status.js';\nimport { registerParsePRDTool } from './parse-prd.js';\nimport { registerUpdateTool } from './update.js';\nimport { registerUpdateTaskTool } from './update-task.js';\nimport { registerUpdateSubtaskTool } from './update-subtask.js';\nimport { registerGenerateTool } from './generate.js';\nimport { registerShowTaskTool } from 
'./get-task.js';\nimport { registerNextTaskTool } from './next-task.js';\nimport { registerExpandTaskTool } from './expand-task.js';\nimport { registerAddTaskTool } from './add-task.js';\nimport { registerAddSubtaskTool } from './add-subtask.js';\nimport { registerRemoveSubtaskTool } from './remove-subtask.js';\nimport { registerAnalyzeProjectComplexityTool } from './analyze.js';\nimport { registerClearSubtasksTool } from './clear-subtasks.js';\nimport { registerExpandAllTool } from './expand-all.js';\nimport { registerRemoveDependencyTool } from './remove-dependency.js';\nimport { registerValidateDependenciesTool } from './validate-dependencies.js';\nimport { registerFixDependenciesTool } from './fix-dependencies.js';\nimport { registerComplexityReportTool } from './complexity-report.js';\nimport { registerAddDependencyTool } from './add-dependency.js';\nimport { registerRemoveTaskTool } from './remove-task.js';\nimport { registerInitializeProjectTool } from './initialize-project.js';\nimport { registerModelsTool } from './models.js';\nimport { registerMoveTaskTool } from './move-task.js';\nimport { registerResponseLanguageTool } from './response-language.js';\nimport { registerAddTagTool } from './add-tag.js';\nimport { registerDeleteTagTool } from './delete-tag.js';\nimport { registerListTagsTool } from './list-tags.js';\nimport { registerUseTagTool } from './use-tag.js';\nimport { registerRenameTagTool } from './rename-tag.js';\nimport { registerCopyTagTool } from './copy-tag.js';\nimport { registerResearchTool } from './research.js';\nimport { registerRulesTool } from './rules.js';\n\n/**\n * Register all Task Master tools with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerTaskMasterTools(server) {\n\ttry {\n\t\t// Register each tool in a logical workflow order\n\n\t\t// Group 1: Initialization & 
Setup\n\t\tregisterInitializeProjectTool(server);\n\t\tregisterModelsTool(server);\n\t\tregisterRulesTool(server);\n\t\tregisterParsePRDTool(server);\n\n\t\t// Group 2: Task Analysis & Expansion\n\t\tregisterAnalyzeProjectComplexityTool(server);\n\t\tregisterExpandTaskTool(server);\n\t\tregisterExpandAllTool(server);\n\n\t\t// Group 3: Task Listing & Viewing\n\t\tregisterListTasksTool(server);\n\t\tregisterShowTaskTool(server);\n\t\tregisterNextTaskTool(server);\n\t\tregisterComplexityReportTool(server);\n\n\t\t// Group 4: Task Status & Management\n\t\tregisterSetTaskStatusTool(server);\n\t\tregisterGenerateTool(server);\n\n\t\t// Group 5: Task Creation & Modification\n\t\tregisterAddTaskTool(server);\n\t\tregisterAddSubtaskTool(server);\n\t\tregisterUpdateTool(server);\n\t\tregisterUpdateTaskTool(server);\n\t\tregisterUpdateSubtaskTool(server);\n\t\tregisterRemoveTaskTool(server);\n\t\tregisterRemoveSubtaskTool(server);\n\t\tregisterClearSubtasksTool(server);\n\t\tregisterMoveTaskTool(server);\n\n\t\t// Group 6: Dependency Management\n\t\tregisterAddDependencyTool(server);\n\t\tregisterRemoveDependencyTool(server);\n\t\tregisterValidateDependenciesTool(server);\n\t\tregisterFixDependenciesTool(server);\n\t\tregisterResponseLanguageTool(server);\n\n\t\t// Group 7: Tag Management\n\t\tregisterListTagsTool(server);\n\t\tregisterAddTagTool(server);\n\t\tregisterDeleteTagTool(server);\n\t\tregisterUseTagTool(server);\n\t\tregisterRenameTagTool(server);\n\t\tregisterCopyTagTool(server);\n\n\t\t// Group 8: Research Features\n\t\tregisterResearchTool(server);\n\t} catch (error) {\n\t\tlogger.error(`Error registering Task Master tools: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\nexport default {\n\tregisterTaskMasterTools\n};\n"], ["/claude-task-master/scripts/test-claude.js", "#!/usr/bin/env node\n\n/**\n * test-claude.js\n *\n * A simple test script to verify the improvements to the callClaude function.\n * This script tests different scenarios:\n * 1. 
Normal operation with a small PRD\n * 2. Testing with a large number of tasks (to potentially trigger task reduction)\n * 3. Simulating a failure to test retry logic\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport dotenv from 'dotenv';\nimport { fileURLToPath } from 'url';\nimport { dirname } from 'path';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = dirname(__filename);\n\n// Load environment variables from .env file\ndotenv.config();\n\n// Create a simple PRD for testing\nconst createTestPRD = (size = 'small', taskComplexity = 'simple') => {\n\tlet content = `# Test PRD - ${size.toUpperCase()} SIZE, ${taskComplexity.toUpperCase()} COMPLEXITY\\n\\n`;\n\n\t// Add more content based on size\n\tif (size === 'small') {\n\t\tcontent += `\n## Overview\nThis is a small test PRD to verify the callClaude function improvements.\n\n## Requirements\n1. Create a simple web application\n2. Implement user authentication\n3. Add a dashboard for users\n4. Create an admin panel\n5. Implement data visualization\n\n## Technical Stack\n- Frontend: React\n- Backend: Node.js\n- Database: MongoDB\n`;\n\t} else if (size === 'medium') {\n\t\t// Medium-sized PRD with more requirements\n\t\tcontent += `\n## Overview\nThis is a medium-sized test PRD to verify the callClaude function improvements.\n\n## Requirements\n1. Create a web application with multiple pages\n2. Implement user authentication with OAuth\n3. Add a dashboard for users with customizable widgets\n4. Create an admin panel with user management\n5. Implement data visualization with charts and graphs\n6. Add real-time notifications\n7. Implement a search feature\n8. Add user profile management\n9. Implement role-based access control\n10. Add a reporting system\n11. Implement file uploads and management\n12. Add a commenting system\n13. Implement a rating system\n14. Add a recommendation engine\n15. 
Implement a payment system\n\n## Technical Stack\n- Frontend: React with TypeScript\n- Backend: Node.js with Express\n- Database: MongoDB with Mongoose\n- Authentication: JWT and OAuth\n- Deployment: Docker and Kubernetes\n- CI/CD: GitHub Actions\n- Monitoring: Prometheus and Grafana\n`;\n\t} else if (size === 'large') {\n\t\t// Large PRD with many requirements\n\t\tcontent += `\n## Overview\nThis is a large test PRD to verify the callClaude function improvements.\n\n## Requirements\n`;\n\t\t// Generate 30 requirements\n\t\tfor (let i = 1; i <= 30; i++) {\n\t\t\tcontent += `${i}. Requirement ${i} - This is a detailed description of requirement ${i}.\\n`;\n\t\t}\n\n\t\tcontent += `\n## Technical Stack\n- Frontend: React with TypeScript\n- Backend: Node.js with Express\n- Database: MongoDB with Mongoose\n- Authentication: JWT and OAuth\n- Deployment: Docker and Kubernetes\n- CI/CD: GitHub Actions\n- Monitoring: Prometheus and Grafana\n\n## User Stories\n`;\n\t\t// Generate 20 user stories\n\t\tfor (let i = 1; i <= 20; i++) {\n\t\t\tcontent += `- As a user, I want to be able to ${i} so that I can achieve benefit ${i}.\\n`;\n\t\t}\n\n\t\tcontent += `\n## Non-Functional Requirements\n- Performance: The system should respond within 200ms\n- Scalability: The system should handle 10,000 concurrent users\n- Availability: The system should have 99.9% uptime\n- Security: The system should comply with OWASP top 10\n- Accessibility: The system should comply with WCAG 2.1 AA\n`;\n\t}\n\n\t// Add complexity if needed\n\tif (taskComplexity === 'complex') {\n\t\tcontent += `\n## Complex Requirements\n- Implement a real-time collaboration system\n- Add a machine learning-based recommendation engine\n- Implement a distributed caching system\n- Add a microservices architecture\n- Implement a custom analytics engine\n- Add support for multiple languages and locales\n- Implement a custom search engine with advanced filtering\n- Add a custom workflow engine\n- Implement a custom 
reporting system\n- Add a custom dashboard builder\n`;\n\t}\n\n\treturn content;\n};\n\n// Function to run the tests\nasync function runTests() {\n\tconsole.log('Starting tests for callClaude function improvements...');\n\n\ttry {\n\t\t// Instead of importing the callClaude function directly, we'll use the dev.js script\n\t\t// with our test PRDs by running it as a child process\n\n\t\t// Test 1: Small PRD, 5 tasks\n\t\tconsole.log('\\n=== Test 1: Small PRD, 5 tasks ===');\n\t\tconst smallPRD = createTestPRD('small', 'simple');\n\t\tconst smallPRDPath = path.join(__dirname, 'test-small-prd.txt');\n\t\tfs.writeFileSync(smallPRDPath, smallPRD, 'utf8');\n\n\t\tconsole.log(`Created test PRD at ${smallPRDPath}`);\n\t\tconsole.log('Running dev.js with small PRD...');\n\n\t\t// Use the child_process module to run the dev.js script\n\t\tconst { execSync } = await import('child_process');\n\n\t\ttry {\n\t\t\tconst smallResult = execSync(\n\t\t\t\t`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${smallPRDPath} --num-tasks=5`,\n\t\t\t\t{\n\t\t\t\t\tstdio: 'inherit'\n\t\t\t\t}\n\t\t\t);\n\t\t\tconsole.log('Small PRD test completed successfully');\n\t\t} catch (error) {\n\t\t\tconsole.error('Small PRD test failed:', error.message);\n\t\t}\n\n\t\t// Test 2: Medium PRD, 15 tasks\n\t\tconsole.log('\\n=== Test 2: Medium PRD, 15 tasks ===');\n\t\tconst mediumPRD = createTestPRD('medium', 'simple');\n\t\tconst mediumPRDPath = path.join(__dirname, 'test-medium-prd.txt');\n\t\tfs.writeFileSync(mediumPRDPath, mediumPRD, 'utf8');\n\n\t\tconsole.log(`Created test PRD at ${mediumPRDPath}`);\n\t\tconsole.log('Running dev.js with medium PRD...');\n\n\t\ttry {\n\t\t\tconst mediumResult = execSync(\n\t\t\t\t`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${mediumPRDPath} --num-tasks=15`,\n\t\t\t\t{\n\t\t\t\t\tstdio: 'inherit'\n\t\t\t\t}\n\t\t\t);\n\t\t\tconsole.log('Medium PRD test completed successfully');\n\t\t} catch (error) {\n\t\t\tconsole.error('Medium PRD test 
failed:', error.message);\n\t\t}\n\n\t\t// Test 3: Large PRD, 25 tasks\n\t\tconsole.log('\\n=== Test 3: Large PRD, 25 tasks ===');\n\t\tconst largePRD = createTestPRD('large', 'complex');\n\t\tconst largePRDPath = path.join(__dirname, 'test-large-prd.txt');\n\t\tfs.writeFileSync(largePRDPath, largePRD, 'utf8');\n\n\t\tconsole.log(`Created test PRD at ${largePRDPath}`);\n\t\tconsole.log('Running dev.js with large PRD...');\n\n\t\ttry {\n\t\t\tconst largeResult = execSync(\n\t\t\t\t`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${largePRDPath} --num-tasks=25`,\n\t\t\t\t{\n\t\t\t\t\tstdio: 'inherit'\n\t\t\t\t}\n\t\t\t);\n\t\t\tconsole.log('Large PRD test completed successfully');\n\t\t} catch (error) {\n\t\t\tconsole.error('Large PRD test failed:', error.message);\n\t\t}\n\n\t\tconsole.log('\\nAll tests completed!');\n\t} catch (error) {\n\t\tconsole.error('Test failed:', error);\n\t} finally {\n\t\t// Clean up test files\n\t\tconsole.log('\\nCleaning up test files...');\n\t\tconst testFiles = [\n\t\t\tpath.join(__dirname, 'test-small-prd.txt'),\n\t\t\tpath.join(__dirname, 'test-medium-prd.txt'),\n\t\t\tpath.join(__dirname, 'test-large-prd.txt')\n\t\t];\n\n\t\ttestFiles.forEach((file) => {\n\t\t\tif (fs.existsSync(file)) {\n\t\t\t\tfs.unlinkSync(file);\n\t\t\t\tconsole.log(`Deleted ${file}`);\n\t\t\t}\n\t\t});\n\n\t\tconsole.log('Cleanup complete.');\n\t}\n}\n\n// Run the tests\nrunTests().catch((error) => {\n\tconsole.error('Error running tests:', error);\n\tprocess.exit(1);\n});\n"], ["/claude-task-master/mcp-server/src/tools/add-tag.js", "/**\n * tools/add-tag.js\n * Tool to create a new tag\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { addTagDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\n\n/**\n * Register the addTag tool with the MCP server\n * @param {Object} server - FastMCP server 
instance\n */\nexport function registerAddTagTool(server) {\n\tserver.addTool({\n\t\tname: 'add_tag',\n\t\tdescription: 'Create a new tag for organizing tasks in different contexts',\n\t\tparameters: z.object({\n\t\t\tname: z.string().describe('Name of the new tag to create'),\n\t\t\tcopyFromCurrent: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Whether to copy tasks from the current tag (default: false)'\n\t\t\t\t),\n\t\t\tcopyFromTag: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Specific tag to copy tasks from'),\n\t\t\tfromBranch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Create tag name from current git branch (ignores name parameter)'\n\t\t\t\t),\n\t\t\tdescription: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Optional description for the tag'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting add-tag with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function\n\t\t\t\tconst result = await addTagDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tname: args.name,\n\t\t\t\t\t\tcopyFromCurrent: args.copyFromCurrent,\n\t\t\t\t\t\tcopyFromTag: args.copyFromTag,\n\t\t\t\t\t\tfromBranch: args.fromBranch,\n\t\t\t\t\t\tdescription: args.description,\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error creating tag',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in add-tag tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/rename-tag.js", "/**\n * tools/rename-tag.js\n * Tool to rename an existing tag\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { renameTagDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\n\n/**\n * Register the renameTag tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport 
function registerRenameTagTool(server) {\n\tserver.addTool({\n\t\tname: 'rename_tag',\n\t\tdescription: 'Rename an existing tag',\n\t\tparameters: z.object({\n\t\t\toldName: z.string().describe('Current name of the tag to rename'),\n\t\t\tnewName: z.string().describe('New name for the tag'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting rename-tag with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function\n\t\t\t\tconst result = await renameTagDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\toldName: args.oldName,\n\t\t\t\t\t\tnewName: args.newName,\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error renaming tag',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in rename-tag tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/scripts/modules/task-manager.js", "/**\n * task-manager.js\n * Task management functions for the Task Master CLI\n 
*/\n\nimport { findTaskById } from './utils.js';\nimport parsePRD from './task-manager/parse-prd.js';\nimport updateTasks from './task-manager/update-tasks.js';\nimport updateTaskById from './task-manager/update-task-by-id.js';\nimport generateTaskFiles from './task-manager/generate-task-files.js';\nimport setTaskStatus from './task-manager/set-task-status.js';\nimport updateSingleTaskStatus from './task-manager/update-single-task-status.js';\nimport listTasks from './task-manager/list-tasks.js';\nimport expandTask from './task-manager/expand-task.js';\nimport expandAllTasks from './task-manager/expand-all-tasks.js';\nimport clearSubtasks from './task-manager/clear-subtasks.js';\nimport addTask from './task-manager/add-task.js';\nimport analyzeTaskComplexity from './task-manager/analyze-task-complexity.js';\nimport findNextTask from './task-manager/find-next-task.js';\nimport addSubtask from './task-manager/add-subtask.js';\nimport removeSubtask from './task-manager/remove-subtask.js';\nimport updateSubtaskById from './task-manager/update-subtask-by-id.js';\nimport removeTask from './task-manager/remove-task.js';\nimport taskExists from './task-manager/task-exists.js';\nimport isTaskDependentOn from './task-manager/is-task-dependent.js';\nimport setResponseLanguage from './task-manager/response-language.js';\nimport moveTask from './task-manager/move-task.js';\nimport { migrateProject } from './task-manager/migrate.js';\nimport { performResearch } from './task-manager/research.js';\nimport { readComplexityReport } from './utils.js';\n\n// Export task manager functions\nexport 
{\n\tparsePRD,\n\tupdateTasks,\n\tupdateTaskById,\n\tupdateSubtaskById,\n\tgenerateTaskFiles,\n\tsetTaskStatus,\n\tupdateSingleTaskStatus,\n\tlistTasks,\n\texpandTask,\n\texpandAllTasks,\n\tclearSubtasks,\n\taddTask,\n\taddSubtask,\n\tremoveSubtask,\n\tfindNextTask,\n\tanalyzeTaskComplexity,\n\tremoveTask,\n\tfindTaskById,\n\ttaskExists,\n\tisTaskDependentOn,\n\tsetResponseLanguage,\n\tmoveTask,\n\treadComplexityReport,\n\tmigrateProject,\n\tperformResearch\n};\n"], ["/claude-task-master/mcp-test.js", "#!/usr/bin/env node\n\nimport { Config } from 'fastmcp';\nimport path from 'path';\nimport fs from 'fs';\n\n// Log the current directory\nconsole.error(`Current working directory: ${process.cwd()}`);\n\ntry {\n\tconsole.error('Attempting to load FastMCP Config...');\n\n\t// Check if .cursor/mcp.json exists\n\tconst mcpPath = path.join(process.cwd(), '.cursor', 'mcp.json');\n\tconsole.error(`Checking if mcp.json exists at: ${mcpPath}`);\n\n\tif (fs.existsSync(mcpPath)) {\n\t\tconsole.error('mcp.json file found');\n\t\tconsole.error(\n\t\t\t`File content: ${JSON.stringify(JSON.parse(fs.readFileSync(mcpPath, 'utf8')), null, 2)}`\n\t\t);\n\t} else {\n\t\tconsole.error('mcp.json file not found');\n\t}\n\n\t// Try to create Config\n\tconst config = new Config();\n\tconsole.error('Config created successfully');\n\n\t// Check if env property exists\n\tif (config.env) {\n\t\tconsole.error(\n\t\t\t`Config.env exists with keys: ${Object.keys(config.env).join(', ')}`\n\t\t);\n\n\t\t// Print each env var value (careful with sensitive values)\n\t\tfor (const [key, value] of Object.entries(config.env)) {\n\t\t\tif (key.includes('KEY')) {\n\t\t\t\tconsole.error(`${key}: [value hidden]`);\n\t\t\t} else {\n\t\t\t\tconsole.error(`${key}: ${value}`);\n\t\t\t}\n\t\t}\n\t} else {\n\t\tconsole.error('Config.env does not exist');\n\t}\n} catch (error) {\n\tconsole.error(`Error loading Config: ${error.message}`);\n\tconsole.error(`Stack trace: ${error.stack}`);\n}\n\n// Log process.env to 
see if values from mcp.json were loaded automatically\nconsole.error('\\nChecking if process.env already has values from mcp.json:');\nconst envVars = [\n\t'ANTHROPIC_API_KEY',\n\t'PERPLEXITY_API_KEY',\n\t'MODEL',\n\t'PERPLEXITY_MODEL',\n\t'MAX_TOKENS',\n\t'TEMPERATURE',\n\t'DEFAULT_SUBTASKS',\n\t'DEFAULT_PRIORITY'\n];\n\nfor (const varName of envVars) {\n\tif (process.env[varName]) {\n\t\tif (varName.includes('KEY')) {\n\t\t\tconsole.error(`${varName}: [value hidden]`);\n\t\t} else {\n\t\t\tconsole.error(`${varName}: ${process.env[varName]}`);\n\t\t}\n\t} else {\n\t\tconsole.error(`${varName}: not set`);\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/tools/copy-tag.js", "/**\n * tools/copy-tag.js\n * Tool to copy an existing tag to a new tag\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { copyTagDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\n\n/**\n * Register the copyTag tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerCopyTagTool(server) {\n\tserver.addTool({\n\t\tname: 'copy_tag',\n\t\tdescription:\n\t\t\t'Copy an existing tag to create a new tag with all tasks and metadata',\n\t\tparameters: z.object({\n\t\t\tsourceName: z.string().describe('Name of the source tag to copy from'),\n\t\t\ttargetName: z.string().describe('Name of the new tag to create'),\n\t\t\tdescription: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Optional description for the new tag'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting copy-tag with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function\n\t\t\t\tconst result = await copyTagDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tsourceName: args.sourceName,\n\t\t\t\t\t\ttargetName: args.targetName,\n\t\t\t\t\t\tdescription: args.description,\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error copying tag',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in copy-tag tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/add-dependency.js", "/**\n * tools/add-dependency.js\n * Tool for adding a dependency to a task\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { addDependencyDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the addDependency tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport 
function registerAddDependencyTool(server) {\n\tserver.addTool({\n\t\tname: 'add_dependency',\n\t\tdescription: 'Add a dependency relationship between two tasks',\n\t\tparameters: z.object({\n\t\t\tid: z.string().describe('ID of task that will depend on another task'),\n\t\t\tdependsOn: z\n\t\t\t\t.string()\n\t\t\t\t.describe('ID of task that will become a dependency'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the tasks file (default: tasks/tasks.json)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Adding dependency for task ${args.id} to depend on ${args.dependsOn}`\n\t\t\t\t);\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function with the resolved path\n\t\t\t\tconst result = await addDependencyDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\t// Pass the explicitly resolved path\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\t// Pass other relevant args\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tdependsOn: args.dependsOn,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog\n\t\t\t\t\t// Remove context object\n\t\t\t\t);\n\n\t\t\t\t// Log result\n\t\t\t\tif (result.success) 
{\n\t\t\t\t\tlog.info(`Successfully added dependency: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to add dependency: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\t// Use handleApiResult to format the response\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error adding dependency',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in addDependency tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/complexity-report.js", "/**\n * complexity-report.js\n * Direct function implementation for displaying complexity analysis report\n */\n\nimport {\n\treadComplexityReport,\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for displaying the complexity report with error handling and caching.\n *\n * @param {Object} args - Command arguments containing reportPath.\n * @param {string} args.reportPath - Explicit path to the complexity report file.\n * @param {Object} log - Logger object\n * @returns {Promise<Object>} - Result object with success status and data/error information\n */\nexport async function complexityReportDirect(args, log) {\n\t// Destructure expected args\n\tconst { reportPath } = args;\n\ttry {\n\t\tlog.info(`Getting complexity report with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if reportPath was provided\n\t\tif (!reportPath) {\n\t\t\tlog.error('complexityReportDirect called without reportPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: 'reportPath is required' }\n\t\t\t};\n\t\t}\n\n\t\t// Use the provided report path\n\t\tlog.info(`Looking for complexity report at: ${reportPath}`);\n\n\t\t// Generate cache key based on report path\n\t\tconst cacheKey = 
`complexityReport:${reportPath}`;\n\n\t\t// Define the core action function to read the report\n\t\tconst coreActionFn = async () => {\n\t\t\ttry {\n\t\t\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\t\t\tenableSilentMode();\n\n\t\t\t\tconst report = readComplexityReport(reportPath);\n\n\t\t\t\t// Restore normal logging\n\t\t\t\tdisableSilentMode();\n\n\t\t\t\tif (!report) {\n\t\t\t\t\tlog.warn(`No complexity report found at ${reportPath}`);\n\t\t\t\t\treturn {\n\t\t\t\t\t\tsuccess: false,\n\t\t\t\t\t\terror: {\n\t\t\t\t\t\t\tcode: 'FILE_NOT_FOUND_ERROR',\n\t\t\t\t\t\t\tmessage: `No complexity report found at ${reportPath}. Run 'analyze-complexity' first.`\n\t\t\t\t\t\t}\n\t\t\t\t\t};\n\t\t\t\t}\n\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: true,\n\t\t\t\t\tdata: {\n\t\t\t\t\t\treport,\n\t\t\t\t\t\treportPath\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t} catch (error) {\n\t\t\t\t// Make sure to restore normal logging even if there's an error\n\t\t\t\tdisableSilentMode();\n\n\t\t\t\tlog.error(`Error reading complexity report: ${error.message}`);\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'READ_ERROR',\n\t\t\t\t\t\tmessage: error.message\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\t\t};\n\n\t\t// Use the caching utility\n\t\ttry {\n\t\t\tconst result = await coreActionFn();\n\t\t\tlog.info('complexityReportDirect completed');\n\t\t\treturn result;\n\t\t} catch (error) {\n\t\t\t// Ensure silent mode is disabled\n\t\t\tdisableSilentMode();\n\n\t\t\tlog.error(`Unexpected error during complexityReport: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'UNEXPECTED_ERROR',\n\t\t\t\t\tmessage: error.message\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\t// Ensure silent mode is disabled if an outer error occurs\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in complexityReportDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: 
{\n\t\t\t\tcode: 'UNEXPECTED_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/base-provider.js", "import { generateObject, generateText, streamText } from 'ai';\nimport { log } from '../../scripts/modules/utils.js';\n\n/**\n * Base class for all AI providers\n */\nexport class BaseAIProvider {\n\tconstructor() {\n\t\tif (this.constructor === BaseAIProvider) {\n\t\t\tthrow new Error('BaseAIProvider cannot be instantiated directly');\n\t\t}\n\n\t\t// Each provider must set their name\n\t\tthis.name = this.constructor.name;\n\t}\n\n\t/**\n\t * Validates authentication parameters - can be overridden by providers\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateAuth(params) {\n\t\t// Default: require API key (most providers need this)\n\t\tif (!params.apiKey) {\n\t\t\tthrow new Error(`${this.name} API key is required`);\n\t\t}\n\t}\n\n\t/**\n\t * Validates common parameters across all methods\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateParams(params) {\n\t\t// Validate authentication (can be overridden by providers)\n\t\tthis.validateAuth(params);\n\n\t\t// Validate required model ID\n\t\tif (!params.modelId) {\n\t\t\tthrow new Error(`${this.name} Model ID is required`);\n\t\t}\n\n\t\t// Validate optional parameters\n\t\tthis.validateOptionalParams(params);\n\t}\n\n\t/**\n\t * Validates optional parameters like temperature and maxTokens\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateOptionalParams(params) {\n\t\tif (\n\t\t\tparams.temperature !== undefined &&\n\t\t\t(params.temperature < 0 || params.temperature > 1)\n\t\t) {\n\t\t\tthrow new Error('Temperature must be between 0 and 1');\n\t\t}\n\t\tif (params.maxTokens !== undefined && params.maxTokens <= 0) {\n\t\t\tthrow new Error('maxTokens must be greater than 0');\n\t\t}\n\t}\n\n\t/**\n\t * Validates message array structure\n\t */\n\tvalidateMessages(messages) {\n\t\tif (!messages 
|| !Array.isArray(messages) || messages.length === 0) {\n\t\t\tthrow new Error('Invalid or empty messages array provided');\n\t\t}\n\n\t\tfor (const msg of messages) {\n\t\t\tif (!msg.role || !msg.content) {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t'Invalid message format. Each message must have role and content'\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t * Common error handler\n\t */\n\thandleError(operation, error) {\n\t\tconst errorMessage = error.message || 'Unknown error occurred';\n\t\tlog('error', `${this.name} ${operation} failed: ${errorMessage}`, {\n\t\t\terror\n\t\t});\n\t\tthrow new Error(\n\t\t\t`${this.name} API error during ${operation}: ${errorMessage}`\n\t\t);\n\t}\n\n\t/**\n\t * Creates and returns a client instance for the provider\n\t * @abstract\n\t */\n\tgetClient(params) {\n\t\tthrow new Error('getClient must be implemented by provider');\n\t}\n\n\t/**\n\t * Returns if the API key is required\n\t * @abstract\n\t * @returns {boolean} if the API key is required, defaults to true\n\t */\n\tisRequiredApiKey() {\n\t\treturn true;\n\t}\n\n\t/**\n\t * Returns the required API key environment variable name\n\t * @abstract\n\t * @returns {string|null} The environment variable name, or null if no API key is required\n\t */\n\tgetRequiredApiKeyName() {\n\t\tthrow new Error('getRequiredApiKeyName must be implemented by provider');\n\t}\n\n\t/**\n\t * Generates text using the provider's model\n\t */\n\tasync generateText(params) {\n\t\ttry {\n\t\t\tthis.validateParams(params);\n\t\t\tthis.validateMessages(params.messages);\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`Generating ${this.name} text with model: ${params.modelId}`\n\t\t\t);\n\n\t\t\tconst client = await this.getClient(params);\n\t\t\tconst result = await generateText({\n\t\t\t\tmodel: client(params.modelId),\n\t\t\t\tmessages: params.messages,\n\t\t\t\tmaxTokens: params.maxTokens,\n\t\t\t\ttemperature: params.temperature\n\t\t\t});\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`${this.name} 
generateText completed successfully for model: ${params.modelId}`\n\t\t\t);\n\n\t\t\treturn {\n\t\t\t\ttext: result.text,\n\t\t\t\tusage: {\n\t\t\t\t\tinputTokens: result.usage?.promptTokens,\n\t\t\t\t\toutputTokens: result.usage?.completionTokens,\n\t\t\t\t\ttotalTokens: result.usage?.totalTokens\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tthis.handleError('text generation', error);\n\t\t}\n\t}\n\n\t/**\n\t * Streams text using the provider's model\n\t */\n\tasync streamText(params) {\n\t\ttry {\n\t\t\tthis.validateParams(params);\n\t\t\tthis.validateMessages(params.messages);\n\n\t\t\tlog('debug', `Streaming ${this.name} text with model: ${params.modelId}`);\n\n\t\t\tconst client = await this.getClient(params);\n\t\t\tconst stream = await streamText({\n\t\t\t\tmodel: client(params.modelId),\n\t\t\t\tmessages: params.messages,\n\t\t\t\tmaxTokens: params.maxTokens,\n\t\t\t\ttemperature: params.temperature\n\t\t\t});\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`${this.name} streamText initiated successfully for model: ${params.modelId}`\n\t\t\t);\n\n\t\t\treturn stream;\n\t\t} catch (error) {\n\t\t\tthis.handleError('text streaming', error);\n\t\t}\n\t}\n\n\t/**\n\t * Generates a structured object using the provider's model\n\t */\n\tasync generateObject(params) {\n\t\ttry {\n\t\t\tthis.validateParams(params);\n\t\t\tthis.validateMessages(params.messages);\n\n\t\t\tif (!params.schema) {\n\t\t\t\tthrow new Error('Schema is required for object generation');\n\t\t\t}\n\t\t\tif (!params.objectName) {\n\t\t\t\tthrow new Error('Object name is required for object generation');\n\t\t\t}\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`Generating ${this.name} object ('${params.objectName}') with model: ${params.modelId}`\n\t\t\t);\n\n\t\t\tconst client = await this.getClient(params);\n\t\t\tconst result = await generateObject({\n\t\t\t\tmodel: client(params.modelId),\n\t\t\t\tmessages: params.messages,\n\t\t\t\tschema: params.schema,\n\t\t\t\tmode: 'auto',\n\t\t\t\tmaxTokens: 
params.maxTokens,\n\t\t\t\ttemperature: params.temperature\n\t\t\t});\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`${this.name} generateObject completed successfully for model: ${params.modelId}`\n\t\t\t);\n\n\t\t\treturn {\n\t\t\t\tobject: result.object,\n\t\t\t\tusage: {\n\t\t\t\t\tinputTokens: result.usage?.promptTokens,\n\t\t\t\t\toutputTokens: result.usage?.completionTokens,\n\t\t\t\t\ttotalTokens: result.usage?.totalTokens\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tthis.handleError('object generation', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/tools/use-tag.js", "/**\n * tools/use-tag.js\n * Tool to switch to a different tag context\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { useTagDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\n\n/**\n * Register the useTag tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerUseTagTool(server) {\n\tserver.addTool({\n\t\tname: 'use_tag',\n\t\tdescription: 'Switch to a different tag context for task operations',\n\t\tparameters: z.object({\n\t\t\tname: z.string().describe('Name of the tag to switch to'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting use-tag with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function\n\t\t\t\tconst result = await useTagDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tname: args.name,\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error switching tag',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in use-tag tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/scripts/modules/update-config-tokens.js", "/**\n * update-config-tokens.js\n * Updates config.json with correct maxTokens values from supported-models.json\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\nimport { dirname } from 'path';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = dirname(__filename);\n\n/**\n * Updates the config file with correct maxTokens values from supported-models.json\n * @param {string} configPath - Path to the config.json file to update\n * @returns {boolean} True if successful, false otherwise\n */\nexport function updateConfigMaxTokens(configPath) {\n\ttry {\n\t\t// 
Load supported models\n\t\tconst supportedModelsPath = path.join(__dirname, 'supported-models.json');\n\t\tconst supportedModels = JSON.parse(\n\t\t\tfs.readFileSync(supportedModelsPath, 'utf-8')\n\t\t);\n\n\t\t// Load config\n\t\tconst config = JSON.parse(fs.readFileSync(configPath, 'utf-8'));\n\n\t\t// Update each role's maxTokens if the model exists in supported-models.json\n\t\tconst roles = ['main', 'research', 'fallback'];\n\n\t\tfor (const role of roles) {\n\t\t\tif (config.models && config.models[role]) {\n\t\t\t\tconst provider = config.models[role].provider;\n\t\t\t\tconst modelId = config.models[role].modelId;\n\n\t\t\t\t// Find the model in supported models\n\t\t\t\tif (supportedModels[provider]) {\n\t\t\t\t\tconst modelData = supportedModels[provider].find(\n\t\t\t\t\t\t(m) => m.id === modelId\n\t\t\t\t\t);\n\t\t\t\t\tif (modelData && modelData.max_tokens) {\n\t\t\t\t\t\tconfig.models[role].maxTokens = modelData.max_tokens;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Write back the updated config\n\t\tfs.writeFileSync(configPath, JSON.stringify(config, null, 2));\n\t\treturn true;\n\t} catch (error) {\n\t\tconsole.error('Error updating config maxTokens:', error.message);\n\t\treturn false;\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/tools/parse-prd.js", "/**\n * tools/parsePRD.js\n * Tool to parse PRD document and generate tasks\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\twithNormalizedProjectRoot,\n\tcreateErrorResponse\n} from './utils.js';\nimport { parsePRDDirect } from '../core/task-master-core.js';\nimport {\n\tPRD_FILE,\n\tTASKMASTER_DOCS_DIR,\n\tTASKMASTER_TASKS_FILE\n} from '../../../src/constants/paths.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the parse_prd tool\n * @param {Object} server - FastMCP server instance\n */\nexport function registerParsePRDTool(server) {\n\tserver.addTool({\n\t\tname: 'parse_prd',\n\t\tdescription: `Parse a Product Requirements 
Document (PRD) text file to automatically generate initial tasks. Reinitializing the project is not necessary to run this tool. It is recommended to run parse-prd after initializing the project and creating/importing a prd.txt file in the project root's ${TASKMASTER_DOCS_DIR} directory.`,\n\n\t\tparameters: z.object({\n\t\t\tinput: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.default(PRD_FILE)\n\t\t\t\t.describe('Absolute path to the PRD document file (.txt, .md, etc.)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on'),\n\t\t\toutput: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t`Output path for tasks.json file (default: ${TASKMASTER_TASKS_FILE})`\n\t\t\t\t),\n\t\t\tnumTasks: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Approximate number of top-level tasks to generate (default: 10). As the agent, if you have enough information, ensure to enter a number of tasks that would logically scale with project complexity. Setting to 0 will allow Taskmaster to determine the appropriate number of tasks based on the complexity of the PRD. Avoid entering numbers above 50 due to context window limitations.'\n\t\t\t\t),\n\t\t\tforce: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(false)\n\t\t\t\t.describe('Overwrite existing output file without prompting.'),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Enable Taskmaster to use the research role for potentially more informed task generation. 
Requires appropriate API key.'\n\t\t\t\t),\n\t\t\tappend: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Append generated tasks to existing file.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tconst result = await parsePRDDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\t...args,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error parsing PRD',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in parse_prd: ${error.message}`);\n\t\t\t\treturn createErrorResponse(`Failed to parse PRD: ${error.message}`);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/delete-tag.js", "/**\n * tools/delete-tag.js\n * Tool to delete an existing tag\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { deleteTagDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\n\n/**\n * Register the deleteTag tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerDeleteTagTool(server) {\n\tserver.addTool({\n\t\tname: 'delete_tag',\n\t\tdescription: 'Delete an existing tag and all its tasks',\n\t\tparameters: z.object({\n\t\t\tname: z.string().describe('Name of the tag to delete'),\n\t\t\tyes: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Skip confirmation prompts (default: true for MCP)'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory 
of the project. Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting delete-tag with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function (always skip confirmation for MCP)\n\t\t\t\tconst result = await deleteTagDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tname: args.name,\n\t\t\t\t\t\tyes: args.yes !== undefined ? args.yes : true, // Default to true for MCP\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error deleting tag',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in delete-tag tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/initialize-project.js", "import { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { initializeProjectDirect } from '../core/task-master-core.js';\nimport { RULE_PROFILES } from '../../../src/constants/profiles.js';\n\nexport function registerInitializeProjectTool(server) {\n\tserver.addTool({\n\t\tname: 'initialize_project',\n\t\tdescription:\n\t\t\t'Initializes a new Task Master project structure by calling the core 
initialization logic. Creates necessary folders and configuration files for Task Master in the current directory.',\n\t\tparameters: z.object({\n\t\t\tskipInstall: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(false)\n\t\t\t\t.describe(\n\t\t\t\t\t'Skip installing dependencies automatically. Never do this unless you are sure the project is already installed.'\n\t\t\t\t),\n\t\t\taddAliases: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(true)\n\t\t\t\t.describe('Add shell aliases (tm, taskmaster) to shell config file.'),\n\t\t\tinitGit: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(true)\n\t\t\t\t.describe('Initialize Git repository in project root.'),\n\t\t\tstoreTasksInGit: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(true)\n\t\t\t\t.describe('Store tasks in Git (tasks.json and tasks/ directory).'),\n\t\t\tyes: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(true)\n\t\t\t\t.describe(\n\t\t\t\t\t'Skip prompts and use default values. Always set to true for MCP tools.'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'The root directory for the project. ALWAYS SET THIS TO THE PROJECT ROOT DIRECTORY. IF NOT SET, THE TOOL WILL NOT WORK.'\n\t\t\t\t),\n\t\t\trules: z\n\t\t\t\t.array(z.enum(RULE_PROFILES))\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t`List of rule profiles to include at initialization. If omitted, defaults to Cursor profile only. 
Available options: ${RULE_PROFILES.join(', ')}`\n\t\t\t\t)\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, context) => {\n\t\t\tconst { log } = context;\n\t\t\tconst session = context.session;\n\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Executing initialize_project tool with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\tconst result = await initializeProjectDirect(args, log, { session });\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Initialization failed',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tconst errorMessage = `Project initialization tool failed: ${error.message || 'Unknown error'}`;\n\t\t\t\tlog.error(errorMessage, error);\n\t\t\t\treturn createErrorResponse(errorMessage, { details: error.stack });\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/providers/mcp-provider.js", "/**\n * mcp-server/src/providers/mcp-provider.js\n *\n * Implementation for MCP custom AI SDK provider that integrates with\n * the existing MCP server infrastructure and provider registry.\n * Follows the Claude Code provider pattern for session-based providers.\n */\n\nimport { createMCP } from '../custom-sdk/index.js';\nimport { BaseAIProvider } from '../../../src/ai-providers/base-provider.js';\n\nexport class MCPProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'mcp';\n\t\tthis.session = null; // MCP server session object\n\t}\n\n\tgetRequiredApiKeyName() {\n\t\treturn 'MCP_API_KEY';\n\t}\n\n\tisRequiredApiKey() {\n\t\treturn false;\n\t}\n\n\t/**\n\t * Override validateAuth to validate MCP session instead of API key\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateAuth(params) {\n\t\t// Validate MCP session instead of API key\n\t\tif (!this.session) {\n\t\t\tthrow new Error('MCP Provider requires active MCP session');\n\t\t}\n\n\t\tif (!this.session.clientCapabilities?.sampling) 
{\n\t\t\tthrow new Error('MCP session must have client sampling capabilities');\n\t\t}\n\t}\n\n\t/**\n\t * Creates and returns an MCP AI SDK client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @returns {Function} MCP AI SDK client function\n\t * @throws {Error} If initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\t// Pass MCP session to AI SDK implementation\n\t\t\treturn createMCP({\n\t\t\t\tsession: this.session,\n\t\t\t\tdefaultSettings: {\n\t\t\t\t\ttemperature: params.temperature,\n\t\t\t\t\tmaxTokens: params.maxTokens\n\t\t\t\t}\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n\n\t/**\n\t * Method called by MCP server on connect events\n\t * @param {object} session - MCP session object\n\t */\n\tsetSession(session) {\n\t\tthis.session = session;\n\n\t\tif (!session) {\n\t\t\tthis.logger?.warn('Set null session on MCP Provider');\n\t\t} else {\n\t\t\tthis.logger?.debug('Updated MCP Provider session');\n\t\t}\n\t}\n\n\t/**\n\t * Get current session status\n\t * @returns {boolean} True if session is available and valid\n\t */\n\thasValidSession() {\n\t\treturn !!(this.session && this.session.clientCapabilities?.sampling);\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/custom-sdk/schema-converter.js", "/**\n * @fileoverview Schema conversion utilities for MCP AI SDK provider\n */\n\n/**\n * Convert Zod schema to human-readable JSON instructions\n * @param {import('zod').ZodSchema} schema - Zod schema object\n * @param {string} [objectName='result'] - Name of the object being generated\n * @returns {string} Instructions for JSON generation\n */\nexport function convertSchemaToInstructions(schema, objectName = 'result') {\n\ttry {\n\t\t// Generate example structure from schema\n\t\tconst exampleStructure = generateExampleFromSchema(schema);\n\n\t\treturn `\nCRITICAL JSON GENERATION INSTRUCTIONS:\n\nYou must respond with ONLY valid JSON that matches 
this exact structure for \"${objectName}\":\n\n${JSON.stringify(exampleStructure, null, 2)}\n\nSTRICT REQUIREMENTS:\n1. Response must start with { and end with }\n2. Use double quotes for all strings and property names\n3. Do not include any text before or after the JSON\n4. Do not wrap in markdown code blocks\n5. Do not include explanations or comments\n6. Follow the exact property names and types shown above\n7. All required fields must be present\n\nBegin your response immediately with the opening brace {`;\n\t} catch (error) {\n\t\t// Fallback to basic JSON instructions if schema parsing fails\n\t\treturn `\nCRITICAL JSON GENERATION INSTRUCTIONS:\n\nYou must respond with ONLY valid JSON for \"${objectName}\".\n\nSTRICT REQUIREMENTS:\n1. Response must start with { and end with }\n2. Use double quotes for all strings and property names \n3. Do not include any text before or after the JSON\n4. Do not wrap in markdown code blocks\n5. Do not include explanations or comments\n\nBegin your response immediately with the opening brace {`;\n\t}\n}\n\n/**\n * Generate example structure from Zod schema\n * @param {import('zod').ZodSchema} schema - Zod schema\n * @returns {any} Example object matching the schema\n */\nfunction generateExampleFromSchema(schema) {\n\t// This is a simplified schema-to-example converter\n\t// For production, you might want to use a more sophisticated library\n\n\tif (!schema || typeof schema._def === 'undefined') {\n\t\treturn {};\n\t}\n\n\tconst def = schema._def;\n\n\tswitch (def.typeName) {\n\t\tcase 'ZodObject':\n\t\t\tconst result = {};\n\t\t\tconst shape = def.shape();\n\n\t\t\tfor (const [key, fieldSchema] of Object.entries(shape)) {\n\t\t\t\tresult[key] = generateExampleFromSchema(fieldSchema);\n\t\t\t}\n\n\t\t\treturn result;\n\n\t\tcase 'ZodString':\n\t\t\treturn 'string';\n\n\t\tcase 'ZodNumber':\n\t\t\treturn 0;\n\n\t\tcase 'ZodBoolean':\n\t\t\treturn false;\n\n\t\tcase 'ZodArray':\n\t\t\tconst elementExample = 
generateExampleFromSchema(def.type);\n\t\t\treturn [elementExample];\n\n\t\tcase 'ZodOptional':\n\t\t\treturn generateExampleFromSchema(def.innerType);\n\n\t\tcase 'ZodNullable':\n\t\t\treturn generateExampleFromSchema(def.innerType);\n\n\t\tcase 'ZodEnum':\n\t\t\treturn def.values[0] || 'enum_value';\n\n\t\tcase 'ZodLiteral':\n\t\t\treturn def.value;\n\n\t\tcase 'ZodUnion':\n\t\t\t// Use the first option from the union\n\t\t\tif (def.options && def.options.length > 0) {\n\t\t\t\treturn generateExampleFromSchema(def.options[0]);\n\t\t\t}\n\t\t\treturn 'union_value';\n\n\t\tcase 'ZodRecord':\n\t\t\treturn {\n\t\t\t\tkey: generateExampleFromSchema(def.valueType)\n\t\t\t};\n\n\t\tdefault:\n\t\t\t// For unknown types, return a placeholder\n\t\t\treturn `<${def.typeName || 'unknown'}>`;\n\t}\n}\n\n/**\n * Enhance prompt with JSON generation instructions\n * @param {Array} prompt - AI SDK prompt array\n * @param {string} jsonInstructions - JSON generation instructions\n * @returns {Array} Enhanced prompt array\n */\nexport function enhancePromptForJSON(prompt, jsonInstructions) {\n\tconst enhancedPrompt = [...prompt];\n\n\t// Find system message or create one\n\tlet systemMessageIndex = enhancedPrompt.findIndex(\n\t\t(msg) => msg.role === 'system'\n\t);\n\n\tif (systemMessageIndex >= 0) {\n\t\t// Append to existing system message\n\t\tconst currentContent = enhancedPrompt[systemMessageIndex].content;\n\t\tenhancedPrompt[systemMessageIndex] = {\n\t\t\t...enhancedPrompt[systemMessageIndex],\n\t\t\tcontent: currentContent + '\\n\\n' + jsonInstructions\n\t\t};\n\t} else {\n\t\t// Add new system message at the beginning\n\t\tenhancedPrompt.unshift({\n\t\t\trole: 'system',\n\t\t\tcontent: jsonInstructions\n\t\t});\n\t}\n\n\treturn enhancedPrompt;\n}\n"], ["/claude-task-master/src/profiles/kiro.js", "// Kiro profile for rule-transformer\nimport { createProfile } from './base-profile.js';\nimport fs from 'fs';\nimport path from 'path';\nimport { log } from 
'../../scripts/modules/utils.js';\n\n// Create and export kiro profile using the base factory\nexport const kiroProfile = createProfile({\n\tname: 'kiro',\n\tdisplayName: 'Kiro',\n\turl: 'kiro.dev',\n\tdocsUrl: 'kiro.dev/docs',\n\tprofileDir: '.kiro',\n\trulesDir: '.kiro/steering', // Kiro rules location (full path)\n\tmcpConfig: true,\n\tmcpConfigName: 'settings/mcp.json', // Create directly in settings subdirectory\n\tincludeDefaultRules: true, // Include default rules to get all the standard files\n\ttargetExtension: '.md',\n\tfileMap: {\n\t\t// Override specific mappings - the base profile will create:\n\t\t// 'rules/cursor_rules.mdc': 'kiro_rules.md'\n\t\t// 'rules/dev_workflow.mdc': 'dev_workflow.md'\n\t\t// 'rules/self_improve.mdc': 'self_improve.md'\n\t\t// 'rules/taskmaster.mdc': 'taskmaster.md'\n\t\t// We can add additional custom mappings here if needed\n\t\t'rules/taskmaster_hooks_workflow.mdc': 'taskmaster_hooks_workflow.md'\n\t},\n\tcustomReplacements: [\n\t\t// Core Kiro directory structure changes\n\t\t{ from: /\\.cursor\\/rules/g, to: '.kiro/steering' },\n\t\t{ from: /\\.cursor\\/mcp\\.json/g, to: '.kiro/settings/mcp.json' },\n\n\t\t// Fix any remaining kiro/rules references that might be created during transformation\n\t\t{ from: /\\.kiro\\/rules/g, to: '.kiro/steering' },\n\n\t\t// Essential markdown link transformations for Kiro structure\n\t\t{\n\t\t\tfrom: /\\[(.+?)\\]\\(mdc:\\.cursor\\/rules\\/(.+?)\\.mdc\\)/g,\n\t\t\tto: '[$1](.kiro/steering/$2.md)'\n\t\t},\n\n\t\t// Kiro specific terminology\n\t\t{ from: /rules directory/g, to: 'steering directory' },\n\t\t{ from: /cursor rules/gi, to: 'Kiro steering files' },\n\n\t\t// Transform frontmatter to Kiro format\n\t\t// This regex matches the entire frontmatter block and replaces it\n\t\t{\n\t\t\tfrom: /^---\\n(?:description:\\s*[^\\n]*\\n)?(?:globs:\\s*[^\\n]*\\n)?(?:alwaysApply:\\s*true\\n)?---/m,\n\t\t\tto: '---\\ninclusion: always\\n---'\n\t\t}\n\t],\n\n\t// Add lifecycle hook to copy Kiro 
hooks\n\tonPostConvert: (projectRoot, assetsDir) => {\n\t\tconst hooksSourceDir = path.join(assetsDir, 'kiro-hooks');\n\t\tconst hooksTargetDir = path.join(projectRoot, '.kiro', 'hooks');\n\n\t\t// Create hooks directory if it doesn't exist\n\t\tif (!fs.existsSync(hooksTargetDir)) {\n\t\t\tfs.mkdirSync(hooksTargetDir, { recursive: true });\n\t\t}\n\n\t\t// Copy all .kiro.hook files\n\t\tif (fs.existsSync(hooksSourceDir)) {\n\t\t\tconst hookFiles = fs\n\t\t\t\t.readdirSync(hooksSourceDir)\n\t\t\t\t.filter((f) => f.endsWith('.kiro.hook'));\n\n\t\t\thookFiles.forEach((file) => {\n\t\t\t\tconst sourcePath = path.join(hooksSourceDir, file);\n\t\t\t\tconst targetPath = path.join(hooksTargetDir, file);\n\n\t\t\t\tfs.copyFileSync(sourcePath, targetPath);\n\t\t\t});\n\n\t\t\tif (hookFiles.length > 0) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`[Kiro] Installed ${hookFiles.length} Taskmaster hooks in .kiro/hooks/`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t}\n});\n"], ["/claude-task-master/mcp-server/src/tools/research.js", "/**\n * tools/research.js\n * Tool to perform AI-powered research queries with project context\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { researchDirect } from '../core/task-master-core.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the research tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerResearchTool(server) {\n\tserver.addTool({\n\t\tname: 'research',\n\t\tdescription: 'Perform AI-powered research queries with project context',\n\n\t\tparameters: z.object({\n\t\t\tquery: z.string().describe('Research query/prompt (required)'),\n\t\t\ttaskIds: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Comma-separated list of task/subtask IDs for context (e.g., \"15,16.2,17\")'\n\t\t\t\t),\n\t\t\tfilePaths: 
z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Comma-separated list of file paths for context (e.g., \"src/api.js,docs/readme.md\")'\n\t\t\t\t),\n\t\t\tcustomContext: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Additional custom context text to include in the research'),\n\t\t\tincludeProjectTree: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Include project file tree structure in context (default: false)'\n\t\t\t\t),\n\t\t\tdetailLevel: z\n\t\t\t\t.enum(['low', 'medium', 'high'])\n\t\t\t\t.optional()\n\t\t\t\t.describe('Detail level for the research response (default: medium)'),\n\t\t\tsaveTo: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Automatically save research results to specified task/subtask ID (e.g., \"15\" or \"15.2\")'\n\t\t\t\t),\n\t\t\tsaveToFile: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Save research results to .taskmaster/docs/research/ directory (default: false)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(\n\t\t\t\t\t`Starting research with query: \"${args.query.substring(0, 100)}${args.query.length > 100 ? '...' 
: ''}\"`\n\t\t\t\t);\n\n\t\t\t\t// Call the direct function\n\t\t\t\tconst result = await researchDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\tquery: args.query,\n\t\t\t\t\t\ttaskIds: args.taskIds,\n\t\t\t\t\t\tfilePaths: args.filePaths,\n\t\t\t\t\t\tcustomContext: args.customContext,\n\t\t\t\t\t\tincludeProjectTree: args.includeProjectTree || false,\n\t\t\t\t\t\tdetailLevel: args.detailLevel || 'medium',\n\t\t\t\t\t\tsaveTo: args.saveTo,\n\t\t\t\t\t\tsaveToFile: args.saveToFile || false,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error performing research',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in research tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/complexity-report.js", "/**\n * tools/complexity-report.js\n * Tool for displaying the complexity analysis report\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { complexityReportDirect } from '../core/task-master-core.js';\nimport { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';\nimport { findComplexityReportPath } from '../core/utils/path-utils.js';\nimport { getCurrentTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the complexityReport tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerComplexityReportTool(server) {\n\tserver.addTool({\n\t\tname: 'complexity_report',\n\t\tdescription: 'Display the complexity analysis report in a readable format',\n\t\tparameters: z.object({\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t`Path to the report 
file (default: ${COMPLEXITY_REPORT_FILE})`\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Getting complexity report with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\tconst resolvedTag = getCurrentTag(args.projectRoot);\n\n\t\t\t\tconst pathArgs = {\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\tcomplexityReport: args.file,\n\t\t\t\t\ttag: resolvedTag\n\t\t\t\t};\n\n\t\t\t\tconst reportPath = findComplexityReportPath(pathArgs, log);\n\t\t\t\tlog.info('Reading complexity report from path: ', reportPath);\n\n\t\t\t\tif (!reportPath) {\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t'No complexity report found. Run task-master analyze-complexity first.'\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await complexityReportDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\treportPath: reportPath\n\t\t\t\t\t},\n\t\t\t\t\tlog\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info('Successfully retrieved complexity report');\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(\n\t\t\t\t\t\t`Failed to retrieve complexity report: ${result.error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error retrieving complexity report',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in complexity-report tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Failed to retrieve complexity report: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/constants/task-status.js", "/**\n * @typedef {'pending' | 'done' | 'in-progress' | 'review' | 'deferred' | 'cancelled'} TaskStatus\n */\n\n/**\n * Task status options list\n * @type {TaskStatus[]}\n * @description Defines possible task 
statuses:\n * - pending: Task waiting to start\n * - done: Task completed\n * - in-progress: Task in progress\n * - review: Task completed and waiting for review\n * - deferred: Task postponed or paused\n * - cancelled: Task cancelled and will not be completed\n */\nexport const TASK_STATUS_OPTIONS = [\n\t'pending',\n\t'done',\n\t'in-progress',\n\t'review',\n\t'deferred',\n\t'cancelled'\n];\n\n/**\n * Check if a given status is a valid task status\n * @param {string} status - The status to check\n * @returns {boolean} True if the status is valid, false otherwise\n */\nexport function isValidTaskStatus(status) {\n\treturn TASK_STATUS_OPTIONS.includes(status);\n}\n"], ["/claude-task-master/mcp-server/src/tools/list-tags.js", "/**\n * tools/list-tags.js\n * Tool to list all available tags\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { listTagsDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\n\n/**\n * Register the listTags tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerListTagsTool(server) {\n\tserver.addTool({\n\t\tname: 'list_tags',\n\t\tdescription: 'List all available tags with task counts and metadata',\n\t\tparameters: z.object({\n\t\t\tshowMetadata: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Whether to include metadata in the output (default: false)'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting list-tags with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function\n\t\t\t\tconst result = await listTagsDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tshowMetadata: args.showMetadata,\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error listing tags',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in list-tags tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/scripts/modules/task-manager/is-task-dependent.js", "/**\n * Check if a task is dependent on another task (directly or indirectly)\n * Used to prevent circular dependencies\n * @param {Array} allTasks - Array of all tasks\n * @param {Object} task - The task to check\n * @param {number} targetTaskId - The task ID to check dependency against\n * @returns {boolean} Whether the task depends on the target task\n */\nfunction isTaskDependentOn(allTasks, task, targetTaskId) {\n\t// If the task is a subtask, check if its parent is the target\n\tif (task.parentTaskId === targetTaskId) {\n\t\treturn true;\n\t}\n\n\t// Check direct dependencies\n\tif 
(task.dependencies && task.dependencies.includes(targetTaskId)) {\n\t\treturn true;\n\t}\n\n\t// Check dependencies of dependencies (recursive)\n\tif (task.dependencies) {\n\t\tfor (const depId of task.dependencies) {\n\t\t\tconst depTask = allTasks.find((t) => t.id === depId);\n\t\t\tif (depTask && isTaskDependentOn(allTasks, depTask, targetTaskId)) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check subtasks for dependencies\n\tif (task.subtasks) {\n\t\tfor (const subtask of task.subtasks) {\n\t\t\tif (isTaskDependentOn(allTasks, subtask, targetTaskId)) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false;\n}\n\nexport default isTaskDependentOn;\n"], ["/claude-task-master/mcp-server/src/tools/rules.js", "/**\n * tools/rules.js\n * Tool to add or remove rules from a project (MCP server)\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { rulesDirect } from '../core/direct-functions/rules.js';\nimport { RULE_PROFILES } from '../../../src/constants/profiles.js';\n\n/**\n * Register the rules tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerRulesTool(server) {\n\tserver.addTool({\n\t\tname: 'rules',\n\t\tdescription: 'Add or remove rule profiles from the project.',\n\t\tparameters: z.object({\n\t\t\taction: z\n\t\t\t\t.enum(['add', 'remove'])\n\t\t\t\t.describe('Whether to add or remove rule profiles.'),\n\t\t\tprofiles: z\n\t\t\t\t.array(z.enum(RULE_PROFILES))\n\t\t\t\t.min(1)\n\t\t\t\t.describe(\n\t\t\t\t\t`List of rule profiles to add or remove (e.g., [\\\"cursor\\\", \\\"roo\\\"]). Available options: ${RULE_PROFILES.join(', ')}`\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'The root directory of the project. 
Must be an absolute path.'\n\t\t\t\t),\n\t\t\tforce: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(false)\n\t\t\t\t.describe(\n\t\t\t\t\t'DANGEROUS: Force removal even if it would leave no rule profiles. Only use if you are absolutely certain.'\n\t\t\t\t)\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`[rules tool] Executing action: ${args.action} for profiles: ${args.profiles.join(', ')} in ${args.projectRoot}`\n\t\t\t\t);\n\t\t\t\tconst result = await rulesDirect(args, log, { session });\n\t\t\t\treturn handleApiResult(result, log);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`[rules tool] Error: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message, { details: error.stack });\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/profiles/roo.js", "// Roo Code conversion profile for rule-transformer\nimport path from 'path';\nimport fs from 'fs';\nimport { isSilentMode, log } from '../../scripts/modules/utils.js';\nimport { createProfile, COMMON_TOOL_MAPPINGS } from './base-profile.js';\nimport { ROO_MODES } from '../constants/profiles.js';\n\n// Lifecycle functions for Roo profile\nfunction onAddRulesProfile(targetDir, assetsDir) {\n\t// Use the provided assets directory to find the roocode directory\n\tconst sourceDir = path.join(assetsDir, 'roocode');\n\n\tif (!fs.existsSync(sourceDir)) {\n\t\tlog('error', `[Roo] Source directory does not exist: ${sourceDir}`);\n\t\treturn;\n\t}\n\n\tcopyRecursiveSync(sourceDir, targetDir);\n\tlog('debug', `[Roo] Copied roocode directory to ${targetDir}`);\n\n\tconst rooModesDir = path.join(sourceDir, '.roo');\n\n\t// Copy .roomodes to project root\n\tconst roomodesSrc = path.join(sourceDir, '.roomodes');\n\tconst roomodesDest = path.join(targetDir, '.roomodes');\n\tif (fs.existsSync(roomodesSrc)) {\n\t\ttry {\n\t\t\tfs.copyFileSync(roomodesSrc, roomodesDest);\n\t\t\tlog('debug', `[Roo] Copied .roomodes to 
${roomodesDest}`);\n\t\t} catch (err) {\n\t\t\tlog('error', `[Roo] Failed to copy .roomodes: ${err.message}`);\n\t\t}\n\t}\n\n\tfor (const mode of ROO_MODES) {\n\t\tconst src = path.join(rooModesDir, `rules-${mode}`, `${mode}-rules`);\n\t\tconst dest = path.join(targetDir, '.roo', `rules-${mode}`, `${mode}-rules`);\n\t\tif (fs.existsSync(src)) {\n\t\t\ttry {\n\t\t\t\tconst destDir = path.dirname(dest);\n\t\t\t\tif (!fs.existsSync(destDir)) fs.mkdirSync(destDir, { recursive: true });\n\t\t\t\tfs.copyFileSync(src, dest);\n\t\t\t\tlog('debug', `[Roo] Copied ${mode}-rules to ${dest}`);\n\t\t\t} catch (err) {\n\t\t\t\tlog('error', `[Roo] Failed to copy ${src} to ${dest}: ${err.message}`);\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunction copyRecursiveSync(src, dest) {\n\tconst exists = fs.existsSync(src);\n\tconst stats = exists && fs.statSync(src);\n\tconst isDirectory = exists && stats.isDirectory();\n\tif (isDirectory) {\n\t\tif (!fs.existsSync(dest)) fs.mkdirSync(dest, { recursive: true });\n\t\tfs.readdirSync(src).forEach((childItemName) => {\n\t\t\tcopyRecursiveSync(\n\t\t\t\tpath.join(src, childItemName),\n\t\t\t\tpath.join(dest, childItemName)\n\t\t\t);\n\t\t});\n\t} else {\n\t\tfs.copyFileSync(src, dest);\n\t}\n}\n\nfunction onRemoveRulesProfile(targetDir) {\n\tconst roomodesPath = path.join(targetDir, '.roomodes');\n\tif (fs.existsSync(roomodesPath)) {\n\t\ttry {\n\t\t\tfs.rmSync(roomodesPath, { force: true });\n\t\t\tlog('debug', `[Roo] Removed .roomodes from ${roomodesPath}`);\n\t\t} catch (err) {\n\t\t\tlog('error', `[Roo] Failed to remove .roomodes: ${err.message}`);\n\t\t}\n\t}\n\n\tconst rooDir = path.join(targetDir, '.roo');\n\tif (fs.existsSync(rooDir)) {\n\t\tfs.readdirSync(rooDir).forEach((entry) => {\n\t\t\tif (entry.startsWith('rules-')) {\n\t\t\t\tconst modeDir = path.join(rooDir, entry);\n\t\t\t\ttry {\n\t\t\t\t\tfs.rmSync(modeDir, { recursive: true, force: true });\n\t\t\t\t\tlog('debug', `[Roo] Removed ${entry} directory from ${modeDir}`);\n\t\t\t\t} catch 
(err) {\n\t\t\t\t\tlog('error', `[Roo] Failed to remove ${modeDir}: ${err.message}`);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t\tif (fs.readdirSync(rooDir).length === 0) {\n\t\t\ttry {\n\t\t\t\tfs.rmSync(rooDir, { recursive: true, force: true });\n\t\t\t\tlog('debug', `[Roo] Removed empty .roo directory from ${rooDir}`);\n\t\t\t} catch (err) {\n\t\t\t\tlog('error', `[Roo] Failed to remove .roo directory: ${err.message}`);\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunction onPostConvertRulesProfile(targetDir, assetsDir) {\n\tonAddRulesProfile(targetDir, assetsDir);\n}\n\n// Create and export roo profile using the base factory\nexport const rooProfile = createProfile({\n\tname: 'roo',\n\tdisplayName: 'Roo Code',\n\turl: 'roocode.com',\n\tdocsUrl: 'docs.roocode.com',\n\ttoolMappings: COMMON_TOOL_MAPPINGS.ROO_STYLE,\n\tonAdd: onAddRulesProfile,\n\tonRemove: onRemoveRulesProfile,\n\tonPostConvert: onPostConvertRulesProfile\n});\n\n// Export lifecycle functions separately to avoid naming conflicts\nexport { onAddRulesProfile, onRemoveRulesProfile, onPostConvertRulesProfile };\n"], ["/claude-task-master/mcp-server/src/tools/models.js", "/**\n * models.js\n * MCP tool for managing AI model configurations\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { modelsDirect } from '../core/task-master-core.js';\n\n/**\n * Register the models tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerModelsTool(server) {\n\tserver.addTool({\n\t\tname: 'models',\n\t\tdescription:\n\t\t\t'Get information about available AI models or set model configurations. Run without arguments to get the current model configuration and API key status for the selected model providers.',\n\t\tparameters: z.object({\n\t\t\tsetMain: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Set the primary model for task generation/updates. 
Model provider API key is required in the MCP config ENV.'\n\t\t\t\t),\n\t\t\tsetResearch: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Set the model for research-backed operations. Model provider API key is required in the MCP config ENV.'\n\t\t\t\t),\n\t\t\tsetFallback: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Set the model to use if the primary fails. Model provider API key is required in the MCP config ENV.'\n\t\t\t\t),\n\t\t\tlistAvailableModels: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'List all available models not currently in use. Input/output costs values are in dollars (3 is $3.00).'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\topenrouter: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Indicates the set model ID is a custom OpenRouter model.'),\n\t\t\tollama: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Indicates the set model ID is a custom Ollama model.'),\n\t\t\tbedrock: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Indicates the set model ID is a custom AWS Bedrock model.'),\n\t\t\tazure: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Indicates the set model ID is a custom Azure OpenAI model.'),\n\t\t\tvertex: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Indicates the set model ID is a custom Google Vertex AI model.'\n\t\t\t\t)\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting models tool with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tconst result = await modelsDirect(\n\t\t\t\t\t{ ...args, projectRoot: args.projectRoot },\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn 
handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error managing models',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in models tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/test-version-check-full.js", "import {\n\tcheckForUpdate,\n\tdisplayUpgradeNotification,\n\tcompareVersions\n} from './scripts/modules/commands.js';\nimport fs from 'fs';\nimport path from 'path';\n\n// Force our current version for testing\nprocess.env.FORCE_VERSION = '0.9.30';\n\n// Create a mock package.json in memory for testing\nconst mockPackageJson = {\n\tname: 'task-master-ai',\n\tversion: '0.9.30'\n};\n\n// Modified version of checkForUpdate that doesn't use HTTP for testing\nasync function testCheckForUpdate(simulatedLatestVersion) {\n\t// Get current version - use our forced version\n\tconst currentVersion = process.env.FORCE_VERSION || '0.9.30';\n\n\tconsole.log(`Using simulated current version: ${currentVersion}`);\n\tconsole.log(`Using simulated latest version: ${simulatedLatestVersion}`);\n\n\t// Compare versions\n\tconst needsUpdate =\n\t\tcompareVersions(currentVersion, simulatedLatestVersion) < 0;\n\n\treturn {\n\t\tcurrentVersion,\n\t\tlatestVersion: simulatedLatestVersion,\n\t\tneedsUpdate\n\t};\n}\n\n// Test with current version older than latest (should show update notice)\nasync function runTest() {\n\tconsole.log('=== Testing version check scenarios ===\\n');\n\n\t// Scenario 1: Update available\n\tconsole.log(\n\t\t'\\n--- Scenario 1: Update available (Current: 0.9.30, Latest: 1.0.0) ---'\n\t);\n\tconst updateInfo1 = await testCheckForUpdate('1.0.0');\n\tconsole.log('Update check results:');\n\tconsole.log(`- Current version: ${updateInfo1.currentVersion}`);\n\tconsole.log(`- Latest version: ${updateInfo1.latestVersion}`);\n\tconsole.log(`- Update needed: ${updateInfo1.needsUpdate}`);\n\n\tif 
(updateInfo1.needsUpdate) {\n\t\tconsole.log('\\nDisplaying upgrade notification:');\n\t\tdisplayUpgradeNotification(\n\t\t\tupdateInfo1.currentVersion,\n\t\t\tupdateInfo1.latestVersion\n\t\t);\n\t}\n\n\t// Scenario 2: No update needed (versions equal)\n\tconsole.log(\n\t\t'\\n--- Scenario 2: No update needed (Current: 0.9.30, Latest: 0.9.30) ---'\n\t);\n\tconst updateInfo2 = await testCheckForUpdate('0.9.30');\n\tconsole.log('Update check results:');\n\tconsole.log(`- Current version: ${updateInfo2.currentVersion}`);\n\tconsole.log(`- Latest version: ${updateInfo2.latestVersion}`);\n\tconsole.log(`- Update needed: ${updateInfo2.needsUpdate}`);\n\n\t// Scenario 3: Development version (current newer than latest)\n\tconsole.log(\n\t\t'\\n--- Scenario 3: Development version (Current: 0.9.30, Latest: 0.9.0) ---'\n\t);\n\tconst updateInfo3 = await testCheckForUpdate('0.9.0');\n\tconsole.log('Update check results:');\n\tconsole.log(`- Current version: ${updateInfo3.currentVersion}`);\n\tconsole.log(`- Latest version: ${updateInfo3.latestVersion}`);\n\tconsole.log(`- Update needed: ${updateInfo3.needsUpdate}`);\n\n\tconsole.log('\\n=== Test complete ===');\n}\n\n// Run all tests\nrunTest();\n"], ["/claude-task-master/mcp-server/src/custom-sdk/index.js", "/**\n * src/ai-providers/custom-sdk/mcp/index.js\n *\n * AI SDK factory function for MCP provider.\n * Creates MCP language model instances with session-based AI operations.\n */\n\nimport { MCPLanguageModel } from './language-model.js';\n\n/**\n * Create MCP provider factory function following AI SDK patterns\n * @param {object} options - Provider options\n * @param {object} options.session - MCP session object\n * @param {object} options.defaultSettings - Default settings for the provider\n * @returns {Function} Provider factory function\n */\nexport function createMCP(options = {}) {\n\tif (!options.session) {\n\t\tthrow new Error('MCP provider requires session object');\n\t}\n\n\t// Return the provider factory 
function that AI SDK expects\n\tconst provider = function (modelId, settings = {}) {\n\t\tif (new.target) {\n\t\t\tthrow new Error(\n\t\t\t\t'The MCP model function cannot be called with the new keyword.'\n\t\t\t);\n\t\t}\n\n\t\treturn new MCPLanguageModel({\n\t\t\tsession: options.session,\n\t\t\tmodelId: modelId || 'claude-3-5-sonnet-20241022',\n\t\t\tsettings: {\n\t\t\t\ttemperature: settings.temperature,\n\t\t\t\tmaxTokens: settings.maxTokens,\n\t\t\t\t...options.defaultSettings,\n\t\t\t\t...settings\n\t\t\t}\n\t\t});\n\t};\n\n\t// Add required methods for AI SDK compatibility\n\tprovider.languageModel = (modelId, settings) => provider(modelId, settings);\n\tprovider.chat = (modelId, settings) => provider(modelId, settings);\n\n\treturn provider;\n}\n"], ["/claude-task-master/mcp-server/src/custom-sdk/message-converter.js", "/**\n * src/ai-providers/custom-sdk/mcp/message-converter.js\n *\n * Message conversion utilities for converting between AI SDK prompt format\n * and MCP sampling format.\n */\n\n/**\n * Convert AI SDK prompt format to MCP sampling format\n * @param {Array} prompt - AI SDK prompt array\n * @returns {object} MCP format with messages and systemPrompt\n */\nexport function convertToMCPFormat(prompt) {\n\tconst messages = [];\n\tlet systemPrompt = '';\n\n\tfor (const message of prompt) {\n\t\tif (message.role === 'system') {\n\t\t\t// Extract system prompt\n\t\t\tsystemPrompt = extractTextContent(message.content);\n\t\t} else if (message.role === 'user' || message.role === 'assistant') {\n\t\t\t// Convert user/assistant messages\n\t\t\tmessages.push({\n\t\t\t\trole: message.role,\n\t\t\t\tcontent: {\n\t\t\t\t\ttype: 'text',\n\t\t\t\t\ttext: extractTextContent(message.content)\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t}\n\n\treturn {\n\t\tmessages,\n\t\tsystemPrompt\n\t};\n}\n\n/**\n * Convert MCP response format to AI SDK format\n * @param {object} response - MCP sampling response\n * @returns {object} AI SDK compatible result\n */\nexport function 
convertFromMCPFormat(response) {\n\t// Handle different possible response formats\n\tlet text = '';\n\tlet usage = null;\n\tlet finishReason = 'stop';\n\tlet warnings = [];\n\n\tif (typeof response === 'string') {\n\t\ttext = response;\n\t} else if (response.content) {\n\t\ttext = extractTextContent(response.content);\n\t\tusage = response.usage;\n\t\tfinishReason = response.finishReason || 'stop';\n\t} else if (response.text) {\n\t\ttext = response.text;\n\t\tusage = response.usage;\n\t\tfinishReason = response.finishReason || 'stop';\n\t} else {\n\t\t// Fallback: try to extract text from response\n\t\ttext = JSON.stringify(response);\n\t\twarnings.push('Unexpected MCP response format, used JSON fallback');\n\t}\n\n\treturn {\n\t\ttext,\n\t\tusage,\n\t\tfinishReason,\n\t\twarnings\n\t};\n}\n\n/**\n * Extract text content from various content formats\n * @param {string|Array|object} content - Content in various formats\n * @returns {string} Extracted text\n */\nfunction extractTextContent(content) {\n\tif (typeof content === 'string') {\n\t\treturn content;\n\t}\n\n\tif (Array.isArray(content)) {\n\t\t// Handle array of content parts\n\t\treturn content\n\t\t\t.map((part) => {\n\t\t\t\tif (typeof part === 'string') {\n\t\t\t\t\treturn part;\n\t\t\t\t}\n\t\t\t\tif (part.type === 'text' && part.text) {\n\t\t\t\t\treturn part.text;\n\t\t\t\t}\n\t\t\t\tif (part.text) {\n\t\t\t\t\treturn part.text;\n\t\t\t\t}\n\t\t\t\t// Skip non-text content (images, etc.)\n\t\t\t\treturn '';\n\t\t\t})\n\t\t\t.filter((text) => text.length > 0)\n\t\t\t.join(' ');\n\t}\n\n\tif (content && typeof content === 'object') {\n\t\tif (content.type === 'text' && content.text) {\n\t\t\treturn content.text;\n\t\t}\n\t\tif (content.text) {\n\t\t\treturn content.text;\n\t\t}\n\t}\n\n\t// Fallback\n\treturn String(content || '');\n}\n"], ["/claude-task-master/src/ai-providers/custom-sdk/claude-code/errors.js", "/**\n * @fileoverview Error handling utilities for Claude Code provider\n */\n\nimport { 
APICallError, LoadAPIKeyError } from '@ai-sdk/provider';\n\n/**\n * @typedef {import('./types.js').ClaudeCodeErrorMetadata} ClaudeCodeErrorMetadata\n */\n\n/**\n * Create an API call error with Claude Code specific metadata\n * @param {Object} params - Error parameters\n * @param {string} params.message - Error message\n * @param {string} [params.code] - Error code\n * @param {number} [params.exitCode] - Process exit code\n * @param {string} [params.stderr] - Standard error output\n * @param {string} [params.promptExcerpt] - Excerpt of the prompt\n * @param {boolean} [params.isRetryable=false] - Whether the error is retryable\n * @returns {APICallError}\n */\nexport function createAPICallError({\n\tmessage,\n\tcode,\n\texitCode,\n\tstderr,\n\tpromptExcerpt,\n\tisRetryable = false\n}) {\n\t/** @type {ClaudeCodeErrorMetadata} */\n\tconst metadata = {\n\t\tcode,\n\t\texitCode,\n\t\tstderr,\n\t\tpromptExcerpt\n\t};\n\n\treturn new APICallError({\n\t\tmessage,\n\t\tisRetryable,\n\t\turl: 'claude-code-cli://command',\n\t\trequestBodyValues: promptExcerpt ? { prompt: promptExcerpt } : undefined,\n\t\tdata: metadata\n\t});\n}\n\n/**\n * Create an authentication error\n * @param {Object} params - Error parameters\n * @param {string} params.message - Error message\n * @returns {LoadAPIKeyError}\n */\nexport function createAuthenticationError({ message }) {\n\treturn new LoadAPIKeyError({\n\t\tmessage:\n\t\t\tmessage ||\n\t\t\t'Authentication failed. 
Please ensure Claude Code CLI is properly authenticated.'\n\t});\n}\n\n/**\n * Create a timeout error\n * @param {Object} params - Error parameters\n * @param {string} params.message - Error message\n * @param {string} [params.promptExcerpt] - Excerpt of the prompt\n * @param {number} params.timeoutMs - Timeout in milliseconds\n * @returns {APICallError}\n */\nexport function createTimeoutError({ message, promptExcerpt, timeoutMs }) {\n\t// Store timeoutMs in metadata for potential use by error handlers\n\t/** @type {ClaudeCodeErrorMetadata & { timeoutMs: number }} */\n\tconst metadata = {\n\t\tcode: 'TIMEOUT',\n\t\tpromptExcerpt,\n\t\ttimeoutMs\n\t};\n\n\treturn new APICallError({\n\t\tmessage,\n\t\tisRetryable: true,\n\t\turl: 'claude-code-cli://command',\n\t\trequestBodyValues: promptExcerpt ? { prompt: promptExcerpt } : undefined,\n\t\tdata: metadata\n\t});\n}\n\n/**\n * Check if an error is an authentication error\n * @param {unknown} error - Error to check\n * @returns {boolean}\n */\nexport function isAuthenticationError(error) {\n\tif (error instanceof LoadAPIKeyError) return true;\n\tif (\n\t\terror instanceof APICallError &&\n\t\t/** @type {ClaudeCodeErrorMetadata} */ (error.data)?.exitCode === 401\n\t)\n\t\treturn true;\n\treturn false;\n}\n\n/**\n * Check if an error is a timeout error\n * @param {unknown} error - Error to check\n * @returns {boolean}\n */\nexport function isTimeoutError(error) {\n\tif (\n\t\terror instanceof APICallError &&\n\t\t/** @type {ClaudeCodeErrorMetadata} */ (error.data)?.code === 'TIMEOUT'\n\t)\n\t\treturn true;\n\treturn false;\n}\n\n/**\n * Get error metadata from an error\n * @param {unknown} error - Error to extract metadata from\n * @returns {ClaudeCodeErrorMetadata|undefined}\n */\nexport function getErrorMetadata(error) {\n\tif (error instanceof APICallError && error.data) {\n\t\treturn /** @type {ClaudeCodeErrorMetadata} */ (error.data);\n\t}\n\treturn undefined;\n}\n"], 
["/claude-task-master/mcp-server/src/core/__tests__/context-manager.test.js", "import { jest } from '@jest/globals';\nimport { ContextManager } from '../context-manager.js';\n\ndescribe('ContextManager', () => {\n\tlet contextManager;\n\n\tbeforeEach(() => {\n\t\tcontextManager = new ContextManager({\n\t\t\tmaxCacheSize: 10,\n\t\t\tttl: 1000, // 1 second for testing\n\t\t\tmaxContextSize: 1000\n\t\t});\n\t});\n\n\tdescribe('getContext', () => {\n\t\tit('should create a new context when not in cache', async () => {\n\t\t\tconst context = await contextManager.getContext('test-id', {\n\t\t\t\ttest: true\n\t\t\t});\n\t\t\texpect(context.id).toBe('test-id');\n\t\t\texpect(context.metadata.test).toBe(true);\n\t\t\texpect(contextManager.stats.misses).toBe(1);\n\t\t\texpect(contextManager.stats.hits).toBe(0);\n\t\t});\n\n\t\tit('should return cached context when available', async () => {\n\t\t\t// First call creates the context\n\t\t\tawait contextManager.getContext('test-id', { test: true });\n\n\t\t\t// Second call should hit cache\n\t\t\tconst context = await contextManager.getContext('test-id', {\n\t\t\t\ttest: true\n\t\t\t});\n\t\t\texpect(context.id).toBe('test-id');\n\t\t\texpect(context.metadata.test).toBe(true);\n\t\t\texpect(contextManager.stats.hits).toBe(1);\n\t\t\texpect(contextManager.stats.misses).toBe(1);\n\t\t});\n\n\t\tit('should respect TTL settings', async () => {\n\t\t\t// Create context\n\t\t\tawait contextManager.getContext('test-id', { test: true });\n\n\t\t\t// Wait for TTL to expire\n\t\t\tawait new Promise((resolve) => setTimeout(resolve, 1100));\n\n\t\t\t// Should create new context\n\t\t\tawait contextManager.getContext('test-id', { test: true });\n\t\t\texpect(contextManager.stats.misses).toBe(2);\n\t\t\texpect(contextManager.stats.hits).toBe(0);\n\t\t});\n\t});\n\n\tdescribe('updateContext', () => {\n\t\tit('should update existing context metadata', async () => {\n\t\t\tawait contextManager.getContext('test-id', { initial: true 
});\n\t\t\tconst updated = await contextManager.updateContext('test-id', {\n\t\t\t\tupdated: true\n\t\t\t});\n\n\t\t\texpect(updated.metadata.initial).toBe(true);\n\t\t\texpect(updated.metadata.updated).toBe(true);\n\t\t});\n\t});\n\n\tdescribe('invalidateContext', () => {\n\t\tit('should remove context from cache', async () => {\n\t\t\tawait contextManager.getContext('test-id', { test: true });\n\t\t\tcontextManager.invalidateContext('test-id', { test: true });\n\n\t\t\t// Should be a cache miss\n\t\t\tawait contextManager.getContext('test-id', { test: true });\n\t\t\texpect(contextManager.stats.invalidations).toBe(1);\n\t\t\texpect(contextManager.stats.misses).toBe(2);\n\t\t});\n\t});\n\n\tdescribe('getStats', () => {\n\t\tit('should return current cache statistics', async () => {\n\t\t\tawait contextManager.getContext('test-id', { test: true });\n\t\t\tconst stats = contextManager.getStats();\n\n\t\t\texpect(stats.hits).toBe(0);\n\t\t\texpect(stats.misses).toBe(1);\n\t\t\texpect(stats.invalidations).toBe(0);\n\t\t\texpect(stats.size).toBe(1);\n\t\t\texpect(stats.maxSize).toBe(10);\n\t\t\texpect(stats.ttl).toBe(1000);\n\t\t});\n\t});\n});\n"], ["/claude-task-master/mcp-server/src/custom-sdk/json-extractor.js", "/**\n * @fileoverview Extract JSON from MCP response, handling markdown blocks and other formatting\n */\n\n/**\n * Extract JSON from MCP AI response\n * @param {string} text - The text to extract JSON from\n * @returns {string} - The extracted JSON string\n */\nexport function extractJson(text) {\n\t// Remove markdown code blocks if present\n\tlet jsonText = text.trim();\n\n\t// Remove ```json blocks\n\tjsonText = jsonText.replace(/^```json\\s*/gm, '');\n\tjsonText = jsonText.replace(/^```\\s*/gm, '');\n\tjsonText = jsonText.replace(/```\\s*$/gm, '');\n\n\t// Remove common TypeScript/JavaScript patterns\n\tjsonText = jsonText.replace(/^const\\s+\\w+\\s*=\\s*/, ''); // Remove \"const varName = \"\n\tjsonText = jsonText.replace(/^let\\s+\\w+\\s*=\\s*/, ''); 
// Remove \"let varName = \"\n\tjsonText = jsonText.replace(/^var\\s+\\w+\\s*=\\s*/, ''); // Remove \"var varName = \"\n\tjsonText = jsonText.replace(/;?\\s*$/, ''); // Remove trailing semicolons\n\n\t// Remove explanatory text before JSON (common with AI responses)\n\tjsonText = jsonText.replace(/^.*?(?=\\{|\\[)/s, '');\n\n\t// Remove explanatory text after JSON\n\tconst lines = jsonText.split('\\n');\n\tlet jsonEndIndex = -1;\n\tlet braceCount = 0;\n\tlet inString = false;\n\tlet escapeNext = false;\n\n\t// Find the end of the JSON by tracking braces\n\tfor (let i = 0; i < jsonText.length; i++) {\n\t\tconst char = jsonText[i];\n\n\t\tif (escapeNext) {\n\t\t\tescapeNext = false;\n\t\t\tcontinue;\n\t\t}\n\n\t\tif (char === '\\\\') {\n\t\t\tescapeNext = true;\n\t\t\tcontinue;\n\t\t}\n\n\t\tif (char === '\"' && !escapeNext) {\n\t\t\tinString = !inString;\n\t\t\tcontinue;\n\t\t}\n\n\t\tif (!inString) {\n\t\t\tif (char === '{' || char === '[') {\n\t\t\t\tbraceCount++;\n\t\t\t} else if (char === '}' || char === ']') {\n\t\t\t\tbraceCount--;\n\t\t\t\tif (braceCount === 0) {\n\t\t\t\t\tjsonEndIndex = i;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif (jsonEndIndex > -1) {\n\t\tjsonText = jsonText.substring(0, jsonEndIndex + 1);\n\t}\n\n\t// Try to extract JSON object or array if previous method didn't work\n\tif (jsonEndIndex === -1) {\n\t\tconst objectMatch = jsonText.match(/{[\\s\\S]*}/);\n\t\tconst arrayMatch = jsonText.match(/\\[[\\s\\S]*\\]/);\n\n\t\tif (objectMatch) {\n\t\t\tjsonText = objectMatch[0];\n\t\t} else if (arrayMatch) {\n\t\t\tjsonText = arrayMatch[0];\n\t\t}\n\t}\n\n\t// First try to parse as valid JSON\n\ttry {\n\t\tJSON.parse(jsonText);\n\t\treturn jsonText;\n\t} catch {\n\t\t// If it's not valid JSON, it might be a JavaScript object literal\n\t\t// Try to convert it to valid JSON\n\t\ttry {\n\t\t\t// This is a simple conversion that handles basic cases\n\t\t\t// Replace unquoted keys with quoted keys\n\t\t\tconst converted = 
jsonText\n\t\t\t\t.replace(/([{,]\\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\\s*:/g, '$1\"$2\":')\n\t\t\t\t// Replace single quotes with double quotes\n\t\t\t\t.replace(/'/g, '\"')\n\t\t\t\t// Handle trailing commas\n\t\t\t\t.replace(/,\\s*([}\\]])/g, '$1');\n\n\t\t\t// Validate the converted JSON\n\t\t\tJSON.parse(converted);\n\t\t\treturn converted;\n\t\t} catch {\n\t\t\t// If all else fails, return the original text\n\t\t\t// The calling code will handle the error appropriately\n\t\t\treturn text;\n\t\t}\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/custom-sdk/errors.js", "/**\n * src/ai-providers/custom-sdk/mcp/errors.js\n *\n * Error handling utilities for MCP AI SDK provider.\n * Maps MCP errors to AI SDK compatible error types.\n */\n\n/**\n * MCP-specific error class\n */\nexport class MCPError extends Error {\n\tconstructor(message, options = {}) {\n\t\tsuper(message);\n\t\tthis.name = 'MCPError';\n\t\tthis.code = options.code;\n\t\tthis.cause = options.cause;\n\t\tthis.mcpResponse = options.mcpResponse;\n\t}\n}\n\n/**\n * Session-related error\n */\nexport class MCPSessionError extends MCPError {\n\tconstructor(message, options = {}) {\n\t\tsuper(message, options);\n\t\tthis.name = 'MCPSessionError';\n\t}\n}\n\n/**\n * Sampling-related error\n */\nexport class MCPSamplingError extends MCPError {\n\tconstructor(message, options = {}) {\n\t\tsuper(message, options);\n\t\tthis.name = 'MCPSamplingError';\n\t}\n}\n\n/**\n * Map MCP errors to AI SDK compatible error types\n * @param {Error} error - Original error\n * @returns {Error} Mapped error\n */\nexport function mapMCPError(error) {\n\t// If already an MCP error, return as-is\n\tif (error instanceof MCPError) {\n\t\treturn error;\n\t}\n\n\tconst message = error.message || 'Unknown MCP error';\n\tconst originalError = error;\n\n\t// Map common error patterns\n\tif (message.includes('session') || message.includes('connection')) {\n\t\treturn new MCPSessionError(message, {\n\t\t\tcause: originalError,\n\t\t\tcode: 
'SESSION_ERROR'\n\t\t});\n\t}\n\n\tif (message.includes('sampling') || message.includes('timeout')) {\n\t\treturn new MCPSamplingError(message, {\n\t\t\tcause: originalError,\n\t\t\tcode: 'SAMPLING_ERROR'\n\t\t});\n\t}\n\n\tif (message.includes('capabilities') || message.includes('not supported')) {\n\t\treturn new MCPSessionError(message, {\n\t\t\tcause: originalError,\n\t\t\tcode: 'CAPABILITY_ERROR'\n\t\t});\n\t}\n\n\t// Default to generic MCP error\n\treturn new MCPError(message, {\n\t\tcause: originalError,\n\t\tcode: 'UNKNOWN_ERROR'\n\t});\n}\n\n/**\n * Check if error is retryable\n * @param {Error} error - Error to check\n * @returns {boolean} True if error might be retryable\n */\nexport function isRetryableError(error) {\n\tif (error instanceof MCPSamplingError && error.code === 'SAMPLING_ERROR') {\n\t\treturn true;\n\t}\n\n\tif (error instanceof MCPSessionError && error.code === 'SESSION_ERROR') {\n\t\t// Session errors are generally not retryable\n\t\treturn false;\n\t}\n\n\t// Check for common retryable patterns\n\tconst message = error.message?.toLowerCase() || '';\n\treturn (\n\t\tmessage.includes('timeout') ||\n\t\tmessage.includes('network') ||\n\t\tmessage.includes('temporary')\n\t);\n}\n"], ["/claude-task-master/src/provider-registry/index.js", "/**\n * Provider Registry - Singleton for managing AI providers\n *\n * This module implements a singleton registry that allows dynamic registration\n * of AI providers at runtime, while maintaining compatibility with the existing\n * static PROVIDERS object in ai-services-unified.js.\n */\n\n// Singleton instance\nlet instance = null;\n\n/**\n * Provider Registry class - Manages dynamic provider registration\n */\nclass ProviderRegistry {\n\tconstructor() {\n\t\t// Private provider map\n\t\tthis._providers = new Map();\n\n\t\t// Flag to track initialization\n\t\tthis._initialized = false;\n\t}\n\n\t/**\n\t * Get the singleton instance\n\t * @returns {ProviderRegistry} The singleton instance\n\t 
*/\n\tstatic getInstance() {\n\t\tif (!instance) {\n\t\t\tinstance = new ProviderRegistry();\n\t\t}\n\t\treturn instance;\n\t}\n\n\t/**\n\t * Initialize the registry\n\t * @returns {ProviderRegistry} The singleton instance\n\t */\n\tinitialize() {\n\t\tif (this._initialized) {\n\t\t\treturn this;\n\t\t}\n\n\t\tthis._initialized = true;\n\t\treturn this;\n\t}\n\n\t/**\n\t * Register a provider with the registry\n\t * @param {string} providerName - The name of the provider\n\t * @param {object} provider - The provider instance\n\t * @param {object} options - Additional options for registration\n\t * @returns {ProviderRegistry} The singleton instance for chaining\n\t */\n\tregisterProvider(providerName, provider, options = {}) {\n\t\tif (!providerName || typeof providerName !== 'string') {\n\t\t\tthrow new Error('Provider name must be a non-empty string');\n\t\t}\n\n\t\tif (!provider) {\n\t\t\tthrow new Error('Provider instance is required');\n\t\t}\n\n\t\t// Validate that provider implements the required interface\n\t\tif (\n\t\t\ttypeof provider.generateText !== 'function' ||\n\t\t\ttypeof provider.streamText !== 'function' ||\n\t\t\ttypeof provider.generateObject !== 'function'\n\t\t) {\n\t\t\tthrow new Error('Provider must implement BaseAIProvider interface');\n\t\t}\n\n\t\t// Add provider to the registry\n\t\tthis._providers.set(providerName, {\n\t\t\tinstance: provider,\n\t\t\toptions,\n\t\t\tregisteredAt: new Date()\n\t\t});\n\n\t\treturn this;\n\t}\n\n\t/**\n\t * Check if a provider exists in the registry\n\t * @param {string} providerName - The name of the provider\n\t * @returns {boolean} True if the provider exists\n\t */\n\thasProvider(providerName) {\n\t\treturn this._providers.has(providerName);\n\t}\n\n\t/**\n\t * Get a provider from the registry\n\t * @param {string} providerName - The name of the provider\n\t * @returns {object|null} The provider instance or null if not found\n\t */\n\tgetProvider(providerName) {\n\t\tconst providerEntry = 
this._providers.get(providerName);\n\t\treturn providerEntry ? providerEntry.instance : null;\n\t}\n\n\t/**\n\t * Get all registered providers\n\t * @returns {Map} Map of all registered providers\n\t */\n\tgetAllProviders() {\n\t\treturn new Map(this._providers);\n\t}\n\n\t/**\n\t * Remove a provider from the registry\n\t * @param {string} providerName - The name of the provider\n\t * @returns {boolean} True if the provider was removed\n\t */\n\tunregisterProvider(providerName) {\n\t\tif (this._providers.has(providerName)) {\n\t\t\tthis._providers.delete(providerName);\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\n\t/**\n\t * Reset the registry (primarily for testing)\n\t */\n\treset() {\n\t\tthis._providers.clear();\n\t\tthis._initialized = false;\n\t}\n}\n\nProviderRegistry.getInstance().initialize(); // Ensure singleton is initialized on import\n// Export singleton getter\nexport default ProviderRegistry;\n"], ["/claude-task-master/src/ai-providers/custom-sdk/claude-code/index.js", "/**\n * @fileoverview Claude Code provider factory and exports\n */\n\nimport { NoSuchModelError } from '@ai-sdk/provider';\nimport { ClaudeCodeLanguageModel } from './language-model.js';\n\n/**\n * @typedef {import('./types.js').ClaudeCodeSettings} ClaudeCodeSettings\n * @typedef {import('./types.js').ClaudeCodeModelId} ClaudeCodeModelId\n * @typedef {import('./types.js').ClaudeCodeProvider} ClaudeCodeProvider\n * @typedef {import('./types.js').ClaudeCodeProviderSettings} ClaudeCodeProviderSettings\n */\n\n/**\n * Create a Claude Code provider using the official SDK\n * @param {ClaudeCodeProviderSettings} [options={}] - Provider configuration options\n * @returns {ClaudeCodeProvider} Claude Code provider instance\n */\nexport function createClaudeCode(options = {}) {\n\t/**\n\t * Create a language model instance\n\t * @param {ClaudeCodeModelId} modelId - Model ID\n\t * @param {ClaudeCodeSettings} [settings={}] - Model settings\n\t * @returns {ClaudeCodeLanguageModel}\n\t 
*/\n\tconst createModel = (modelId, settings = {}) => {\n\t\treturn new ClaudeCodeLanguageModel({\n\t\t\tid: modelId,\n\t\t\tsettings: {\n\t\t\t\t...options.defaultSettings,\n\t\t\t\t...settings\n\t\t\t}\n\t\t});\n\t};\n\n\t/**\n\t * Provider function\n\t * @param {ClaudeCodeModelId} modelId - Model ID\n\t * @param {ClaudeCodeSettings} [settings] - Model settings\n\t * @returns {ClaudeCodeLanguageModel}\n\t */\n\tconst provider = function (modelId, settings) {\n\t\tif (new.target) {\n\t\t\tthrow new Error(\n\t\t\t\t'The Claude Code model function cannot be called with the new keyword.'\n\t\t\t);\n\t\t}\n\n\t\treturn createModel(modelId, settings);\n\t};\n\n\tprovider.languageModel = createModel;\n\tprovider.chat = createModel; // Alias for languageModel\n\n\t// Add textEmbeddingModel method that throws NoSuchModelError\n\tprovider.textEmbeddingModel = (modelId) => {\n\t\tthrow new NoSuchModelError({\n\t\t\tmodelId,\n\t\t\tmodelType: 'textEmbeddingModel'\n\t\t});\n\t};\n\n\treturn /** @type {ClaudeCodeProvider} */ (provider);\n}\n\n/**\n * Default Claude Code provider instance\n */\nexport const claudeCode = createClaudeCode();\n\n// Provider exports\nexport { ClaudeCodeLanguageModel } from './language-model.js';\n\n// Error handling exports\nexport {\n\tisAuthenticationError,\n\tisTimeoutError,\n\tgetErrorMetadata,\n\tcreateAPICallError,\n\tcreateAuthenticationError,\n\tcreateTimeoutError\n} from './errors.js';\n"], ["/claude-task-master/src/ai-providers/anthropic.js", "/**\n * src/ai-providers/anthropic.js\n *\n * Implementation for interacting with Anthropic models (e.g., Claude)\n * using the Vercel AI SDK.\n */\n\nimport { createAnthropic } from '@ai-sdk/anthropic';\nimport { BaseAIProvider } from './base-provider.js';\n\n// TODO: Implement standardized functions for generateText, streamText, generateObject\n\n// --- Client Instantiation ---\n// Note: API key resolution should ideally happen closer to the call site\n// using the config manager/resolver which 
checks process.env and session.env.\n// This is a placeholder for basic functionality.\n// Remove the global variable and caching logic\n// let anthropicClient;\n\nexport class AnthropicAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Anthropic';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the Anthropic API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'ANTHROPIC_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns an Anthropic client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - Anthropic API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} Anthropic client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('Anthropic API key is required.');\n\t\t\t}\n\n\t\t\treturn createAnthropic({\n\t\t\t\tapiKey,\n\t\t\t\t...(baseURL && { baseURL }),\n\t\t\t\theaders: {\n\t\t\t\t\t'anthropic-beta': 'output-128k-2025-02-19'\n\t\t\t\t}\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n\n// TODO: Implement streamAnthropicObject if needed and supported well by the SDK for Anthropic.\n// The basic structure would be similar to generateAnthropicObject but using streamObject.\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/response-language.js", "/**\n * response-language.js\n * Direct function for managing response language via MCP\n */\n\nimport { setResponseLanguage } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\nexport 
async function responseLanguageDirect(args, log, context = {}) {\n\tconst { projectRoot, language } = args;\n\tconst mcpLog = createLogWrapper(log);\n\n\tlog.info(\n\t\t`Executing response-language_direct with args: ${JSON.stringify(args)}`\n\t);\n\tlog.info(`Using project root: ${projectRoot}`);\n\n\ttry {\n\t\tenableSilentMode();\n\t\treturn setResponseLanguage(language, {\n\t\t\tmcpLog,\n\t\t\tprojectRoot\n\t\t});\n\t} catch (error) {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'DIRECT_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message,\n\t\t\t\tdetails: error.stack\n\t\t\t}\n\t\t};\n\t} finally {\n\t\tdisableSilentMode();\n\t}\n}\n"], ["/claude-task-master/src/utils/getVersion.js", "import fs from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\nimport { log } from '../../scripts/modules/utils.js';\n\n/**\n * Reads the version from the nearest package.json relative to this file.\n * Returns 'unknown' if not found or on error.\n * @returns {string} The version string or 'unknown'.\n */\nexport function getTaskMasterVersion() {\n\tlet version = 'unknown';\n\ttry {\n\t\t// Get the directory of the current module (getPackageVersion.js)\n\t\tconst currentModuleFilename = fileURLToPath(import.meta.url);\n\t\tconst currentModuleDirname = path.dirname(currentModuleFilename);\n\t\t// Construct the path to package.json relative to this file (../../package.json)\n\t\tconst packageJsonPath = path.join(\n\t\t\tcurrentModuleDirname,\n\t\t\t'..',\n\t\t\t'..',\n\t\t\t'package.json'\n\t\t);\n\n\t\tif (fs.existsSync(packageJsonPath)) {\n\t\t\tconst packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8');\n\t\t\tconst packageJson = JSON.parse(packageJsonContent);\n\t\t\tversion = packageJson.version;\n\t\t}\n\t} catch (error) {\n\t\t// Silently fall back to default version\n\t\tlog('warn', 'Could not read own package.json for version info.', error);\n\t}\n\treturn version;\n}\n"], 
["/claude-task-master/mcp-server/src/tools/response-language.js", "import { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { responseLanguageDirect } from '../core/direct-functions/response-language.js';\n\nexport function registerResponseLanguageTool(server) {\n\tserver.addTool({\n\t\tname: 'response-language',\n\t\tdescription: 'Get or set the response language for the project',\n\t\tparameters: z.object({\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'The root directory for the project. ALWAYS SET THIS TO THE PROJECT ROOT DIRECTORY. IF NOT SET, THE TOOL WILL NOT WORK.'\n\t\t\t\t),\n\t\t\tlanguage: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'The new response language to set. like \"中文\" \"English\" or \"español\".'\n\t\t\t\t)\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Executing response-language tool with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\tconst result = await responseLanguageDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\t...args,\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\t\t\t\treturn handleApiResult(result, log, 'Error setting response language');\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in response-language tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/ai-providers/azure.js", "/**\n * azure.js\n * AI provider implementation for Azure OpenAI models using Vercel AI SDK.\n */\n\nimport { createAzure } from '@ai-sdk/azure';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class AzureProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Azure OpenAI';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t 
* @returns {string} The environment variable name for the Azure OpenAI API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'AZURE_OPENAI_API_KEY';\n\t}\n\n\t/**\n\t * Validates Azure-specific authentication parameters\n\t * @param {object} params - Parameters to validate\n\t * @throws {Error} If required parameters are missing\n\t */\n\tvalidateAuth(params) {\n\t\tif (!params.apiKey) {\n\t\t\tthrow new Error('Azure API key is required');\n\t\t}\n\n\t\tif (!params.baseURL) {\n\t\t\tthrow new Error(\n\t\t\t\t'Azure endpoint URL is required. Set it in .taskmasterconfig global.azureBaseURL or models.[role].baseURL'\n\t\t\t);\n\t\t}\n\t}\n\n\t/**\n\t * Creates and returns an Azure OpenAI client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - Azure OpenAI API key\n\t * @param {string} params.baseURL - Azure OpenAI endpoint URL (from .taskmasterconfig global.azureBaseURL or models.[role].baseURL)\n\t * @returns {Function} Azure OpenAI client function\n\t * @throws {Error} If required parameters are missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\treturn createAzure({\n\t\t\t\tapiKey,\n\t\t\t\tbaseURL\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/custom-sdk/claude-code/types.js", "/**\n * @fileoverview Type definitions for Claude Code AI SDK provider\n * These JSDoc types mirror the TypeScript interfaces from the original provider\n */\n\n/**\n * Claude Code provider settings\n * @typedef {Object} ClaudeCodeSettings\n * @property {string} [pathToClaudeCodeExecutable='claude'] - Custom path to Claude Code CLI executable\n * @property {string} [customSystemPrompt] - Custom system prompt to use\n * @property {string} [appendSystemPrompt] - Append additional content to the system prompt\n * @property {number} [maxTurns] - Maximum 
number of turns for the conversation\n * @property {number} [maxThinkingTokens] - Maximum thinking tokens for the model\n * @property {string} [cwd] - Working directory for CLI operations\n * @property {'bun'|'deno'|'node'} [executable='node'] - JavaScript runtime to use\n * @property {string[]} [executableArgs] - Additional arguments for the JavaScript runtime\n * @property {'default'|'acceptEdits'|'bypassPermissions'|'plan'} [permissionMode='default'] - Permission mode for tool usage\n * @property {string} [permissionPromptToolName] - Custom tool name for permission prompts\n * @property {boolean} [continue] - Continue the most recent conversation\n * @property {string} [resume] - Resume a specific session by ID\n * @property {string[]} [allowedTools] - Tools to explicitly allow during execution (e.g., ['Read', 'LS', 'Bash(git log:*)'])\n * @property {string[]} [disallowedTools] - Tools to disallow during execution (e.g., ['Write', 'Edit', 'Bash(rm:*)'])\n * @property {Object.<string, MCPServerConfig>} [mcpServers] - MCP server configuration\n * @property {boolean} [verbose] - Enable verbose logging for debugging\n */\n\n/**\n * MCP Server configuration\n * @typedef {Object} MCPServerConfig\n * @property {'stdio'|'sse'} [type='stdio'] - Server type\n * @property {string} command - Command to execute (for stdio type)\n * @property {string[]} [args] - Arguments for the command\n * @property {Object.<string, string>} [env] - Environment variables\n * @property {string} url - URL for SSE type servers\n * @property {Object.<string, string>} [headers] - Headers for SSE type servers\n */\n\n/**\n * Model ID type - either 'opus', 'sonnet', or any string\n * @typedef {'opus'|'sonnet'|string} ClaudeCodeModelId\n */\n\n/**\n * Language model options\n * @typedef {Object} ClaudeCodeLanguageModelOptions\n * @property {ClaudeCodeModelId} id - The model ID\n * @property {ClaudeCodeSettings} [settings] - Optional settings\n */\n\n/**\n * Error metadata for Claude Code errors\n * 
@typedef {Object} ClaudeCodeErrorMetadata\n * @property {string} [code] - Error code\n * @property {number} [exitCode] - Process exit code\n * @property {string} [stderr] - Standard error output\n * @property {string} [promptExcerpt] - Excerpt of the prompt that caused the error\n */\n\n/**\n * Claude Code provider interface\n * @typedef {Object} ClaudeCodeProvider\n * @property {function(ClaudeCodeModelId, ClaudeCodeSettings=): Object} languageModel - Create a language model\n * @property {function(ClaudeCodeModelId, ClaudeCodeSettings=): Object} chat - Alias for languageModel\n * @property {function(string): never} textEmbeddingModel - Throws NoSuchModelError (not supported)\n */\n\n/**\n * Claude Code provider settings\n * @typedef {Object} ClaudeCodeProviderSettings\n * @property {ClaudeCodeSettings} [defaultSettings] - Default settings to use for all models\n */\n\nexport {}; // This ensures the file is treated as a module\n"], ["/claude-task-master/mcp-server/src/core/utils/env-utils.js", "/**\n * Temporarily sets environment variables from session.env, executes an action,\n * and restores the original environment variables.\n * @param {object | undefined} sessionEnv - The environment object from the session.\n * @param {Function} actionFn - An async function to execute with the temporary environment.\n * @returns {Promise<any>} The result of the actionFn.\n */\nexport async function withSessionEnv(sessionEnv, actionFn) {\n\tif (\n\t\t!sessionEnv ||\n\t\ttypeof sessionEnv !== 'object' ||\n\t\tObject.keys(sessionEnv).length === 0\n\t) {\n\t\t// If no sessionEnv is provided, just run the action directly\n\t\treturn await actionFn();\n\t}\n\n\tconst originalEnv = {};\n\tconst keysToRestore = [];\n\n\t// Set environment variables from sessionEnv\n\tfor (const key in sessionEnv) {\n\t\tif (Object.prototype.hasOwnProperty.call(sessionEnv, key)) {\n\t\t\t// Store original value if it exists, otherwise mark for deletion\n\t\t\tif (process.env[key] !== undefined) 
{\n\t\t\t\toriginalEnv[key] = process.env[key];\n\t\t\t}\n\t\t\tkeysToRestore.push(key);\n\t\t\tprocess.env[key] = sessionEnv[key];\n\t\t}\n\t}\n\n\ttry {\n\t\t// Execute the provided action function\n\t\treturn await actionFn();\n\t} finally {\n\t\t// Restore original environment variables\n\t\tfor (const key of keysToRestore) {\n\t\t\tif (Object.prototype.hasOwnProperty.call(originalEnv, key)) {\n\t\t\t\tprocess.env[key] = originalEnv[key];\n\t\t\t} else {\n\t\t\t\t// If the key didn't exist originally, delete it\n\t\t\t\tdelete process.env[key];\n\t\t\t}\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/xai.js", "/**\n * xai.js\n * AI provider implementation for xAI models using Vercel AI SDK.\n */\n\nimport { createXai } from '@ai-sdk/xai';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class XAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'xAI';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the xAI API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'XAI_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns an xAI client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - xAI API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} xAI client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('xAI API key is required.');\n\t\t\t}\n\n\t\t\treturn createXai({\n\t\t\t\tapiKey,\n\t\t\t\tbaseURL: baseURL || 'https://api.x.ai/v1'\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/constants/task-priority.js", "/**\n * @typedef {'high' | 'medium' | 
'low'} TaskPriority\n */\n\n/**\n * Task priority options\n * @type {TaskPriority[]}\n * @description Defines possible task priorities:\n * - high: Critical tasks that need immediate attention\n * - medium: Standard priority tasks (default)\n * - low: Tasks that can be deferred or are nice-to-have\n */\nexport const TASK_PRIORITY_OPTIONS = ['high', 'medium', 'low'];\n\n/**\n * Default task priority\n * @type {TaskPriority}\n */\nexport const DEFAULT_TASK_PRIORITY = 'medium';\n\n/**\n * Check if a given priority is valid\n * @param {string} priority - The priority to check\n * @returns {boolean} True if the priority is valid, false otherwise\n */\nexport function isValidTaskPriority(priority) {\n\treturn TASK_PRIORITY_OPTIONS.includes(priority?.toLowerCase());\n}\n\n/**\n * Normalize a priority value to lowercase\n * @param {string} priority - The priority to normalize\n * @returns {TaskPriority|null} The normalized priority or null if invalid\n */\nexport function normalizeTaskPriority(priority) {\n\tif (!priority) return null;\n\tconst normalized = priority.toLowerCase();\n\treturn isValidTaskPriority(normalized) ? 
normalized : null;\n}\n"], ["/claude-task-master/src/ai-providers/claude-code.js", "/**\n * src/ai-providers/claude-code.js\n *\n * Implementation for interacting with Claude models via Claude Code CLI\n * using a custom AI SDK implementation.\n */\n\nimport { createClaudeCode } from './custom-sdk/claude-code/index.js';\nimport { BaseAIProvider } from './base-provider.js';\nimport { getClaudeCodeSettingsForCommand } from '../../scripts/modules/config-manager.js';\n\nexport class ClaudeCodeProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Claude Code';\n\t}\n\n\tgetRequiredApiKeyName() {\n\t\treturn 'CLAUDE_CODE_API_KEY';\n\t}\n\n\tisRequiredApiKey() {\n\t\treturn false;\n\t}\n\n\t/**\n\t * Override validateAuth to skip API key validation for Claude Code\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateAuth(params) {\n\t\t// Claude Code doesn't require an API key\n\t\t// No validation needed\n\t}\n\n\t/**\n\t * Creates and returns a Claude Code client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} [params.commandName] - Name of the command invoking the service\n\t * @param {string} [params.baseURL] - Optional custom API endpoint (not used by Claude Code)\n\t * @returns {Function} Claude Code client function\n\t * @throws {Error} If initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\t// Claude Code doesn't use API keys or base URLs\n\t\t\t// Just return the provider factory\n\t\t\treturn createClaudeCode({\n\t\t\t\tdefaultSettings: getClaudeCodeSettingsForCommand(params?.commandName)\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/perplexity.js", "/**\n * perplexity.js\n * AI provider implementation for Perplexity models using Vercel AI SDK.\n */\n\nimport { createPerplexity } from '@ai-sdk/perplexity';\nimport { BaseAIProvider } from 
'./base-provider.js';\n\nexport class PerplexityAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Perplexity';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the Perplexity API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'PERPLEXITY_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns a Perplexity client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - Perplexity API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} Perplexity client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('Perplexity API key is required.');\n\t\t\t}\n\n\t\t\treturn createPerplexity({\n\t\t\t\tapiKey,\n\t\t\t\tbaseURL: baseURL || 'https://api.perplexity.ai'\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/test-config-manager.js", "// test-config-manager.js\nconsole.log('=== ENVIRONMENT TEST ===');\nconsole.log('Working directory:', process.cwd());\nconsole.log('NODE_PATH:', process.env.NODE_PATH);\n\n// Test basic imports\ntry {\n\tconsole.log('Importing config-manager');\n\t// Use dynamic import for ESM\n\tconst configManagerModule = await import(\n\t\t'./scripts/modules/config-manager.js'\n\t);\n\tconst configManager = configManagerModule.default || configManagerModule;\n\tconsole.log('Config manager loaded successfully');\n\n\tconsole.log('Loading supported models');\n\t// Add after line 14 (after \"Config manager loaded successfully\")\n\tconsole.log('Config manager exports:', Object.keys(configManager));\n} catch (error) {\n\tconsole.error('Import error:', 
error.message);\n\tconsole.error(error.stack);\n}\n\n// Test file access\ntry {\n\tconsole.log('Checking for .taskmasterconfig');\n\t// Use dynamic import for ESM\n\tconst { readFileSync, existsSync } = await import('fs');\n\tconst { resolve } = await import('path');\n\n\tconst configExists = existsSync('./.taskmasterconfig');\n\tconsole.log('.taskmasterconfig exists:', configExists);\n\n\tif (configExists) {\n\t\tconst config = JSON.parse(readFileSync('./.taskmasterconfig', 'utf-8'));\n\t\tconsole.log('Config keys:', Object.keys(config));\n\t}\n\n\tconsole.log('Checking for supported-models.json');\n\tconst modelsPath = resolve('./scripts/modules/supported-models.json');\n\tconsole.log('Models path:', modelsPath);\n\tconst modelsExists = existsSync(modelsPath);\n\tconsole.log('supported-models.json exists:', modelsExists);\n} catch (error) {\n\tconsole.error('File access error:', error.message);\n}\n\nconsole.log('=== TEST COMPLETE ===');\n"], ["/claude-task-master/src/ai-providers/groq.js", "/**\n * src/ai-providers/groq.js\n *\n * Implementation for interacting with Groq models\n * using the Vercel AI SDK.\n */\n\nimport { createGroq } from '@ai-sdk/groq';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class GroqProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Groq';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the Groq API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'GROQ_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns a Groq client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - Groq API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} Groq client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry 
{\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('Groq API key is required.');\n\t\t\t}\n\n\t\t\treturn createGroq({\n\t\t\t\tapiKey,\n\t\t\t\t...(baseURL && { baseURL })\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/google.js", "/**\n * google.js\n * AI provider implementation for Google AI models using Vercel AI SDK.\n */\n\nimport { createGoogleGenerativeAI } from '@ai-sdk/google';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class GoogleAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Google';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the Google API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'GOOGLE_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns a Google AI client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - Google API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} Google AI client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('Google API key is required.');\n\t\t\t}\n\n\t\t\treturn createGoogleGenerativeAI({\n\t\t\t\tapiKey,\n\t\t\t\t...(baseURL && { baseURL })\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/openai.js", "/**\n * openai.js\n * AI provider implementation for OpenAI models using Vercel AI SDK.\n */\n\nimport { createOpenAI } from '@ai-sdk/openai';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class OpenAIProvider extends 
BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'OpenAI';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the OpenAI API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'OPENAI_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns an OpenAI client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - OpenAI API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} OpenAI client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('OpenAI API key is required.');\n\t\t\t}\n\n\t\t\treturn createOpenAI({\n\t\t\t\tapiKey,\n\t\t\t\t...(baseURL && { baseURL })\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/openrouter.js", "/**\n * openrouter.js\n * AI provider implementation for OpenRouter models using Vercel AI SDK.\n */\n\nimport { createOpenRouter } from '@openrouter/ai-sdk-provider';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class OpenRouterAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'OpenRouter';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the OpenRouter API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'OPENROUTER_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns an OpenRouter client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - OpenRouter API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns 
{Function} OpenRouter client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('OpenRouter API key is required.');\n\t\t\t}\n\n\t\t\treturn createOpenRouter({\n\t\t\t\tapiKey,\n\t\t\t\t...(baseURL && { baseURL })\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/utils/logger-utils.js", "/**\n * Logger utility functions for Task Master\n * Provides standardized logging patterns for both CLI and utility contexts\n */\n\nimport { log as utilLog } from '../../scripts/modules/utils.js';\n\n/**\n * Creates a standard logger object that wraps the utility log function\n * This provides a consistent logger interface across different parts of the application\n * @returns {Object} A logger object with standard logging methods (info, warn, error, debug, success)\n */\nexport function createStandardLogger() {\n\treturn {\n\t\tinfo: (msg, ...args) => utilLog('info', msg, ...args),\n\t\twarn: (msg, ...args) => utilLog('warn', msg, ...args),\n\t\terror: (msg, ...args) => utilLog('error', msg, ...args),\n\t\tdebug: (msg, ...args) => utilLog('debug', msg, ...args),\n\t\tsuccess: (msg, ...args) => utilLog('success', msg, ...args)\n\t};\n}\n\n/**\n * Creates a logger using either the provided logger or a default standard logger\n * This is the recommended pattern for functions that accept an optional logger parameter\n * @param {Object|null} providedLogger - Optional logger object passed from caller\n * @returns {Object} A logger object with standard logging methods\n */\nexport function getLoggerOrDefault(providedLogger = null) {\n\treturn providedLogger || createStandardLogger();\n}\n"], ["/claude-task-master/src/ai-providers/custom-sdk/claude-code/json-extractor.js", "/**\n * @fileoverview Extract JSON from Claude's response, handling 
markdown blocks and other formatting\n */\n\n/**\n * Extract JSON from Claude's response\n * @param {string} text - The text to extract JSON from\n * @returns {string} - The extracted JSON string\n */\nexport function extractJson(text) {\n\t// Remove markdown code blocks if present\n\tlet jsonText = text.trim();\n\n\t// Remove ```json blocks\n\tjsonText = jsonText.replace(/^```json\\s*/gm, '');\n\tjsonText = jsonText.replace(/^```\\s*/gm, '');\n\tjsonText = jsonText.replace(/```\\s*$/gm, '');\n\n\t// Remove common TypeScript/JavaScript patterns\n\tjsonText = jsonText.replace(/^const\\s+\\w+\\s*=\\s*/, ''); // Remove \"const varName = \"\n\tjsonText = jsonText.replace(/^let\\s+\\w+\\s*=\\s*/, ''); // Remove \"let varName = \"\n\tjsonText = jsonText.replace(/^var\\s+\\w+\\s*=\\s*/, ''); // Remove \"var varName = \"\n\tjsonText = jsonText.replace(/;?\\s*$/, ''); // Remove trailing semicolons\n\n\t// Try to extract JSON object or array\n\tconst objectMatch = jsonText.match(/{[\\s\\S]*}/);\n\tconst arrayMatch = jsonText.match(/\\[[\\s\\S]*\\]/);\n\n\tif (objectMatch) {\n\t\tjsonText = objectMatch[0];\n\t} else if (arrayMatch) {\n\t\tjsonText = arrayMatch[0];\n\t}\n\n\t// First try to parse as valid JSON\n\ttry {\n\t\tJSON.parse(jsonText);\n\t\treturn jsonText;\n\t} catch {\n\t\t// If it's not valid JSON, it might be a JavaScript object literal\n\t\t// Try to convert it to valid JSON\n\t\ttry {\n\t\t\t// This is a simple conversion that handles basic cases\n\t\t\t// Replace unquoted keys with quoted keys\n\t\t\tconst converted = jsonText\n\t\t\t\t.replace(/([{,]\\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\\s*:/g, '$1\"$2\":')\n\t\t\t\t// Replace single quotes with double quotes\n\t\t\t\t.replace(/'/g, '\"');\n\n\t\t\t// Validate the converted JSON\n\t\t\tJSON.parse(converted);\n\t\t\treturn converted;\n\t\t} catch {\n\t\t\t// If all else fails, return the original text\n\t\t\t// The AI SDK will handle the error appropriately\n\t\t\treturn text;\n\t\t}\n\t}\n}\n"], 
["/claude-task-master/mcp-server/server.js", "#!/usr/bin/env node\n\nimport TaskMasterMCPServer from './src/index.js';\nimport dotenv from 'dotenv';\nimport logger from './src/logger.js';\n\n// Load environment variables\ndotenv.config();\n\n/**\n * Start the MCP server\n */\nasync function startServer() {\n\tconst server = new TaskMasterMCPServer();\n\n\t// Handle graceful shutdown\n\tprocess.on('SIGINT', async () => {\n\t\tawait server.stop();\n\t\tprocess.exit(0);\n\t});\n\n\tprocess.on('SIGTERM', async () => {\n\t\tawait server.stop();\n\t\tprocess.exit(0);\n\t});\n\n\ttry {\n\t\tawait server.start();\n\t} catch (error) {\n\t\tlogger.error(`Failed to start MCP server: ${error.message}`);\n\t\tprocess.exit(1);\n\t}\n}\n\n// Start the server\nstartServer();\n"], ["/claude-task-master/src/ai-providers/ollama.js", "/**\n * ollama.js\n * AI provider implementation for Ollama models using the ollama-ai-provider package.\n */\n\nimport { createOllama } from 'ollama-ai-provider';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class OllamaAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Ollama';\n\t}\n\n\t/**\n\t * Override auth validation - Ollama doesn't require API keys\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateAuth(_params) {\n\t\t// Ollama runs locally and doesn't require API keys\n\t\t// No authentication validation needed\n\t}\n\n\t/**\n\t * Creates and returns an Ollama client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} [params.baseURL] - Optional Ollama base URL (defaults to http://localhost:11434)\n\t * @returns {Function} Ollama client function\n\t * @throws {Error} If initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { baseURL } = params;\n\n\t\t\treturn createOllama({\n\t\t\t\t...(baseURL && { baseURL })\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', 
error);\n\t\t}\n\t}\n\n\tisRequiredApiKey() {\n\t\treturn false;\n\t}\n\n\t/**\n\t * Returns the required API key environment variable name for Ollama.\n\t * @returns {string} The environment variable name\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'OLLAMA_API_KEY';\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/cache-stats.js", "/**\n * cache-stats.js\n * Direct function implementation for retrieving cache statistics\n */\n\nimport { contextManager } from '../context-manager.js';\n\n/**\n * Get cache statistics for monitoring\n * @param {Object} args - Command arguments\n * @param {Object} log - Logger object\n * @returns {Object} - Cache statistics\n */\nexport async function getCacheStatsDirect(args, log) {\n\ttry {\n\t\tlog.info('Retrieving cache statistics');\n\t\tconst stats = contextManager.getStats();\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: stats\n\t\t};\n\t} catch (error) {\n\t\tlog.error(`Error getting cache stats: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CACHE_STATS_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown error occurred'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/test-version-check.js", "import {\n\tdisplayUpgradeNotification,\n\tcompareVersions\n} from './scripts/modules/commands.js';\n\n// Simulate different version scenarios\nconsole.log('=== Simulating version check ===\\n');\n\n// 1. Current version is older than latest (should show update notice)\nconsole.log('Scenario 1: Current version older than latest');\ndisplayUpgradeNotification('0.9.30', '1.0.0');\n\n// 2. Current version same as latest (no update needed)\nconsole.log(\n\t'\\nScenario 2: Current version same as latest (this would not normally show a notice)'\n);\nconsole.log('Current: 1.0.0, Latest: 1.0.0');\nconsole.log('compareVersions result:', compareVersions('1.0.0', '1.0.0'));\nconsole.log(\n\t'Update needed:',\n\tcompareVersions('1.0.0', '1.0.0') < 0 ? 'Yes' : 'No'\n);\n\n// 3. 
Current version newer than latest (e.g., development version, would not show notice)\nconsole.log(\n\t'\\nScenario 3: Current version newer than latest (this would not normally show a notice)'\n);\nconsole.log('Current: 1.1.0, Latest: 1.0.0');\nconsole.log('compareVersions result:', compareVersions('1.1.0', '1.0.0'));\nconsole.log(\n\t'Update needed:',\n\tcompareVersions('1.1.0', '1.0.0') < 0 ? 'Yes' : 'No'\n);\n\nconsole.log('\\n=== Test complete ===');\n"], ["/claude-task-master/src/constants/paths.js", "/**\n * Path constants for Task Master application\n */\n\n// .taskmaster directory structure paths\nexport const TASKMASTER_DIR = '.taskmaster';\nexport const TASKMASTER_TASKS_DIR = '.taskmaster/tasks';\nexport const TASKMASTER_DOCS_DIR = '.taskmaster/docs';\nexport const TASKMASTER_REPORTS_DIR = '.taskmaster/reports';\nexport const TASKMASTER_TEMPLATES_DIR = '.taskmaster/templates';\n\n// Task Master configuration files\nexport const TASKMASTER_CONFIG_FILE = '.taskmaster/config.json';\nexport const TASKMASTER_STATE_FILE = '.taskmaster/state.json';\nexport const LEGACY_CONFIG_FILE = '.taskmasterconfig';\n\n// Task Master report files\nexport const COMPLEXITY_REPORT_FILE =\n\t'.taskmaster/reports/task-complexity-report.json';\nexport const LEGACY_COMPLEXITY_REPORT_FILE =\n\t'scripts/task-complexity-report.json';\n\n// Task Master PRD file paths\nexport const PRD_FILE = '.taskmaster/docs/prd.txt';\nexport const LEGACY_PRD_FILE = 'scripts/prd.txt';\n\n// Task Master template files\nexport const EXAMPLE_PRD_FILE = '.taskmaster/templates/example_prd.txt';\nexport const LEGACY_EXAMPLE_PRD_FILE = 'scripts/example_prd.txt';\n\n// Task Master task file paths\nexport const TASKMASTER_TASKS_FILE = '.taskmaster/tasks/tasks.json';\nexport const LEGACY_TASKS_FILE = 'tasks/tasks.json';\n\n// General project files (not Task Master specific but commonly used)\nexport const ENV_EXAMPLE_FILE = '.env.example';\nexport const GITIGNORE_FILE = '.gitignore';\n\n// Task file naming 
pattern\nexport const TASK_FILE_PREFIX = 'task_';\nexport const TASK_FILE_EXTENSION = '.txt';\n\n/**\n * Project markers used to identify a task-master project root\n * These files/directories indicate that a directory is a Task Master project\n */\nexport const PROJECT_MARKERS = [\n\t'.taskmaster', // New taskmaster directory\n\tLEGACY_CONFIG_FILE, // .taskmasterconfig\n\t'tasks.json', // Generic tasks file\n\tLEGACY_TASKS_FILE, // tasks/tasks.json (legacy location)\n\tTASKMASTER_TASKS_FILE, // .taskmaster/tasks/tasks.json (new location)\n\t'.git', // Git repository\n\t'.svn' // SVN repository\n];\n"], ["/claude-task-master/src/ai-providers/bedrock.js", "import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';\nimport { fromNodeProviderChain } from '@aws-sdk/credential-providers';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class BedrockAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Bedrock';\n\t}\n\n\tisRequiredApiKey() {\n\t\treturn false;\n\t}\n\n\t/**\n\t * Returns the required API key environment variable name for Bedrock.\n\t * Bedrock uses AWS credentials, so we return the AWS access key identifier.\n\t * @returns {string} The environment variable name\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'AWS_ACCESS_KEY_ID';\n\t}\n\n\t/**\n\t * Override auth validation - Bedrock uses AWS credentials instead of API keys\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateAuth(params) {}\n\n\t/**\n\t * Creates and returns a Bedrock client instance.\n\t * See https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html\n\t * for AWS SDK environment variables and configuration options.\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst credentialProvider = fromNodeProviderChain();\n\n\t\t\treturn createAmazonBedrock({\n\t\t\t\tcredentialProvider\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], 
["/claude-task-master/jest.config.js", "export default {\n\t// Use Node.js environment for testing\n\ttestEnvironment: 'node',\n\n\t// Automatically clear mock calls between every test\n\tclearMocks: true,\n\n\t// Indicates whether the coverage information should be collected while executing the test\n\tcollectCoverage: false,\n\n\t// The directory where Jest should output its coverage files\n\tcoverageDirectory: 'coverage',\n\n\t// A list of paths to directories that Jest should use to search for files in\n\troots: ['<rootDir>/tests'],\n\n\t// The glob patterns Jest uses to detect test files\n\ttestMatch: ['**/__tests__/**/*.js', '**/?(*.)+(spec|test).js'],\n\n\t// Transform files\n\ttransform: {},\n\n\t// Disable transformations for node_modules\n\ttransformIgnorePatterns: ['/node_modules/'],\n\n\t// Set moduleNameMapper for absolute paths\n\tmoduleNameMapper: {\n\t\t'^@/(.*)$': '<rootDir>/$1'\n\t},\n\n\t// Setup module aliases\n\tmoduleDirectories: ['node_modules', '<rootDir>'],\n\n\t// Configure test coverage thresholds\n\tcoverageThreshold: {\n\t\tglobal: {\n\t\t\tbranches: 80,\n\t\t\tfunctions: 80,\n\t\t\tlines: 80,\n\t\t\tstatements: 80\n\t\t}\n\t},\n\n\t// Generate coverage report in these formats\n\tcoverageReporters: ['text', 'lcov'],\n\n\t// Verbose output\n\tverbose: true,\n\n\t// Setup file\n\tsetupFilesAfterEnv: ['<rootDir>/tests/setup.js']\n};\n"], ["/claude-task-master/src/constants/profiles.js", "/**\n * @typedef {'amp' | 'claude' | 'cline' | 'codex' | 'cursor' | 'gemini' | 'kiro' | 'opencode' | 'roo' | 'trae' | 'windsurf' | 'vscode' | 'zed'} RulesProfile\n */\n\n/**\n * Available rule profiles for project initialization and rules command\n *\n * ⚠️ SINGLE SOURCE OF TRUTH: This is the authoritative list of all supported rule profiles.\n * This constant is used directly throughout the codebase (previously aliased as PROFILE_NAMES).\n *\n * @type {RulesProfile[]}\n * @description Defines possible rule profile sets:\n * - amp: Amp Code integration\n * 
- claude: Claude Code integration\n * - cline: Cline IDE rules\n * - codex: Codex integration\n * - cursor: Cursor IDE rules\n * - gemini: Gemini integration\n * - kiro: Kiro IDE rules\n * - opencode: OpenCode integration\n * - roo: Roo Code IDE rules\n * - trae: Trae IDE rules\n * - vscode: VS Code with GitHub Copilot integration\n * - windsurf: Windsurf IDE rules\n * - zed: Zed IDE rules\n *\n * To add a new rule profile:\n * 1. Add the profile name to this array\n * 2. Create a profile file in src/profiles/{profile}.js\n * 3. Export it as {profile}Profile in src/profiles/index.js\n */\nexport const RULE_PROFILES = [\n\t'amp',\n\t'claude',\n\t'cline',\n\t'codex',\n\t'cursor',\n\t'gemini',\n\t'kiro',\n\t'opencode',\n\t'roo',\n\t'trae',\n\t'vscode',\n\t'windsurf',\n\t'zed'\n];\n\n/**\n * Centralized enum for all supported Roo agent modes\n * @type {string[]}\n * @description Available Roo Code IDE modes for rule generation\n */\nexport const ROO_MODES = [\n\t'architect',\n\t'ask',\n\t'orchestrator',\n\t'code',\n\t'debug',\n\t'test'\n];\n\n/**\n * Check if a given rule profile is valid\n * @param {string} rulesProfile - The rule profile to check\n * @returns {boolean} True if the rule profile is valid, false otherwise\n */\nexport function isValidRulesProfile(rulesProfile) {\n\treturn RULE_PROFILES.includes(rulesProfile);\n}\n"], ["/claude-task-master/scripts/dev.js", "#!/usr/bin/env node\n\n/**\n * dev.js\n * Task Master CLI - AI-driven development task management\n *\n * This is the refactored entry point that uses the modular architecture.\n * It imports functionality from the modules directory and provides a CLI.\n */\n\nimport dotenv from 'dotenv';\ndotenv.config();\n\n// Add at the very beginning of the file\nif (process.env.DEBUG === '1') {\n\tconsole.error('DEBUG - dev.js received args:', process.argv.slice(2));\n}\n\nimport { runCLI } from './modules/commands.js';\n\n// Run the CLI with the process arguments\nrunCLI(process.argv);\n"], 
["/claude-task-master/src/profiles/gemini.js", "// Gemini profile for rule-transformer\nimport { createProfile } from './base-profile.js';\n\n// Create and export gemini profile using the base factory\nexport const geminiProfile = createProfile({\n\tname: 'gemini',\n\tdisplayName: 'Gemini',\n\turl: 'codeassist.google',\n\tdocsUrl: 'github.com/google-gemini/gemini-cli',\n\tprofileDir: '.gemini', // Keep .gemini for settings.json\n\trulesDir: '.', // Root directory for GEMINI.md\n\tmcpConfigName: 'settings.json', // Override default 'mcp.json'\n\tincludeDefaultRules: false,\n\tfileMap: {\n\t\t'AGENTS.md': 'GEMINI.md'\n\t}\n});\n"], ["/claude-task-master/src/constants/providers.js", "/**\n * Provider validation constants\n * Defines which providers should be validated against the supported-models.json file\n */\n\n// Providers that have predefined model lists and should be validated\nexport const VALIDATED_PROVIDERS = [\n\t'anthropic',\n\t'openai',\n\t'google',\n\t'perplexity',\n\t'xai',\n\t'groq',\n\t'mistral'\n];\n\n// Custom providers object for easy named access\nexport const CUSTOM_PROVIDERS = {\n\tAZURE: 'azure',\n\tVERTEX: 'vertex',\n\tBEDROCK: 'bedrock',\n\tOPENROUTER: 'openrouter',\n\tOLLAMA: 'ollama',\n\tCLAUDE_CODE: 'claude-code',\n\tMCP: 'mcp',\n\tGEMINI_CLI: 'gemini-cli'\n};\n\n// Custom providers array (for backward compatibility and iteration)\nexport const CUSTOM_PROVIDERS_ARRAY = Object.values(CUSTOM_PROVIDERS);\n\n// All known providers (for reference)\nexport const ALL_PROVIDERS = [\n\t...VALIDATED_PROVIDERS,\n\t...CUSTOM_PROVIDERS_ARRAY\n];\n"], ["/claude-task-master/src/profiles/codex.js", "// Codex profile for rule-transformer\nimport { createProfile } from './base-profile.js';\n\n// Create and export codex profile using the base factory\nexport const codexProfile = createProfile({\n\tname: 'codex',\n\tdisplayName: 'Codex',\n\turl: 'codex.ai',\n\tdocsUrl: 'platform.openai.com/docs/codex',\n\tprofileDir: '.', // Root directory\n\trulesDir: '.', // 
No specific rules directory needed\n\tmcpConfig: false,\n\tmcpConfigName: null,\n\tincludeDefaultRules: false,\n\tfileMap: {\n\t\t'AGENTS.md': 'AGENTS.md'\n\t}\n});\n"], ["/claude-task-master/src/profiles/cline.js", "// Cline conversion profile for rule-transformer\nimport { createProfile, COMMON_TOOL_MAPPINGS } from './base-profile.js';\n\n// Create and export cline profile using the base factory\nexport const clineProfile = createProfile({\n\tname: 'cline',\n\tdisplayName: 'Cline',\n\turl: 'cline.bot',\n\tdocsUrl: 'docs.cline.bot',\n\tprofileDir: '.clinerules',\n\trulesDir: '.clinerules',\n\tmcpConfig: false\n});\n"], ["/claude-task-master/src/constants/rules-actions.js", "/**\n * @typedef {'add' | 'remove'} RulesAction\n */\n\n/**\n * Individual rules action constants\n */\nexport const RULES_ACTIONS = {\n\tADD: 'add',\n\tREMOVE: 'remove'\n};\n\n/**\n * Special rules command (not a CRUD operation)\n */\nexport const RULES_SETUP_ACTION = 'setup';\n\n/**\n * Check if a given action is a valid rules action\n * @param {string} action - The action to check\n * @returns {boolean} True if the action is valid, false otherwise\n */\nexport function isValidRulesAction(action) {\n\treturn Object.values(RULES_ACTIONS).includes(action);\n}\n"], ["/claude-task-master/src/profiles/trae.js", "// Trae conversion profile for rule-transformer\nimport { createProfile, COMMON_TOOL_MAPPINGS } from './base-profile.js';\n\n// Create and export trae profile using the base factory\nexport const traeProfile = createProfile({\n\tname: 'trae',\n\tdisplayName: 'Trae',\n\turl: 'trae.ai',\n\tdocsUrl: 'docs.trae.ai',\n\tmcpConfig: false\n});\n"], ["/claude-task-master/src/constants/commands.js", "/**\n * Command related constants\n * Defines which commands trigger AI processing\n */\n\n// Command names that trigger AI processing\nexport const AI_COMMAND_NAMES = 
[\n\t'add-task',\n\t'analyze-complexity',\n\t'expand-task',\n\t'parse-prd',\n\t'research',\n\t'research-save',\n\t'update-subtask',\n\t'update-task',\n\t'update-tasks'\n];\n"], ["/claude-task-master/scripts/modules/index.js", "/**\n * index.js\n * Main export point for all Task Master CLI modules\n */\n\n// Export all modules\nexport * from './ui.js';\nexport * from './utils.js';\nexport * from './commands.js';\nexport * from './task-manager.js';\nexport * from './prompt-manager.js';\n"], ["/claude-task-master/src/profiles/cursor.js", "// Cursor conversion profile for rule-transformer\nimport { createProfile, COMMON_TOOL_MAPPINGS } from './base-profile.js';\n\n// Create and export cursor profile using the base factory\nexport const cursorProfile = createProfile({\n\tname: 'cursor',\n\tdisplayName: 'Cursor',\n\turl: 'cursor.so',\n\tdocsUrl: 'docs.cursor.com',\n\ttargetExtension: '.mdc', // Cursor keeps .mdc extension\n\tsupportsRulesSubdirectories: true\n});\n"], ["/claude-task-master/src/profiles/windsurf.js", "// Windsurf conversion profile for rule-transformer\nimport { createProfile, COMMON_TOOL_MAPPINGS } from './base-profile.js';\n\n// Create and export windsurf profile using the base factory\nexport const windsurfProfile = createProfile({\n\tname: 'windsurf',\n\tdisplayName: 'Windsurf',\n\turl: 'windsurf.com',\n\tdocsUrl: 'docs.windsurf.com'\n});\n"], ["/claude-task-master/src/profiles/index.js", "// Profile exports for centralized importing\nexport { ampProfile } from './amp.js';\nexport { claudeProfile } from './claude.js';\nexport { clineProfile } from './cline.js';\nexport { codexProfile } from './codex.js';\nexport { cursorProfile } from './cursor.js';\nexport { geminiProfile } from './gemini.js';\nexport { kiroProfile } from './kiro.js';\nexport { opencodeProfile } from './opencode.js';\nexport { rooProfile } from './roo.js';\nexport { traeProfile } from './trae.js';\nexport { vscodeProfile } from './vscode.js';\nexport { windsurfProfile } from 
'./windsurf.js';\nexport { zedProfile } from './zed.js';\n"], ["/claude-task-master/src/ai-providers/index.js", "/**\n * src/ai-providers/index.js\n * Central export point for all AI provider classes\n */\n\nexport { AnthropicAIProvider } from './anthropic.js';\nexport { PerplexityAIProvider } from './perplexity.js';\nexport { GoogleAIProvider } from './google.js';\nexport { OpenAIProvider } from './openai.js';\nexport { XAIProvider } from './xai.js';\nexport { GroqProvider } from './groq.js';\nexport { OpenRouterAIProvider } from './openrouter.js';\nexport { OllamaAIProvider } from './ollama.js';\nexport { BedrockAIProvider } from './bedrock.js';\nexport { AzureProvider } from './azure.js';\nexport { VertexAIProvider } from './google-vertex.js';\nexport { ClaudeCodeProvider } from './claude-code.js';\nexport { GeminiCliProvider } from './gemini-cli.js';\n"], ["/claude-task-master/test-tag-functions.js", ""]], "task_instance_info": {"created_time": "2025-08-21 00:05:44", "created_task_model": "DeepSeek-R1", "code_description": "This code segment functions to: Process a collection of identifiers to locate corresponding elements within a hierarchical structure, capture their current state attributes, apply state modifications through delegated operations, and maintain a record of all changes for subsequent reporting."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```javascript\n[TASK_BEGIN]\n\t\tfor (const taskId of taskIds) {\n\t\t\tconst task = findTaskById(data.tasks, taskId);\n\t\t\t\n\t\t\tif (!task) {\n\t\t\t\tthrow new Error(`Task ${taskId} not found in tag \"${tag}\"`);\n\t\t\t}\n\n\t\t\tif (task.status === newStatus) {\n\t\t\t\tlog('info', `Task ${taskId} already has status ${newStatus}. 
Skipping...`);\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tconst oldStatus = task.status;\n\t\t\ttask.status = newStatus;\n\t\t\tupdatedTasks.push({ id: taskId, oldStatus, newStatus });\n\t\t\t\n\t\t\t// Handle any status-specific updates\n\t\t\tawait updateSingleTaskStatus(task, newStatus, options);\n\t\t}\n[TASK_END]\n```", "inference_time": "2025-08-21 00-06-01"}, "editdistance_info": {"edit_distance": 18.9055, "calculate_time": "2025-08-21 00:06:01", "true_code_clean": "for (const id of taskIds) {\n\t\t\tlet oldStatus = 'unknown';\n\t\t\tif (id.includes('.')) {\n\t\t\t\tconst [parentId, subtaskId] = id\n\t\t\t\t\t.split('.')\n\t\t\t\t\t.map((id) => parseInt(id, 10));\n\t\t\t\tconst parentTask = data.tasks.find((t) => t.id === parentId);\n\t\t\t\tif (parentTask?.subtasks) {\n\t\t\t\t\tconst subtask = parentTask.subtasks.find((st) => st.id === subtaskId);\n\t\t\t\t\toldStatus = subtask?.status || 'pending';\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tconst taskId = parseInt(id, 10);\n\t\t\t\tconst task = data.tasks.find((t) => t.id === taskId);\n\t\t\t\toldStatus = task?.status || 'pending';\n\t\t\t}\n\t\t\tawait updateSingleTaskStatus(tasksPath, id, newStatus, data, !isMcpMode);\n\t\t\tupdatedTasks.push({ id, oldStatus, newStatus });\n\t\t}", "predict_code_clean": "for (const taskId of taskIds) {\n\t\t\tconst task = findTaskById(data.tasks, taskId);\n\t\t\tif (!task) {\n\t\t\t\tthrow new Error(`Task ${taskId} not found in tag \"${tag}\"`);\n\t\t\t}\n\t\t\tif (task.status === newStatus) {\n\t\t\t\tlog('info', `Task ${taskId} already has status ${newStatus}. 
Skipping...`);\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tconst oldStatus = task.status;\n\t\t\ttask.status = newStatus;\n\t\t\tupdatedTasks.push({ id: taskId, oldStatus, newStatus });\n\t\t\tawait updateSingleTaskStatus(task, newStatus, options);\n\t\t}"}} {"repo_name": "claude-task-master", "file_name": "/claude-task-master/mcp-server/src/core/direct-functions/list-tags.js", "inference_info": {"prefix_code": "/**\n * list-tags.js\n * Direct function implementation for listing all tags\n */\n\nimport { tags } from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for listing all tags with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {boolean} [args.showMetadata=false] - Whether to include metadata in the output\n * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\n", "suffix_code": "\n", "middle_code": "export async function listTagsDirect(args, log, context = {}) {\n\tconst { tasksJsonPath, showMetadata = false, projectRoot } = args;\n\tconst { session } = context;\n\tenableSilentMode();\n\tconst mcpLog = createLogWrapper(log);\n\ttry {\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('listTagsDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t\tlog.info('Listing all tags');\n\t\tconst options = {\n\t\t\tshowMetadata\n\t\t};\n\t\tconst 
result = await tags(\n\t\t\ttasksJsonPath,\n\t\t\toptions,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' \n\t\t);\n\t\tconst tagsSummary = result.tags.map((tag) => {\n\t\t\tconst tasks = tag.tasks || [];\n\t\t\tconst statusBreakdown = tasks.reduce((acc, task) => {\n\t\t\t\tconst status = task.status || 'pending';\n\t\t\t\tacc[status] = (acc[status] || 0) + 1;\n\t\t\t\treturn acc;\n\t\t\t}, {});\n\t\t\tconst subtaskCounts = tasks.reduce(\n\t\t\t\t(acc, task) => {\n\t\t\t\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\t\t\t\tacc.totalSubtasks += task.subtasks.length;\n\t\t\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\t\t\tconst subStatus = subtask.status || 'pending';\n\t\t\t\t\t\t\tacc.subtasksByStatus[subStatus] =\n\t\t\t\t\t\t\t\t(acc.subtasksByStatus[subStatus] || 0) + 1;\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t\treturn acc;\n\t\t\t\t},\n\t\t\t\t{ totalSubtasks: 0, subtasksByStatus: {} }\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tname: tag.name,\n\t\t\t\tisCurrent: tag.isCurrent,\n\t\t\t\ttaskCount: tasks.length,\n\t\t\t\tcompletedTasks: tag.completedTasks,\n\t\t\t\tstatusBreakdown,\n\t\t\t\tsubtaskCounts,\n\t\t\t\tcreated: tag.created,\n\t\t\t\tdescription: tag.description\n\t\t\t};\n\t\t});\n\t\tdisableSilentMode();\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\ttags: tagsSummary,\n\t\t\t\tcurrentTag: result.currentTag,\n\t\t\t\ttotalTags: result.totalTags,\n\t\t\t\tmessage: `Found ${result.totalTags} tag(s)`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\tdisableSilentMode();\n\t\tlog.error(`Error in listTagsDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'LIST_TAGS_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}", "code_description": null, "fill_type": "BLOCK_TYPE", "language_type": "javascript", "sub_task_type": "import"}, "context_code": 
[["/claude-task-master/mcp-server/src/core/direct-functions/create-tag-from-branch.js", "/**\n * create-tag-from-branch.js\n * Direct function implementation for creating tags from git branches\n */\n\nimport { createTagFromBranch } from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tgetCurrentBranch,\n\tisGitRepository\n} from '../../../../scripts/modules/utils/git-utils.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for creating tags from git branches with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.tasksJsonPath - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.branchName] - Git branch name (optional, uses current branch if not provided)\n * @param {boolean} [args.copyFromCurrent] - Copy tasks from current tag\n * @param {string} [args.copyFromTag] - Copy tasks from specific tag\n * @param {string} [args.description] - Custom description for the tag\n * @param {boolean} [args.autoSwitch] - Automatically switch to the new tag\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function createTagFromBranchDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst {\n\t\ttasksJsonPath,\n\t\tbranchName,\n\t\tcopyFromCurrent,\n\t\tcopyFromTag,\n\t\tdescription,\n\t\tautoSwitch,\n\t\tprojectRoot\n\t} = args;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if 
tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('createTagFromBranchDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check if projectRoot was provided\n\t\tif (!projectRoot) {\n\t\t\tlog.error('createTagFromBranchDirect called without projectRoot');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'projectRoot is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check if we're in a git repository\n\t\tif (!(await isGitRepository(projectRoot))) {\n\t\t\tlog.error('Not in a git repository');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'NOT_GIT_REPOSITORY',\n\t\t\t\t\tmessage: 'Not in a git repository. Cannot create tag from branch.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Determine branch name\n\t\tlet targetBranch = branchName;\n\t\tif (!targetBranch) {\n\t\t\ttargetBranch = await getCurrentBranch(projectRoot);\n\t\t\tif (!targetBranch) {\n\t\t\t\tlog.error('Could not determine current git branch');\n\t\t\t\tdisableSilentMode();\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'NO_CURRENT_BRANCH',\n\t\t\t\t\t\tmessage: 'Could not determine current git branch'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\t\t}\n\n\t\tlog.info(`Creating tag from git branch: ${targetBranch}`);\n\n\t\t// Prepare options\n\t\tconst options = {\n\t\t\tcopyFromCurrent: copyFromCurrent || false,\n\t\t\tcopyFromTag,\n\t\t\tdescription:\n\t\t\t\tdescription || `Tag created from git branch \"${targetBranch}\"`,\n\t\t\tautoSwitch: autoSwitch || false\n\t\t};\n\n\t\t// Call the createTagFromBranch function\n\t\tconst result = await 
createTagFromBranch(\n\t\t\ttasksJsonPath,\n\t\t\ttargetBranch,\n\t\t\toptions,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tbranchName: result.branchName,\n\t\t\t\ttagName: result.tagName,\n\t\t\t\tcreated: result.created,\n\t\t\t\tmappingUpdated: result.mappingUpdated,\n\t\t\t\tautoSwitched: result.autoSwitched,\n\t\t\t\tmessage: `Successfully created tag \"${result.tagName}\" from branch \"${result.branchName}\"`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in createTagFromBranchDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'CREATE_TAG_FROM_BRANCH_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/add-tag.js", "/**\n * add-tag.js\n * Direct function implementation for creating a new tag\n */\n\nimport {\n\tcreateTag,\n\tcreateTagFromBranch\n} from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for creating a new tag with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.name - Name of the new tag to create\n * @param {boolean} [args.copyFromCurrent=false] - Whether to copy tasks from current tag\n * @param {string} [args.copyFromTag] - Specific tag to copy tasks from\n * @param {boolean} [args.fromBranch=false] - Create tag name from current git branch\n * @param {string} [args.description] - Optional description for the tag\n * @param {string} 
[args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function addTagDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst {\n\t\ttasksJsonPath,\n\t\tname,\n\t\tcopyFromCurrent = false,\n\t\tcopyFromTag,\n\t\tfromBranch = false,\n\t\tdescription,\n\t\tprojectRoot\n\t} = args;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('addTagDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Handle --from-branch option\n\t\tif (fromBranch) {\n\t\t\tlog.info('Creating tag from current git branch');\n\n\t\t\t// Import git utilities\n\t\t\tconst gitUtils = await import(\n\t\t\t\t'../../../../scripts/modules/utils/git-utils.js'\n\t\t\t);\n\n\t\t\t// Check if we're in a git repository\n\t\t\tif (!(await gitUtils.isGitRepository(projectRoot))) {\n\t\t\t\tlog.error('Not in a git repository');\n\t\t\t\tdisableSilentMode();\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'NOT_GIT_REPO',\n\t\t\t\t\t\tmessage: 'Not in a git repository. 
Cannot use fromBranch option.'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Get current git branch\n\t\t\tconst currentBranch = await gitUtils.getCurrentBranch(projectRoot);\n\t\t\tif (!currentBranch) {\n\t\t\t\tlog.error('Could not determine current git branch');\n\t\t\t\tdisableSilentMode();\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'NO_CURRENT_BRANCH',\n\t\t\t\t\t\tmessage: 'Could not determine current git branch.'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Prepare options for branch-based tag creation\n\t\t\tconst branchOptions = {\n\t\t\t\tcopyFromCurrent,\n\t\t\t\tcopyFromTag,\n\t\t\t\tdescription:\n\t\t\t\t\tdescription || `Tag created from git branch \"${currentBranch}\"`\n\t\t\t};\n\n\t\t\t// Call the createTagFromBranch function\n\t\t\tconst result = await createTagFromBranch(\n\t\t\t\ttasksJsonPath,\n\t\t\t\tcurrentBranch,\n\t\t\t\tbranchOptions,\n\t\t\t\t{\n\t\t\t\t\tsession,\n\t\t\t\t\tmcpLog,\n\t\t\t\t\tprojectRoot\n\t\t\t\t},\n\t\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t\t);\n\n\t\t\t// Restore normal logging\n\t\t\tdisableSilentMode();\n\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tbranchName: result.branchName,\n\t\t\t\t\ttagName: result.tagName,\n\t\t\t\t\tcreated: result.created,\n\t\t\t\t\tmappingUpdated: result.mappingUpdated,\n\t\t\t\t\tmessage: `Successfully created tag \"${result.tagName}\" from git branch \"${result.branchName}\"`\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// Check required parameters for regular tag creation\n\t\t\tif (!name || typeof name !== 'string') {\n\t\t\t\tlog.error('Missing required parameter: name');\n\t\t\t\tdisableSilentMode();\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\t\tmessage: 'Tag name is required and must be a string'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tlog.info(`Creating new tag: ${name}`);\n\n\t\t\t// Prepare options\n\t\t\tconst 
options = {\n\t\t\t\tcopyFromCurrent,\n\t\t\t\tcopyFromTag,\n\t\t\t\tdescription\n\t\t\t};\n\n\t\t\t// Call the createTag function\n\t\t\tconst result = await createTag(\n\t\t\t\ttasksJsonPath,\n\t\t\t\tname,\n\t\t\t\toptions,\n\t\t\t\t{\n\t\t\t\t\tsession,\n\t\t\t\t\tmcpLog,\n\t\t\t\t\tprojectRoot\n\t\t\t\t},\n\t\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t\t);\n\n\t\t\t// Restore normal logging\n\t\t\tdisableSilentMode();\n\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\ttagName: result.tagName,\n\t\t\t\t\tcreated: result.created,\n\t\t\t\t\ttasksCopied: result.tasksCopied,\n\t\t\t\t\tsourceTag: result.sourceTag,\n\t\t\t\t\tdescription: result.description,\n\t\t\t\t\tmessage: `Successfully created tag \"${result.tagName}\"`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in addTagDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'ADD_TAG_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/expand-task.js", "/**\n * expand-task.js\n * Direct function implementation for expanding a task into subtasks\n */\n\nimport expandTask from '../../../../scripts/modules/task-manager/expand-task.js';\nimport {\n\treadJSON,\n\twriteJSON,\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport path from 'path';\nimport fs from 'fs';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for expanding a task into subtasks with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.id - The ID of the task to expand.\n * @param {number|string} [args.num] - Number of subtasks to 
generate.\n * @param {boolean} [args.research] - Enable research role for subtask generation.\n * @param {string} [args.prompt] - Additional context to guide subtask generation.\n * @param {boolean} [args.force] - Force expansion even if subtasks exist.\n * @param {string} [args.projectRoot] - Project root directory.\n * @param {string} [args.tag] - Tag for the task\n * @param {Object} log - Logger object\n * @param {Object} context - Context object containing session\n * @param {Object} [context.session] - MCP Session object\n * @returns {Promise<Object>} - Task expansion result { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function expandTaskDirect(args, log, context = {}) {\n\tconst { session } = context; // Extract session\n\t// Destructure expected args, including projectRoot\n\tconst {\n\t\ttasksJsonPath,\n\t\tid,\n\t\tnum,\n\t\tresearch,\n\t\tprompt,\n\t\tforce,\n\t\tprojectRoot,\n\t\ttag,\n\t\tcomplexityReportPath\n\t} = args;\n\n\t// Log session root data for debugging\n\tlog.info(\n\t\t`Session data in expandTaskDirect: ${JSON.stringify({\n\t\t\thasSession: !!session,\n\t\t\tsessionKeys: session ? Object.keys(session) : [],\n\t\t\troots: session?.roots,\n\t\t\trootsStr: JSON.stringify(session?.roots)\n\t\t})}`\n\t);\n\n\t// Check if tasksJsonPath was provided\n\tif (!tasksJsonPath) {\n\t\tlog.error('expandTaskDirect called without tasksJsonPath');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t}\n\t\t};\n\t}\n\n\t// Use provided path\n\tconst tasksPath = tasksJsonPath;\n\n\tlog.info(`[expandTaskDirect] Using tasksPath: ${tasksPath}`);\n\n\t// Validate task ID\n\tconst taskId = id ? 
parseInt(id, 10) : null;\n\tif (!taskId) {\n\t\tlog.error('Task ID is required');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\tmessage: 'Task ID is required'\n\t\t\t}\n\t\t};\n\t}\n\n\t// Process other parameters\n\tconst numSubtasks = num ? parseInt(num, 10) : undefined;\n\tconst useResearch = research === true;\n\tconst additionalContext = prompt || '';\n\tconst forceFlag = force === true;\n\n\ttry {\n\t\tlog.info(\n\t\t\t`[expandTaskDirect] Expanding task ${taskId} into ${numSubtasks || 'default'} subtasks. Research: ${useResearch}, Force: ${forceFlag}`\n\t\t);\n\n\t\t// Read tasks data\n\t\tlog.info(`[expandTaskDirect] Attempting to read JSON from: ${tasksPath}`);\n\t\tconst data = readJSON(tasksPath, projectRoot);\n\t\tlog.info(\n\t\t\t`[expandTaskDirect] Result of readJSON: ${data ? 'Data read successfully' : 'readJSON returned null or undefined'}`\n\t\t);\n\n\t\tif (!data || !data.tasks) {\n\t\t\tlog.error(\n\t\t\t\t`[expandTaskDirect] readJSON failed or returned invalid data for path: ${tasksPath}`\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_TASKS_FILE',\n\t\t\t\t\tmessage: `No valid tasks found in ${tasksPath}. readJSON returned: ${JSON.stringify(data)}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Find the specific task\n\t\tlog.info(`[expandTaskDirect] Searching for task ID ${taskId} in data`);\n\t\tconst task = data.tasks.find((t) => t.id === taskId);\n\t\tlog.info(`[expandTaskDirect] Task found: ${task ? 
'Yes' : 'No'}`);\n\n\t\tif (!task) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'TASK_NOT_FOUND',\n\t\t\t\t\tmessage: `Task with ID ${taskId} not found`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check if task is completed\n\t\tif (task.status === 'done' || task.status === 'completed') {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'TASK_COMPLETED',\n\t\t\t\t\tmessage: `Task ${taskId} is already marked as ${task.status} and cannot be expanded`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check for existing subtasks and force flag\n\t\tconst hasExistingSubtasks = task.subtasks && task.subtasks.length > 0;\n\t\tif (hasExistingSubtasks && !forceFlag) {\n\t\t\tlog.info(\n\t\t\t\t`Task ${taskId} already has ${task.subtasks.length} subtasks. Use --force to overwrite.`\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Task ${taskId} already has subtasks. Expansion skipped.`,\n\t\t\t\t\ttask,\n\t\t\t\t\tsubtasksAdded: 0,\n\t\t\t\t\thasExistingSubtasks\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// If force flag is set, clear existing subtasks\n\t\tif (hasExistingSubtasks && forceFlag) {\n\t\t\tlog.info(\n\t\t\t\t`Force flag set. Clearing existing subtasks for task ${taskId}.`\n\t\t\t);\n\t\t\ttask.subtasks = [];\n\t\t}\n\n\t\t// Keep a copy of the task before modification\n\t\tconst originalTask = JSON.parse(JSON.stringify(task));\n\n\t\t// Tracking subtasks count before expansion\n\t\tconst subtasksCountBefore = task.subtasks ? 
task.subtasks.length : 0;\n\n\t\t// Directly modify the data instead of calling the CLI function\n\t\tif (!task.subtasks) {\n\t\t\ttask.subtasks = [];\n\t\t}\n\n\t\t// Save tasks.json with potentially empty subtasks array and proper context\n\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\n\t\t// Create logger wrapper using the utility\n\t\tconst mcpLog = createLogWrapper(log);\n\n\t\tlet wasSilent; // Declare wasSilent outside the try block\n\t\t// Process the request\n\t\ttry {\n\t\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\t\twasSilent = isSilentMode(); // Assign inside the try block\n\t\t\tif (!wasSilent) enableSilentMode();\n\n\t\t\t// Call the core expandTask function with the wrapped logger and projectRoot\n\t\t\tconst coreResult = await expandTask(\n\t\t\t\ttasksPath,\n\t\t\t\ttaskId,\n\t\t\t\tnumSubtasks,\n\t\t\t\tuseResearch,\n\t\t\t\tadditionalContext,\n\t\t\t\t{\n\t\t\t\t\tcomplexityReportPath,\n\t\t\t\t\tmcpLog,\n\t\t\t\t\tsession,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tcommandName: 'expand-task',\n\t\t\t\t\toutputType: 'mcp',\n\t\t\t\t\ttag\n\t\t\t\t},\n\t\t\t\tforceFlag\n\t\t\t);\n\n\t\t\t// Restore normal logging\n\t\t\tif (!wasSilent && isSilentMode()) disableSilentMode();\n\n\t\t\t// Read the updated data\n\t\t\tconst updatedData = readJSON(tasksPath, projectRoot);\n\t\t\tconst updatedTask = updatedData.tasks.find((t) => t.id === taskId);\n\n\t\t\t// Calculate how many subtasks were added\n\t\t\tconst subtasksAdded = updatedTask.subtasks\n\t\t\t\t? 
updatedTask.subtasks.length - subtasksCountBefore\n\t\t\t\t: 0;\n\n\t\t\t// Return the result, including telemetryData\n\t\t\tlog.info(\n\t\t\t\t`Successfully expanded task ${taskId} with ${subtasksAdded} new subtasks`\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\ttask: coreResult.task,\n\t\t\t\t\tsubtasksAdded,\n\t\t\t\t\thasExistingSubtasks,\n\t\t\t\t\ttelemetryData: coreResult.telemetryData,\n\t\t\t\t\ttagInfo: coreResult.tagInfo\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\t// Make sure to restore normal logging even if there's an error\n\t\t\tif (!wasSilent && isSilentMode()) disableSilentMode();\n\n\t\t\tlog.error(`Error expanding task: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\t\tmessage: error.message || 'Failed to expand task'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\tlog.error(`Error expanding task: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message || 'Failed to expand task'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/delete-tag.js", "/**\n * delete-tag.js\n * Direct function implementation for deleting a tag\n */\n\nimport { deleteTag } from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for deleting a tag with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.name - Name of the tag to delete\n * @param {boolean} [args.yes=false] - Skip confirmation prompts\n * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n 
* @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function deleteTagDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, name, yes = false, projectRoot } = args;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('deleteTagDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check required parameters\n\t\tif (!name || typeof name !== 'string') {\n\t\t\tlog.error('Missing required parameter: name');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage: 'Tag name is required and must be a string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlog.info(`Deleting tag: ${name}`);\n\n\t\t// Prepare options\n\t\tconst options = {\n\t\t\tyes // For MCP, we always skip confirmation prompts\n\t\t};\n\n\t\t// Call the deleteTag function\n\t\tconst result = await deleteTag(\n\t\t\ttasksJsonPath,\n\t\t\tname,\n\t\t\toptions,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\ttagName: result.tagName,\n\t\t\t\tdeleted: result.deleted,\n\t\t\t\ttasksDeleted: result.tasksDeleted,\n\t\t\t\twasCurrentTag: 
result.wasCurrentTag,\n\t\t\t\tswitchedToMaster: result.switchedToMaster,\n\t\t\t\tmessage: `Successfully deleted tag \"${result.tagName}\"`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in deleteTagDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'DELETE_TAG_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/use-tag.js", "/**\n * use-tag.js\n * Direct function implementation for switching to a tag\n */\n\nimport { useTag } from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for switching to a tag with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.name - Name of the tag to switch to\n * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function useTagDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, name, projectRoot } = args;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('useTagDirect called without 
tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check required parameters\n\t\tif (!name || typeof name !== 'string') {\n\t\t\tlog.error('Missing required parameter: name');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage: 'Tag name is required and must be a string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlog.info(`Switching to tag: ${name}`);\n\n\t\t// Call the useTag function\n\t\tconst result = await useTag(\n\t\t\ttasksJsonPath,\n\t\t\tname,\n\t\t\t{}, // options (empty for now)\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\ttagName: result.currentTag,\n\t\t\t\tswitched: result.switched,\n\t\t\t\tpreviousTag: result.previousTag,\n\t\t\t\ttaskCount: result.taskCount,\n\t\t\t\tmessage: `Successfully switched to tag \"${result.currentTag}\"`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in useTagDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'USE_TAG_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/rename-tag.js", "/**\n * rename-tag.js\n * Direct function implementation for renaming a tag\n */\n\nimport { renameTag } from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from 
'../../tools/utils.js';\n\n/**\n * Direct function wrapper for renaming a tag with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.oldName - Current name of the tag to rename\n * @param {string} args.newName - New name for the tag\n * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function renameTagDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, oldName, newName, projectRoot } = args;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('renameTagDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check required parameters\n\t\tif (!oldName || typeof oldName !== 'string') {\n\t\t\tlog.error('Missing required parameter: oldName');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage: 'Old tag name is required and must be a string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (!newName || typeof newName !== 'string') {\n\t\t\tlog.error('Missing required parameter: newName');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 
'MISSING_PARAMETER',\n\t\t\t\t\tmessage: 'New tag name is required and must be a string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlog.info(`Renaming tag from \"${oldName}\" to \"${newName}\"`);\n\n\t\t// Call the renameTag function\n\t\tconst result = await renameTag(\n\t\t\ttasksJsonPath,\n\t\t\toldName,\n\t\t\tnewName,\n\t\t\t{}, // options (empty for now)\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\toldName: result.oldName,\n\t\t\t\tnewName: result.newName,\n\t\t\t\trenamed: result.renamed,\n\t\t\t\ttaskCount: result.taskCount,\n\t\t\t\twasCurrentTag: result.wasCurrentTag,\n\t\t\t\tmessage: `Successfully renamed tag from \"${result.oldName}\" to \"${result.newName}\"`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in renameTagDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'RENAME_TAG_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/copy-tag.js", "/**\n * copy-tag.js\n * Direct function implementation for copying a tag\n */\n\nimport { copyTag } from '../../../../scripts/modules/task-manager/tag-management.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for copying a tag with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.sourceName - Name of the source tag to copy from\n * @param {string} args.targetName - Name of the new tag to create\n * @param {string} [args.description] - Optional description for the new tag\n 
* @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {string} [args.projectRoot] - Project root path\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function copyTagDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, sourceName, targetName, description, projectRoot } =\n\t\targs;\n\tconst { session } = context;\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('copyTagDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Check required parameters\n\t\tif (!sourceName || typeof sourceName !== 'string') {\n\t\t\tlog.error('Missing required parameter: sourceName');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage: 'Source tag name is required and must be a string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (!targetName || typeof targetName !== 'string') {\n\t\t\tlog.error('Missing required parameter: targetName');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage: 'Target tag name is required and must be a string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlog.info(`Copying tag from \"${sourceName}\" to \"${targetName}\"`);\n\n\t\t// Prepare options\n\t\tconst options = 
{\n\t\t\tdescription\n\t\t};\n\n\t\t// Call the copyTag function\n\t\tconst result = await copyTag(\n\t\t\ttasksJsonPath,\n\t\t\tsourceName,\n\t\t\ttargetName,\n\t\t\toptions,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' // outputFormat - use 'json' to suppress CLI UI\n\t\t);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tsourceName: result.sourceName,\n\t\t\t\ttargetName: result.targetName,\n\t\t\t\tcopied: result.copied,\n\t\t\t\ttasksCopied: result.tasksCopied,\n\t\t\t\tdescription: result.description,\n\t\t\t\tmessage: `Successfully copied tag from \"${result.sourceName}\" to \"${result.targetName}\"`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in copyTagDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'COPY_TAG_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/add-task.js", "/**\n * add-task.js\n * Direct function implementation for adding a new task\n */\n\nimport { addTask } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for adding a new task with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} [args.prompt] - Description of the task to add (required if not using manual fields)\n * @param {string} [args.title] - Task title (for manual task creation)\n * @param {string} [args.description] - Task description (for manual task creation)\n * @param {string} [args.details] - Implementation details (for manual task creation)\n * @param {string} [args.testStrategy] - Test 
strategy (for manual task creation)\n * @param {string} [args.dependencies] - Comma-separated list of task IDs this task depends on\n * @param {string} [args.priority='medium'] - Task priority (high, medium, low)\n * @param {string} [args.tasksJsonPath] - Path to the tasks.json file (resolved by tool)\n * @param {boolean} [args.research=false] - Whether to use research capabilities for task creation\n * @param {string} [args.projectRoot] - Project root path\n * @param {string} [args.tag] - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function addTaskDirect(args, log, context = {}) {\n\t// Destructure expected args (including research and projectRoot)\n\tconst {\n\t\ttasksJsonPath,\n\t\tprompt,\n\t\tdependencies,\n\t\tpriority,\n\t\tresearch,\n\t\tprojectRoot,\n\t\ttag\n\t} = args;\n\tconst { session } = context; // Destructure session from context\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('addTaskDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode(); // Disable before returning\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Check if this is manual task creation or AI-driven task creation\n\t\tconst isManualCreation = args.title && args.description;\n\n\t\t// Check required parameters\n\t\tif (!args.prompt && !isManualCreation) {\n\t\t\tlog.error(\n\t\t\t\t'Missing required parameters: 
either prompt or title+description must be provided'\n\t\t\t);\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage:\n\t\t\t\t\t\t'Either the prompt parameter or both title and description parameters are required for adding a task'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Extract and prepare parameters\n\t\tconst taskDependencies = Array.isArray(dependencies)\n\t\t\t? dependencies // Already an array if passed directly\n\t\t\t: dependencies // Check if dependencies exist and are a string\n\t\t\t\t? String(dependencies)\n\t\t\t\t\t\t.split(',')\n\t\t\t\t\t\t.map((id) => parseInt(id.trim(), 10)) // Split, trim, and parse\n\t\t\t\t: []; // Default to empty array if null/undefined\n\t\tconst taskPriority = priority || 'medium'; // Default priority\n\n\t\tlet manualTaskData = null;\n\t\tlet newTaskId;\n\t\tlet telemetryData;\n\t\tlet tagInfo;\n\n\t\tif (isManualCreation) {\n\t\t\t// Create manual task data object\n\t\t\tmanualTaskData = {\n\t\t\t\ttitle: args.title,\n\t\t\t\tdescription: args.description,\n\t\t\t\tdetails: args.details || '',\n\t\t\t\ttestStrategy: args.testStrategy || ''\n\t\t\t};\n\n\t\t\tlog.info(\n\t\t\t\t`Adding new task manually with title: \"${args.title}\", dependencies: [${taskDependencies.join(', ')}], priority: ${priority}`\n\t\t\t);\n\n\t\t\t// Call the addTask function with manual task data\n\t\t\tconst result = await addTask(\n\t\t\t\ttasksPath,\n\t\t\t\tnull, // prompt is null for manual creation\n\t\t\t\ttaskDependencies,\n\t\t\t\ttaskPriority,\n\t\t\t\t{\n\t\t\t\t\tsession,\n\t\t\t\t\tmcpLog,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tcommandName: 'add-task',\n\t\t\t\t\toutputType: 'mcp',\n\t\t\t\t\ttag\n\t\t\t\t},\n\t\t\t\t'json', // outputFormat\n\t\t\t\tmanualTaskData, // Pass the manual task data\n\t\t\t\tfalse // research flag is false for manual creation\n\t\t\t);\n\t\t\tnewTaskId = result.newTaskId;\n\t\t\ttelemetryData = 
result.telemetryData;\n\t\t\ttagInfo = result.tagInfo;\n\t\t} else {\n\t\t\t// AI-driven task creation\n\t\t\tlog.info(\n\t\t\t\t`Adding new task with prompt: \"${prompt}\", dependencies: [${taskDependencies.join(', ')}], priority: ${taskPriority}, research: ${research}`\n\t\t\t);\n\n\t\t\t// Call the addTask function, passing the research flag\n\t\t\tconst result = await addTask(\n\t\t\t\ttasksPath,\n\t\t\t\tprompt, // Use the prompt for AI creation\n\t\t\t\ttaskDependencies,\n\t\t\t\ttaskPriority,\n\t\t\t\t{\n\t\t\t\t\tsession,\n\t\t\t\t\tmcpLog,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tcommandName: 'add-task',\n\t\t\t\t\toutputType: 'mcp',\n\t\t\t\t\ttag\n\t\t\t\t},\n\t\t\t\t'json', // outputFormat\n\t\t\t\tnull, // manualTaskData is null for AI creation\n\t\t\t\tresearch // Pass the research flag\n\t\t\t);\n\t\t\tnewTaskId = result.newTaskId;\n\t\t\ttelemetryData = result.telemetryData;\n\t\t\ttagInfo = result.tagInfo;\n\t\t}\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\ttaskId: newTaskId,\n\t\t\t\tmessage: `Successfully added new task #${newTaskId}`,\n\t\t\t\ttelemetryData: telemetryData,\n\t\t\t\ttagInfo: tagInfo\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in addTaskDirect: ${error.message}`);\n\t\t// Add specific error code checks if needed\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'ADD_TASK_ERROR', // Use error code if available\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/research.js", "/**\n * research.js\n * Direct function implementation for AI-powered research queries\n */\n\nimport path from 'path';\nimport { performResearch } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from 
'../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for performing AI-powered research with project context.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.query - Research query/prompt (required)\n * @param {string} [args.taskIds] - Comma-separated list of task/subtask IDs for context\n * @param {string} [args.filePaths] - Comma-separated list of file paths for context\n * @param {string} [args.customContext] - Additional custom context text\n * @param {boolean} [args.includeProjectTree=false] - Include project file tree in context\n * @param {string} [args.detailLevel='medium'] - Detail level: 'low', 'medium', 'high'\n * @param {string} [args.saveTo] - Automatically save to task/subtask ID (e.g., \"15\" or \"15.2\")\n * @param {boolean} [args.saveToFile=false] - Save research results to .taskmaster/docs/research/ directory\n * @param {string} [args.projectRoot] - Project root path\n * @param {string} [args.tag] - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function researchDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst {\n\t\tquery,\n\t\ttaskIds,\n\t\tfilePaths,\n\t\tcustomContext,\n\t\tincludeProjectTree = false,\n\t\tdetailLevel = 'medium',\n\t\tsaveTo,\n\t\tsaveToFile = false,\n\t\tprojectRoot,\n\t\ttag\n\t} = args;\n\tconst { session } = context; // Destructure session from context\n\n\t// Enable silent mode to prevent console logs from interfering with JSON response\n\tenableSilentMode();\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\ttry {\n\t\t// Check required parameters\n\t\tif (!query || typeof query !== 'string' || query.trim().length === 0) 
{\n\t\t\tlog.error('Missing or invalid required parameter: query');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_PARAMETER',\n\t\t\t\t\tmessage:\n\t\t\t\t\t\t'The query parameter is required and must be a non-empty string'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Parse comma-separated task IDs if provided\n\t\tconst parsedTaskIds = taskIds\n\t\t\t? taskIds\n\t\t\t\t\t.split(',')\n\t\t\t\t\t.map((id) => id.trim())\n\t\t\t\t\t.filter((id) => id.length > 0)\n\t\t\t: [];\n\n\t\t// Parse comma-separated file paths if provided\n\t\tconst parsedFilePaths = filePaths\n\t\t\t? filePaths\n\t\t\t\t\t.split(',')\n\t\t\t\t\t.map((path) => path.trim())\n\t\t\t\t\t.filter((path) => path.length > 0)\n\t\t\t: [];\n\n\t\t// Validate detail level\n\t\tconst validDetailLevels = ['low', 'medium', 'high'];\n\t\tif (!validDetailLevels.includes(detailLevel)) {\n\t\t\tlog.error(`Invalid detail level: ${detailLevel}`);\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_PARAMETER',\n\t\t\t\t\tmessage: `Detail level must be one of: ${validDetailLevels.join(', ')}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlog.info(\n\t\t\t`Performing research query: \"${query.substring(0, 100)}${query.length > 100 ? '...' 
: ''}\", ` +\n\t\t\t\t`taskIds: [${parsedTaskIds.join(', ')}], ` +\n\t\t\t\t`filePaths: [${parsedFilePaths.join(', ')}], ` +\n\t\t\t\t`detailLevel: ${detailLevel}, ` +\n\t\t\t\t`includeProjectTree: ${includeProjectTree}, ` +\n\t\t\t\t`projectRoot: ${projectRoot}`\n\t\t);\n\n\t\t// Prepare options for the research function\n\t\tconst researchOptions = {\n\t\t\ttaskIds: parsedTaskIds,\n\t\t\tfilePaths: parsedFilePaths,\n\t\t\tcustomContext: customContext || '',\n\t\t\tincludeProjectTree,\n\t\t\tdetailLevel,\n\t\t\tprojectRoot,\n\t\t\ttag,\n\t\t\tsaveToFile\n\t\t};\n\n\t\t// Prepare context for the research function\n\t\tconst researchContext = {\n\t\t\tsession,\n\t\t\tmcpLog,\n\t\t\tcommandName: 'research',\n\t\t\toutputType: 'mcp'\n\t\t};\n\n\t\t// Call the performResearch function\n\t\tconst result = await performResearch(\n\t\t\tquery.trim(),\n\t\t\tresearchOptions,\n\t\t\tresearchContext,\n\t\t\t'json', // outputFormat - use 'json' to suppress CLI UI\n\t\t\tfalse // allowFollowUp - disable for MCP calls\n\t\t);\n\n\t\t// Auto-save to task/subtask if requested\n\t\tif (saveTo) {\n\t\t\ttry {\n\t\t\t\tconst isSubtask = saveTo.includes('.');\n\n\t\t\t\t// Format research content for saving\n\t\t\t\tconst researchContent = `## Research Query: ${query.trim()}\n\n**Detail Level:** ${result.detailLevel}\n**Context Size:** ${result.contextSize} characters\n**Timestamp:** ${new Date().toLocaleDateString()} ${new Date().toLocaleTimeString()}\n\n### Results\n\n${result.result}`;\n\n\t\t\t\tif (isSubtask) {\n\t\t\t\t\t// Save to subtask\n\t\t\t\t\tconst { updateSubtaskById } = await import(\n\t\t\t\t\t\t'../../../../scripts/modules/task-manager/update-subtask-by-id.js'\n\t\t\t\t\t);\n\n\t\t\t\t\tconst tasksPath = path.join(\n\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\t'.taskmaster',\n\t\t\t\t\t\t'tasks',\n\t\t\t\t\t\t'tasks.json'\n\t\t\t\t\t);\n\t\t\t\t\tawait updateSubtaskById(\n\t\t\t\t\t\ttasksPath,\n\t\t\t\t\t\tsaveTo,\n\t\t\t\t\t\tresearchContent,\n\t\t\t\t\t\tfalse, // 
useResearch = false for simple append\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tsession,\n\t\t\t\t\t\t\tmcpLog,\n\t\t\t\t\t\t\tcommandName: 'research-save',\n\t\t\t\t\t\t\toutputType: 'mcp',\n\t\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\t\ttag\n\t\t\t\t\t\t},\n\t\t\t\t\t\t'json'\n\t\t\t\t\t);\n\n\t\t\t\t\tlog.info(`Research saved to subtask ${saveTo}`);\n\t\t\t\t} else {\n\t\t\t\t\t// Save to task\n\t\t\t\t\tconst updateTaskById = (\n\t\t\t\t\t\tawait import(\n\t\t\t\t\t\t\t'../../../../scripts/modules/task-manager/update-task-by-id.js'\n\t\t\t\t\t\t)\n\t\t\t\t\t).default;\n\n\t\t\t\t\tconst taskIdNum = parseInt(saveTo, 10);\n\t\t\t\t\tconst tasksPath = path.join(\n\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\t'.taskmaster',\n\t\t\t\t\t\t'tasks',\n\t\t\t\t\t\t'tasks.json'\n\t\t\t\t\t);\n\t\t\t\t\tawait updateTaskById(\n\t\t\t\t\t\ttasksPath,\n\t\t\t\t\t\ttaskIdNum,\n\t\t\t\t\t\tresearchContent,\n\t\t\t\t\t\tfalse, // useResearch = false for simple append\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tsession,\n\t\t\t\t\t\t\tmcpLog,\n\t\t\t\t\t\t\tcommandName: 'research-save',\n\t\t\t\t\t\t\toutputType: 'mcp',\n\t\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\t\ttag\n\t\t\t\t\t\t},\n\t\t\t\t\t\t'json',\n\t\t\t\t\t\ttrue // appendMode = true\n\t\t\t\t\t);\n\n\t\t\t\t\tlog.info(`Research saved to task ${saveTo}`);\n\t\t\t\t}\n\t\t\t} catch (saveError) {\n\t\t\t\tlog.warn(`Error saving research to task/subtask: ${saveError.message}`);\n\t\t\t}\n\t\t}\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tquery: result.query,\n\t\t\t\tresult: result.result,\n\t\t\t\tcontextSize: result.contextSize,\n\t\t\t\tcontextTokens: result.contextTokens,\n\t\t\t\ttokenBreakdown: result.tokenBreakdown,\n\t\t\t\tsystemPromptTokens: result.systemPromptTokens,\n\t\t\t\tuserPromptTokens: result.userPromptTokens,\n\t\t\t\ttotalInputTokens: result.totalInputTokens,\n\t\t\t\tdetailLevel: result.detailLevel,\n\t\t\t\ttelemetryData: result.telemetryData,\n\t\t\t\ttagInfo: 
result.tagInfo,\n\t\t\t\tsavedFilePath: result.savedFilePath\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in researchDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'RESEARCH_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/list-tasks.js", "/**\n * list-tasks.js\n * Direct function implementation for listing tasks\n */\n\nimport { listTasks } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for listTasks with error handling and caching.\n *\n * @param {Object} args - Command arguments (now expecting tasksJsonPath explicitly).\n * @param {string} args.tasksJsonPath - Path to the tasks.json file.\n * @param {string} args.reportPath - Path to the report file.\n * @param {string} args.status - Status of the task.\n * @param {boolean} args.withSubtasks - Whether to include subtasks.\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object.\n * @returns {Promise<Object>} - Task list result { success: boolean, data?: any, error?: { code: string, message: string } }.\n */\nexport async function listTasksDirect(args, log, context = {}) {\n\t// Destructure the explicit tasksJsonPath from args\n\tconst { tasksJsonPath, reportPath, status, withSubtasks, projectRoot, tag } =\n\t\targs;\n\tconst { session } = context;\n\n\tif (!tasksJsonPath) {\n\t\tlog.error('listTasksDirect called without tasksJsonPath');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t}\n\t\t};\n\t}\n\n\t// Use 
the explicit tasksJsonPath for cache key\n\tconst statusFilter = status || 'all';\n\tconst withSubtasksFilter = withSubtasks || false;\n\n\t// Define the action function to be executed on cache miss\n\tconst coreListTasksAction = async () => {\n\t\ttry {\n\t\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\t\tenableSilentMode();\n\n\t\t\tlog.info(\n\t\t\t\t`Executing core listTasks function for path: ${tasksJsonPath}, filter: ${statusFilter}, subtasks: ${withSubtasksFilter}`\n\t\t\t);\n\t\t\t// Pass the explicit tasksJsonPath to the core function\n\t\t\tconst resultData = listTasks(\n\t\t\t\ttasksJsonPath,\n\t\t\t\tstatusFilter,\n\t\t\t\treportPath,\n\t\t\t\twithSubtasksFilter,\n\t\t\t\t'json',\n\t\t\t\t{ projectRoot, session, tag }\n\t\t\t);\n\n\t\t\tif (!resultData || !resultData.tasks) {\n\t\t\t\tlog.error('Invalid or empty response from listTasks core function');\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'INVALID_CORE_RESPONSE',\n\t\t\t\t\t\tmessage: 'Invalid or empty response from listTasks core function'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tlog.info(\n\t\t\t\t`Core listTasks function retrieved ${resultData.tasks.length} tasks`\n\t\t\t);\n\n\t\t\t// Restore normal logging\n\t\t\tdisableSilentMode();\n\n\t\t\treturn { success: true, data: resultData };\n\t\t} catch (error) {\n\t\t\t// Make sure to restore normal logging even if there's an error\n\t\t\tdisableSilentMode();\n\n\t\t\tlog.error(`Core listTasks function failed: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'LIST_TASKS_CORE_ERROR',\n\t\t\t\t\tmessage: error.message || 'Failed to list tasks'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t};\n\n\ttry {\n\t\tconst result = await coreListTasksAction();\n\t\tlog.info('listTasksDirect completed');\n\t\treturn result;\n\t} catch (error) {\n\t\tlog.error(`Unexpected error during listTasks: 
${error.message}`);\n\t\tconsole.error(error.stack);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'UNEXPECTED_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/update-subtask-by-id.js", "/**\n * update-subtask-by-id.js\n * Direct function implementation for appending information to a specific subtask\n */\n\nimport { updateSubtaskById } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for updateSubtaskById with error handling.\n *\n * @param {Object} args - Command arguments containing id, prompt, useResearch, tasksJsonPath, and projectRoot.\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.id - Subtask ID in format \"parent.sub\".\n * @param {string} args.prompt - Information to append to the subtask.\n * @param {boolean} [args.research] - Whether to use research role.\n * @param {string} [args.projectRoot] - Project root path.\n * @param {string} [args.tag] - Tag for the task (optional)\n * @param {Object} log - Logger object.\n * @param {Object} context - Context object containing session data.\n * @returns {Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function updateSubtaskByIdDirect(args, log, context = {}) {\n\tconst { session } = context;\n\t// Destructure expected args, including projectRoot\n\tconst { tasksJsonPath, id, prompt, research, projectRoot, tag } = args;\n\n\tconst logWrapper = createLogWrapper(log);\n\n\ttry {\n\t\tlogWrapper.info(\n\t\t\t`Updating subtask by ID via direct function. 
ID: ${id}, ProjectRoot: ${projectRoot}`\n\t\t);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tconst errorMessage = 'tasksJsonPath is required but was not provided.';\n\t\t\tlogWrapper.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Basic validation for ID format (e.g., '5.2')\n\t\tif (!id || typeof id !== 'string' || !id.includes('.')) {\n\t\t\tconst errorMessage =\n\t\t\t\t'Invalid subtask ID format. Must be in format \"parentId.subtaskId\" (e.g., \"5.2\").';\n\t\t\tlogWrapper.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'INVALID_SUBTASK_ID', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\tif (!prompt) {\n\t\t\tconst errorMessage =\n\t\t\t\t'No prompt specified. Please provide the information to append.';\n\t\t\tlogWrapper.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_PROMPT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Validate subtask ID format\n\t\tconst subtaskId = id;\n\t\tif (typeof subtaskId !== 'string' && typeof subtaskId !== 'number') {\n\t\t\tconst errorMessage = `Invalid subtask ID type: ${typeof subtaskId}. Subtask ID must be a string or number.`;\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'INVALID_SUBTASK_ID_TYPE', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\tconst subtaskIdStr = String(subtaskId);\n\t\tif (!subtaskIdStr.includes('.')) {\n\t\t\tconst errorMessage = `Invalid subtask ID format: ${subtaskIdStr}. 
Subtask ID must be in format \"parentId.subtaskId\" (e.g., \"5.2\").`;\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'INVALID_SUBTASK_ID_FORMAT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Use the provided path\n\t\tconst tasksPath = tasksJsonPath;\n\t\tconst useResearch = research === true;\n\n\t\tlog.info(\n\t\t\t`Updating subtask with ID ${subtaskIdStr} with prompt \"${prompt}\" and research: ${useResearch}`\n\t\t);\n\n\t\tconst wasSilent = isSilentMode();\n\t\tif (!wasSilent) {\n\t\t\tenableSilentMode();\n\t\t}\n\n\t\ttry {\n\t\t\t// Execute core updateSubtaskById function\n\t\t\tconst coreResult = await updateSubtaskById(\n\t\t\t\ttasksPath,\n\t\t\t\tsubtaskIdStr,\n\t\t\t\tprompt,\n\t\t\t\tuseResearch,\n\t\t\t\t{\n\t\t\t\t\tmcpLog: logWrapper,\n\t\t\t\t\tsession,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\ttag,\n\t\t\t\t\tcommandName: 'update-subtask',\n\t\t\t\t\toutputType: 'mcp'\n\t\t\t\t},\n\t\t\t\t'json'\n\t\t\t);\n\n\t\t\tif (!coreResult || coreResult.updatedSubtask === null) {\n\t\t\t\tconst message = `Subtask ${id} or its parent task not found.`;\n\t\t\t\tlogWrapper.error(message);\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: { code: 'SUBTASK_NOT_FOUND', message: message }\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Subtask updated successfully\n\t\t\tconst successMessage = `Successfully updated subtask with ID ${subtaskIdStr}`;\n\t\t\tlogWrapper.success(successMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Successfully updated subtask with ID ${subtaskIdStr}`,\n\t\t\t\t\tsubtaskId: subtaskIdStr,\n\t\t\t\t\tparentId: subtaskIdStr.split('.')[0],\n\t\t\t\t\tsubtask: coreResult.updatedSubtask,\n\t\t\t\t\ttasksPath,\n\t\t\t\t\tuseResearch,\n\t\t\t\t\ttelemetryData: coreResult.telemetryData,\n\t\t\t\t\ttagInfo: coreResult.tagInfo\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tlogWrapper.error(`Error updating subtask by ID: ${error.message}`);\n\t\t\treturn 
{\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'UPDATE_SUBTASK_CORE_ERROR',\n\t\t\t\t\tmessage: error.message || 'Unknown error updating subtask'\n\t\t\t\t}\n\t\t\t};\n\t\t} finally {\n\t\t\tif (!wasSilent && isSilentMode()) {\n\t\t\t\tdisableSilentMode();\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tlogWrapper.error(\n\t\t\t`Setup error in updateSubtaskByIdDirect: ${error.message}`\n\t\t);\n\t\tif (isSilentMode()) disableSilentMode();\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'DIRECT_FUNCTION_SETUP_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown setup error'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/expand-all-tasks.js", "/**\n * Direct function wrapper for expandAllTasks\n */\n\nimport { expandAllTasks } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Expand all pending tasks with subtasks (Direct Function Wrapper)\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {number|string} [args.num] - Number of subtasks to generate\n * @param {boolean} [args.research] - Enable research-backed subtask generation\n * @param {string} [args.prompt] - Additional context to guide subtask generation\n * @param {boolean} [args.force] - Force regeneration of subtasks for tasks that already have them\n * @param {string} [args.projectRoot] - Project root path.\n * @param {string} [args.tag] - Tag for the task (optional)\n * @param {Object} log - Logger object from FastMCP\n * @param {Object} context - Context object containing session\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function expandAllTasksDirect(args, log, context = {}) {\n\tconst { session } = context; // 
Extract session\n\t// Destructure expected args, including projectRoot\n\tconst { tasksJsonPath, num, research, prompt, force, projectRoot, tag } =\n\t\targs;\n\n\t// Create logger wrapper using the utility\n\tconst mcpLog = createLogWrapper(log);\n\n\tif (!tasksJsonPath) {\n\t\tlog.error('expandAllTasksDirect called without tasksJsonPath');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t}\n\t\t};\n\t}\n\n\tenableSilentMode(); // Enable silent mode for the core function call\n\ttry {\n\t\tlog.info(\n\t\t\t`Calling core expandAllTasks with args: ${JSON.stringify({ num, research, prompt, force, projectRoot, tag })}`\n\t\t);\n\n\t\t// Parse parameters (ensure correct types)\n\t\tconst numSubtasks = num ? parseInt(num, 10) : undefined;\n\t\tconst useResearch = research === true;\n\t\tconst additionalContext = prompt || '';\n\t\tconst forceFlag = force === true;\n\n\t\t// Call the core function, passing options and the context object { session, mcpLog, projectRoot }\n\t\tconst result = await expandAllTasks(\n\t\t\ttasksJsonPath,\n\t\t\tnumSubtasks,\n\t\t\tuseResearch,\n\t\t\tadditionalContext,\n\t\t\tforceFlag,\n\t\t\t{ session, mcpLog, projectRoot, tag },\n\t\t\t'json'\n\t\t);\n\n\t\t// Core function now returns a summary object including the *aggregated* telemetryData\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: `Expand all operation completed. 
Expanded: ${result.expandedCount}, Failed: ${result.failedCount}, Skipped: ${result.skippedCount}`,\n\t\t\t\tdetails: {\n\t\t\t\t\texpandedCount: result.expandedCount,\n\t\t\t\t\tfailedCount: result.failedCount,\n\t\t\t\t\tskippedCount: result.skippedCount,\n\t\t\t\t\ttasksToExpand: result.tasksToExpand\n\t\t\t\t},\n\t\t\t\ttelemetryData: result.telemetryData // Pass the aggregated object\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Log the error using the MCP logger\n\t\tlog.error(`Error during core expandAllTasks execution: ${error.message}`);\n\t\t// Optionally log stack trace if available and debug enabled\n\t\t// if (error.stack && log.debug) { log.debug(error.stack); }\n\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CORE_FUNCTION_ERROR', // Or a more specific code if possible\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t} finally {\n\t\tdisableSilentMode(); // IMPORTANT: Ensure silent mode is always disabled\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/update-task-by-id.js", "/**\n * update-task-by-id.js\n * Direct function implementation for updating a single task by ID with new information\n */\n\nimport { updateTaskById } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\n/**\n * Direct function wrapper for updateTaskById with error handling.\n *\n * @param {Object} args - Command arguments containing id, prompt, useResearch, tasksJsonPath, and projectRoot.\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.id - Task ID (or subtask ID like \"1.2\").\n * @param {string} args.prompt - New information/context prompt.\n * @param {boolean} [args.research] - Whether to use research role.\n * @param {boolean} [args.append] - Whether to append timestamped information instead of 
full update.\n * @param {string} [args.projectRoot] - Project root path.\n * @param {string} [args.tag] - Tag for the task (optional)\n * @param {Object} log - Logger object.\n * @param {Object} context - Context object containing session data.\n * @returns {Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function updateTaskByIdDirect(args, log, context = {}) {\n\tconst { session } = context;\n\t// Destructure expected args, including projectRoot\n\tconst { tasksJsonPath, id, prompt, research, append, projectRoot, tag } =\n\t\targs;\n\n\tconst logWrapper = createLogWrapper(log);\n\n\ttry {\n\t\tlogWrapper.info(\n\t\t\t`Updating task by ID via direct function. ID: ${id}, ProjectRoot: ${projectRoot}`\n\t\t);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tconst errorMessage = 'tasksJsonPath is required but was not provided.';\n\t\t\tlogWrapper.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Check required parameters (id and prompt)\n\t\tif (!id) {\n\t\t\tconst errorMessage =\n\t\t\t\t'No task ID specified. Please provide a task ID to update.';\n\t\t\tlogWrapper.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_TASK_ID', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\tif (!prompt) {\n\t\t\tconst errorMessage =\n\t\t\t\t'No prompt specified. 
Please provide a prompt with new information for the task update.';\n\t\t\tlogWrapper.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_PROMPT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Parse taskId - handle both string and number values\n\t\tlet taskId;\n\t\tif (typeof id === 'string') {\n\t\t\t// Handle subtask IDs (e.g., \"5.2\")\n\t\t\tif (id.includes('.')) {\n\t\t\t\ttaskId = id; // Keep as string for subtask IDs\n\t\t\t} else {\n\t\t\t\t// Parse as integer for main task IDs\n\t\t\t\ttaskId = parseInt(id, 10);\n\t\t\t\tif (Number.isNaN(taskId)) {\n\t\t\t\t\tconst errorMessage = `Invalid task ID: ${id}. Task ID must be a positive integer or subtask ID (e.g., \"5.2\").`;\n\t\t\t\t\tlogWrapper.error(errorMessage);\n\t\t\t\t\treturn {\n\t\t\t\t\t\tsuccess: false,\n\t\t\t\t\t\terror: { code: 'INVALID_TASK_ID', message: errorMessage }\n\t\t\t\t\t};\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\ttaskId = id;\n\t\t}\n\n\t\t// Use the provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Get research flag\n\t\tconst useResearch = research === true;\n\n\t\tlogWrapper.info(\n\t\t\t`Updating task with ID ${taskId} with prompt \"${prompt}\" and research: ${useResearch}`\n\t\t);\n\n\t\tconst wasSilent = isSilentMode();\n\t\tif (!wasSilent) {\n\t\t\tenableSilentMode();\n\t\t}\n\n\t\ttry {\n\t\t\t// Execute core updateTaskById function with proper parameters\n\t\t\tconst coreResult = await updateTaskById(\n\t\t\t\ttasksPath,\n\t\t\t\ttaskId,\n\t\t\t\tprompt,\n\t\t\t\tuseResearch,\n\t\t\t\t{\n\t\t\t\t\tmcpLog: logWrapper,\n\t\t\t\t\tsession,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\ttag,\n\t\t\t\t\tcommandName: 'update-task',\n\t\t\t\t\toutputType: 'mcp'\n\t\t\t\t},\n\t\t\t\t'json',\n\t\t\t\tappend || false\n\t\t\t);\n\n\t\t\t// Check if the core function returned null or an object without success\n\t\t\tif (!coreResult || coreResult.updatedTask === null) {\n\t\t\t\t// Core function logs the reason, just return success with 
info\n\t\t\t\tconst message = `Task ${taskId} was not updated (likely already completed).`;\n\t\t\t\tlogWrapper.info(message);\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: true,\n\t\t\t\t\tdata: {\n\t\t\t\t\t\tmessage: message,\n\t\t\t\t\t\ttaskId: taskId,\n\t\t\t\t\t\tupdated: false,\n\t\t\t\t\t\ttelemetryData: coreResult?.telemetryData,\n\t\t\t\t\t\ttagInfo: coreResult?.tagInfo\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Task was updated successfully\n\t\t\tconst successMessage = `Successfully updated task with ID ${taskId} based on the prompt`;\n\t\t\tlogWrapper.success(successMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: successMessage,\n\t\t\t\t\ttaskId: taskId,\n\t\t\t\t\ttasksPath: tasksPath,\n\t\t\t\t\tuseResearch: useResearch,\n\t\t\t\t\tupdated: true,\n\t\t\t\t\tupdatedTask: coreResult.updatedTask,\n\t\t\t\t\ttelemetryData: coreResult.telemetryData,\n\t\t\t\t\ttagInfo: coreResult.tagInfo\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tlogWrapper.error(`Error updating task by ID: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'UPDATE_TASK_CORE_ERROR',\n\t\t\t\t\tmessage: error.message || 'Unknown error updating task'\n\t\t\t\t}\n\t\t\t};\n\t\t} finally {\n\t\t\tif (!wasSilent && isSilentMode()) {\n\t\t\t\tdisableSilentMode();\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tlogWrapper.error(`Setup error in updateTaskByIdDirect: ${error.message}`);\n\t\tif (isSilentMode()) disableSilentMode();\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'DIRECT_FUNCTION_SETUP_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown setup error'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/set-task-status.js", "/**\n * set-task-status.js\n * Direct function implementation for setting task status\n */\n\nimport { setTaskStatus } from '../../../../scripts/modules/task-manager.js';\nimport 
{\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { nextTaskDirect } from './next-task.js';\n/**\n * Direct function wrapper for setTaskStatus with error handling.\n *\n * @param {Object} args - Command arguments containing id, status, tasksJsonPath, and projectRoot.\n * @param {string} args.id - The ID of the task to update.\n * @param {string} args.status - The new status to set for the task.\n * @param {string} args.tasksJsonPath - Path to the tasks.json file.\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object.\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function setTaskStatusDirect(args, log, context = {}) {\n\t// Destructure expected args, including the resolved tasksJsonPath and projectRoot\n\tconst { tasksJsonPath, id, status, complexityReportPath, projectRoot, tag } =\n\t\targs;\n\tconst { session } = context;\n\ttry {\n\t\tlog.info(`Setting task status with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tconst errorMessage = 'tasksJsonPath is required but was not provided.';\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Check required parameters (id and status)\n\t\tif (!id) {\n\t\t\tconst errorMessage =\n\t\t\t\t'No task ID specified. Please provide a task ID to update.';\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_TASK_ID', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\tif (!status) {\n\t\t\tconst errorMessage =\n\t\t\t\t'No status specified. 
Please provide a new status value.';\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_STATUS', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Use the provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Execute core setTaskStatus function\n\t\tconst taskId = id;\n\t\tconst newStatus = status;\n\n\t\tlog.info(`Setting task ${taskId} status to \"${newStatus}\"`);\n\n\t\t// Call the core function with proper silent mode handling\n\t\tenableSilentMode(); // Enable silent mode before calling core function\n\t\ttry {\n\t\t\t// Call the core function\n\t\t\tawait setTaskStatus(tasksPath, taskId, newStatus, {\n\t\t\t\tmcpLog: log,\n\t\t\t\tprojectRoot,\n\t\t\t\tsession,\n\t\t\t\ttag\n\t\t\t});\n\n\t\t\tlog.info(`Successfully set task ${taskId} status to ${newStatus}`);\n\n\t\t\t// Return success data\n\t\t\tconst result = {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Successfully updated task ${taskId} status to \"${newStatus}\"`,\n\t\t\t\t\ttaskId,\n\t\t\t\t\tstatus: newStatus,\n\t\t\t\t\ttasksPath: tasksPath // Return the path used\n\t\t\t\t}\n\t\t\t};\n\n\t\t\t// If the task was completed, attempt to fetch the next task\n\t\t\tif (result.data.status === 'done') {\n\t\t\t\ttry {\n\t\t\t\t\tlog.info(`Attempting to fetch next task for task ${taskId}`);\n\t\t\t\t\tconst nextResult = await nextTaskDirect(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\t\treportPath: complexityReportPath,\n\t\t\t\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\t\t\t\ttag\n\t\t\t\t\t\t},\n\t\t\t\t\t\tlog,\n\t\t\t\t\t\t{ session }\n\t\t\t\t\t);\n\n\t\t\t\t\tif (nextResult.success) {\n\t\t\t\t\t\tlog.info(\n\t\t\t\t\t\t\t`Successfully retrieved next task: ${nextResult.data.nextTask}`\n\t\t\t\t\t\t);\n\t\t\t\t\t\tresult.data = {\n\t\t\t\t\t\t\t...result.data,\n\t\t\t\t\t\t\tnextTask: nextResult.data.nextTask,\n\t\t\t\t\t\t\tisNextSubtask: nextResult.data.isSubtask,\n\t\t\t\t\t\t\tnextSteps: 
nextResult.data.nextSteps\n\t\t\t\t\t\t};\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.warn(\n\t\t\t\t\t\t\t`Failed to retrieve next task: ${nextResult.error?.message || 'Unknown error'}`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t} catch (nextErr) {\n\t\t\t\t\tlog.error(`Error retrieving next task: ${nextErr.message}`);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn result;\n\t\t} catch (error) {\n\t\t\tlog.error(`Error setting task status: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'SET_STATUS_ERROR',\n\t\t\t\t\tmessage: error.message || 'Unknown error setting task status'\n\t\t\t\t}\n\t\t\t};\n\t\t} finally {\n\t\t\t// ALWAYS restore normal logging in finally block\n\t\t\tdisableSilentMode();\n\t\t}\n\t} catch (error) {\n\t\t// Ensure silent mode is disabled if there was an uncaught error in the outer try block\n\t\tif (isSilentMode()) {\n\t\t\tdisableSilentMode();\n\t\t}\n\n\t\tlog.error(`Error setting task status: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'SET_STATUS_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown error setting task status'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/clear-subtasks.js", "/**\n * Direct function wrapper for clearSubtasks\n */\n\nimport { clearSubtasks } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode,\n\treadJSON\n} from '../../../../scripts/modules/utils.js';\nimport fs from 'fs';\nimport path from 'path';\n\n/**\n * Clear subtasks from specified tasks\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} [args.id] - Task IDs (comma-separated) to clear subtasks from\n * @param {boolean} [args.all] - Clear subtasks from all tasks\n * @param {string} [args.tag] - Tag context to operate on (defaults to current active tag)\n * @param {string} [args.projectRoot] - 
Project root path (for MCP/env fallback)\n * @param {Object} log - Logger object\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function clearSubtasksDirect(args, log) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, id, all, tag, projectRoot } = args;\n\ttry {\n\t\tlog.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('clearSubtasksDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Either id or all must be provided\n\t\tif (!id && !all) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage:\n\t\t\t\t\t\t'Either task IDs with id parameter or all parameter must be provided'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Check if tasks.json exists\n\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'FILE_NOT_FOUND_ERROR',\n\t\t\t\t\tmessage: `Tasks file not found at ${tasksPath}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tlet taskIds;\n\n\t\t// Use readJSON which handles silent migration and tag resolution\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\n\t\tif (!data || !data.tasks) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: `No tasks found in tasks file: ${tasksPath}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tconst currentTag = data.tag || tag;\n\t\tconst tasks = data.tasks;\n\n\t\t// If all is specified, get all task IDs\n\t\tif (all) {\n\t\t\tlog.info(`Clearing subtasks from all tasks in tag '${currentTag}'`);\n\t\t\tif (tasks.length === 0) 
{\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\t\tmessage: `No tasks found in tag context '${currentTag}'`\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\t\t\ttaskIds = tasks.map((t) => t.id).join(',');\n\t\t} else {\n\t\t\t// Use the provided task IDs\n\t\t\ttaskIds = id;\n\t\t}\n\n\t\tlog.info(`Clearing subtasks from tasks: ${taskIds} in tag '${currentTag}'`);\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\t// Call the core function\n\t\tclearSubtasks(tasksPath, taskIds, { projectRoot, tag: currentTag });\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\t// Read the updated data to provide a summary\n\t\tconst updatedData = readJSON(tasksPath, projectRoot, currentTag);\n\t\tconst taskIdArray = taskIds.split(',').map((id) => parseInt(id.trim(), 10));\n\n\t\t// Build a summary of what was done\n\t\tconst clearedTasksCount = taskIdArray.length;\n\t\tconst updatedTasks = updatedData.tasks || [];\n\n\t\tconst taskSummary = taskIdArray.map((id) => {\n\t\t\tconst task = updatedTasks.find((t) => t.id === id);\n\t\t\treturn task ? 
{ id, title: task.title } : { id, title: 'Task not found' };\n\t\t});\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: `Successfully cleared subtasks from ${clearedTasksCount} task(s) in tag '${currentTag}'`,\n\t\t\t\ttasksCleared: taskSummary,\n\t\t\t\ttag: currentTag\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in clearSubtasksDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/analyze-task-complexity.js", "/**\n * Direct function wrapper for analyzeTaskComplexity\n */\n\nimport analyzeTaskComplexity from '../../../../scripts/modules/task-manager/analyze-task-complexity.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport fs from 'fs';\nimport { createLogWrapper } from '../../tools/utils.js'; // Import the new utility\n\n/**\n * Analyze task complexity and generate recommendations\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.outputPath - Explicit absolute path to save the report.\n * @param {string|number} [args.threshold] - Minimum complexity score to recommend expansion (1-10)\n * @param {boolean} [args.research] - Use Perplexity AI for research-backed complexity analysis\n * @param {string} [args.ids] - Comma-separated list of task IDs to analyze\n * @param {number} [args.from] - Starting task ID in a range to analyze\n * @param {number} [args.to] - Ending task ID in a range to analyze\n * @param {string} [args.projectRoot] - Project root path.\n * @param {string} [args.tag] - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @param {Object} 
[context={}] - Context object containing session data\n * @param {Object} [context.session] - MCP session object\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function analyzeTaskComplexityDirect(args, log, context = {}) {\n\tconst { session } = context;\n\tconst {\n\t\ttasksJsonPath,\n\t\toutputPath,\n\t\tthreshold,\n\t\tresearch,\n\t\tprojectRoot,\n\t\tids,\n\t\tfrom,\n\t\tto,\n\t\ttag\n\t} = args;\n\n\tconst logWrapper = createLogWrapper(log);\n\n\t// --- Initial Checks (remain the same) ---\n\ttry {\n\t\tlog.info(`Analyzing task complexity with args: ${JSON.stringify(args)}`);\n\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('analyzeTaskComplexityDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t\tif (!outputPath) {\n\t\t\tlog.error('analyzeTaskComplexityDirect called without outputPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: 'outputPath is required' }\n\t\t\t};\n\t\t}\n\n\t\tconst tasksPath = tasksJsonPath;\n\t\tconst resolvedOutputPath = outputPath;\n\n\t\tlog.info(`Analyzing task complexity from: ${tasksPath}`);\n\t\tlog.info(`Output report will be saved to: ${resolvedOutputPath}`);\n\n\t\tif (ids) {\n\t\t\tlog.info(`Analyzing specific task IDs: ${ids}`);\n\t\t} else if (from || to) {\n\t\t\tconst fromStr = from !== undefined ? from : 'first';\n\t\t\tconst toStr = to !== undefined ? 
to : 'last';\n\t\t\tlog.info(`Analyzing tasks in range: ${fromStr} to ${toStr}`);\n\t\t}\n\n\t\tif (research) {\n\t\t\tlog.info('Using research role for complexity analysis');\n\t\t}\n\n\t\t// Prepare options for the core function - REMOVED mcpLog and session here\n\t\tconst coreOptions = {\n\t\t\tfile: tasksJsonPath,\n\t\t\toutput: outputPath,\n\t\t\tthreshold: threshold,\n\t\t\tresearch: research === true, // Ensure boolean\n\t\t\tprojectRoot: projectRoot, // Pass projectRoot here\n\t\t\tid: ids, // Pass the ids parameter to the core function as 'id'\n\t\t\tfrom: from, // Pass from parameter\n\t\t\tto: to, // Pass to parameter\n\t\t\ttag // forward tag\n\t\t};\n\t\t// --- End Initial Checks ---\n\n\t\t// --- Silent Mode and Logger Wrapper ---\n\t\tconst wasSilent = isSilentMode();\n\t\tif (!wasSilent) {\n\t\t\tenableSilentMode(); // Still enable silent mode as a backup\n\t\t}\n\n\t\tlet report;\n\t\tlet coreResult;\n\n\t\ttry {\n\t\t\t// --- Call Core Function (Pass context separately) ---\n\t\t\t// Pass coreOptions as the first argument\n\t\t\t// Pass context object { session, mcpLog } as the second argument\n\t\t\tcoreResult = await analyzeTaskComplexity(coreOptions, {\n\t\t\t\tsession,\n\t\t\t\tmcpLog: logWrapper,\n\t\t\t\tcommandName: 'analyze-complexity',\n\t\t\t\toutputType: 'mcp',\n\t\t\t\tprojectRoot,\n\t\t\t\ttag\n\t\t\t});\n\t\t\treport = coreResult.report;\n\t\t} catch (error) {\n\t\t\tlog.error(\n\t\t\t\t`Error in analyzeTaskComplexity core function: ${error.message}`\n\t\t\t);\n\t\t\t// Restore logging if we changed it\n\t\t\tif (!wasSilent && isSilentMode()) {\n\t\t\t\tdisableSilentMode();\n\t\t\t}\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'ANALYZE_CORE_ERROR',\n\t\t\t\t\tmessage: `Error running core complexity analysis: ${error.message}`\n\t\t\t\t}\n\t\t\t};\n\t\t} finally {\n\t\t\t// Always restore normal logging in finally block if we enabled silent mode\n\t\t\tif (!wasSilent && isSilentMode()) 
{\n\t\t\t\tdisableSilentMode();\n\t\t\t}\n\t\t}\n\n\t\t// --- Result Handling (remains largely the same) ---\n\t\t// Verify the report file was created (core function writes it)\n\t\tif (!fs.existsSync(resolvedOutputPath)) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'ANALYZE_REPORT_MISSING', // Specific code\n\t\t\t\t\tmessage:\n\t\t\t\t\t\t'Analysis completed but no report file was created at the expected path.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (\n\t\t\t!coreResult ||\n\t\t\t!coreResult.report ||\n\t\t\ttypeof coreResult.report !== 'object'\n\t\t) {\n\t\t\tlog.error(\n\t\t\t\t'Core analysis function returned an invalid or undefined response.'\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_CORE_RESPONSE',\n\t\t\t\t\tmessage: 'Core analysis function returned an invalid response.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\ttry {\n\t\t\t// Ensure complexityAnalysis exists and is an array\n\t\t\tconst analysisArray = Array.isArray(coreResult.report.complexityAnalysis)\n\t\t\t\t? coreResult.report.complexityAnalysis\n\t\t\t\t: [];\n\n\t\t\t// Count tasks by complexity (remains the same)\n\t\t\tconst highComplexityTasks = analysisArray.filter(\n\t\t\t\t(t) => t.complexityScore >= 8\n\t\t\t).length;\n\t\t\tconst mediumComplexityTasks = analysisArray.filter(\n\t\t\t\t(t) => t.complexityScore >= 5 && t.complexityScore < 8\n\t\t\t).length;\n\t\t\tconst lowComplexityTasks = analysisArray.filter(\n\t\t\t\t(t) => t.complexityScore < 5\n\t\t\t).length;\n\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Task complexity analysis complete. 
Report saved to ${outputPath}`,\n\t\t\t\t\treportPath: outputPath,\n\t\t\t\t\treportSummary: {\n\t\t\t\t\t\ttaskCount: analysisArray.length,\n\t\t\t\t\t\thighComplexityTasks,\n\t\t\t\t\t\tmediumComplexityTasks,\n\t\t\t\t\t\tlowComplexityTasks\n\t\t\t\t\t},\n\t\t\t\t\tfullReport: coreResult.report,\n\t\t\t\t\ttelemetryData: coreResult.telemetryData,\n\t\t\t\t\ttagInfo: coreResult.tagInfo\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (parseError) {\n\t\t\t// Should not happen if core function returns object, but good safety check\n\t\t\tlog.error(`Internal error processing report data: ${parseError.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'REPORT_PROCESS_ERROR',\n\t\t\t\t\tmessage: `Internal error processing complexity report: ${parseError.message}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t\t// --- End Result Handling ---\n\t} catch (error) {\n\t\t// Catch errors from initial checks or path resolution\n\t\t// Make sure to restore normal logging if silent mode was enabled\n\t\tif (isSilentMode()) {\n\t\t\tdisableSilentMode();\n\t\t}\n\t\tlog.error(`Error in analyzeTaskComplexityDirect setup: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'DIRECT_FUNCTION_SETUP_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/remove-task.js", "/**\n * remove-task.js\n * Direct function implementation for removing a task\n */\n\nimport {\n\tremoveTask,\n\ttaskExists\n} from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode,\n\treadJSON\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for removeTask with error handling.\n * Supports removing multiple tasks at once with comma-separated IDs.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.id - The ID(s) of the 
task(s) or subtask(s) to remove (comma-separated for multiple).\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @returns {Promise<Object>} - Remove task result { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function removeTaskDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, id, projectRoot, tag } = args;\n\tconst { session } = context;\n\ttry {\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('removeTaskDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Validate task ID parameter\n\t\tif (!id) {\n\t\t\tlog.error('Task ID is required');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: 'Task ID is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Split task IDs if comma-separated\n\t\tconst taskIdArray = id.split(',').map((taskId) => taskId.trim());\n\n\t\tlog.info(\n\t\t\t`Removing ${taskIdArray.length} task(s) with ID(s): ${taskIdArray.join(', ')} from ${tasksJsonPath}${tag ? ` in tag '${tag}'` : ''}`\n\t\t);\n\n\t\t// Validate all task IDs exist before proceeding\n\t\tconst data = readJSON(tasksJsonPath, projectRoot, tag);\n\t\tif (!data || !data.tasks) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_TASKS_FILE',\n\t\t\t\t\tmessage: `No valid tasks found in ${tasksJsonPath}${tag ? 
` for tag '${tag}'` : ''}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tconst invalidTasks = taskIdArray.filter(\n\t\t\t(taskId) => !taskExists(data.tasks, taskId)\n\t\t);\n\n\t\tif (invalidTasks.length > 0) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_TASK_ID',\n\t\t\t\t\tmessage: `The following tasks were not found${tag ? ` in tag '${tag}'` : ''}: ${invalidTasks.join(', ')}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\ttry {\n\t\t\t// Call removeTask with proper context including tag\n\t\t\tconst result = await removeTask(tasksJsonPath, id, {\n\t\t\t\tprojectRoot,\n\t\t\t\ttag\n\t\t\t});\n\n\t\t\tif (!result.success) {\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'REMOVE_TASK_ERROR',\n\t\t\t\t\t\tmessage: result.error || 'Failed to remove tasks'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tlog.info(`Successfully removed ${result.removedTasks.length} task(s)`);\n\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\ttotalTasks: taskIdArray.length,\n\t\t\t\t\tsuccessful: result.removedTasks.length,\n\t\t\t\t\tfailed: taskIdArray.length - result.removedTasks.length,\n\t\t\t\t\tremovedTasks: result.removedTasks,\n\t\t\t\t\tmessage: result.message,\n\t\t\t\t\ttasksPath: tasksJsonPath,\n\t\t\t\t\ttag\n\t\t\t\t}\n\t\t\t};\n\t\t} finally {\n\t\t\t// Restore normal logging\n\t\t\tdisableSilentMode();\n\t\t}\n\t} catch (error) {\n\t\t// Ensure silent mode is disabled even if an outer error occurs\n\t\tdisableSilentMode();\n\n\t\t// Catch any unexpected errors\n\t\tlog.error(`Unexpected error in removeTaskDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'UNEXPECTED_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/utils.js", "/**\n * utils.js\n * Utility functions for the Task Master 
CLI\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport dotenv from 'dotenv';\n// Import specific config getters needed here\nimport { getLogLevel, getDebugFlag } from './config-manager.js';\nimport * as gitUtils from './utils/git-utils.js';\nimport {\n\tCOMPLEXITY_REPORT_FILE,\n\tLEGACY_COMPLEXITY_REPORT_FILE,\n\tLEGACY_CONFIG_FILE\n} from '../../src/constants/paths.js';\n\n// Global silent mode flag\nlet silentMode = false;\n\n// --- Environment Variable Resolution Utility ---\n/**\n * Resolves an environment variable's value.\n * Precedence:\n * 1. session.env (if session provided)\n * 2. process.env\n * 3. .env file at projectRoot (if projectRoot provided)\n * @param {string} key - The environment variable key.\n * @param {object|null} [session=null] - The MCP session object.\n * @param {string|null} [projectRoot=null] - The project root directory (for .env fallback).\n * @returns {string|undefined} The value of the environment variable or undefined if not found.\n */\nfunction resolveEnvVariable(key, session = null, projectRoot = null) {\n\t// 1. Check session.env\n\tif (session?.env?.[key]) {\n\t\treturn session.env[key];\n\t}\n\n\t// 2. Read .env file at projectRoot\n\tif (projectRoot) {\n\t\tconst envPath = path.join(projectRoot, '.env');\n\t\tif (fs.existsSync(envPath)) {\n\t\t\ttry {\n\t\t\t\tconst envFileContent = fs.readFileSync(envPath, 'utf-8');\n\t\t\t\tconst parsedEnv = dotenv.parse(envFileContent); // Use dotenv to parse\n\t\t\t\tif (parsedEnv && parsedEnv[key]) {\n\t\t\t\t\t// console.log(`DEBUG: Found key ${key} in ${envPath}`); // Optional debug log\n\t\t\t\t\treturn parsedEnv[key];\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\t// Log error but don't crash, just proceed as if key wasn't found in file\n\t\t\t\tlog('warn', `Could not read or parse ${envPath}: ${error.message}`);\n\t\t\t}\n\t\t}\n\t}\n\n\t// 3. 
Fallback: Check process.env\n\tif (process.env[key]) {\n\t\treturn process.env[key];\n\t}\n\n\t// Not found anywhere\n\treturn undefined;\n}\n\n// --- Tag-Aware Path Resolution Utility ---\n\n/**\n * Slugifies a tag name to be filesystem-safe\n * @param {string} tagName - The tag name to slugify\n * @returns {string} Slugified tag name safe for filesystem use\n */\nfunction slugifyTagForFilePath(tagName) {\n\tif (!tagName || typeof tagName !== 'string') {\n\t\treturn 'unknown-tag';\n\t}\n\n\t// Replace invalid filesystem characters with hyphens and clean up\n\treturn tagName\n\t\t.replace(/[^a-zA-Z0-9_-]/g, '-') // Replace invalid chars with hyphens\n\t\t.replace(/^-+|-+$/g, '') // Remove leading/trailing hyphens\n\t\t.replace(/-+/g, '-') // Collapse multiple hyphens\n\t\t.toLowerCase() // Convert to lowercase\n\t\t.substring(0, 50); // Limit length to prevent overly long filenames\n}\n\n/**\n * Resolves a file path to be tag-aware, following the pattern used by other commands.\n * For non-master tags, appends _slugified-tagname before the file extension.\n * @param {string} basePath - The base file path (e.g., '.taskmaster/reports/task-complexity-report.json')\n * @param {string|null} tag - The tag name (null, undefined, or 'master' uses base path)\n * @param {string} [projectRoot='.'] - The project root directory\n * @returns {string} The resolved file path\n */\nfunction getTagAwareFilePath(basePath, tag, projectRoot = '.') {\n\t// Use path.parse and format for clean tag insertion\n\tconst parsedPath = path.parse(basePath);\n\tif (!tag || tag === 'master') {\n\t\treturn path.join(projectRoot, basePath);\n\t}\n\n\t// Slugify the tag for filesystem safety\n\tconst slugifiedTag = slugifyTagForFilePath(tag);\n\n\t// Append slugified tag before file extension\n\tparsedPath.base = `${parsedPath.name}_${slugifiedTag}${parsedPath.ext}`;\n\tconst relativePath = path.format(parsedPath);\n\treturn path.join(projectRoot, relativePath);\n}\n\n// --- Project Root Finding 
Utility ---\n/**\n * Recursively searches upwards for project root starting from a given directory.\n * @param {string} [startDir=process.cwd()] - The directory to start searching from.\n * @param {string[]} [markers=['package.json', '.git', LEGACY_CONFIG_FILE]] - Marker files/dirs to look for.\n * @returns {string|null} The path to the project root, or null if not found.\n */\nfunction findProjectRoot(\n\tstartDir = process.cwd(),\n\tmarkers = ['package.json', 'pyproject.toml', '.git', LEGACY_CONFIG_FILE]\n) {\n\tlet currentPath = path.resolve(startDir);\n\tconst rootPath = path.parse(currentPath).root;\n\n\twhile (currentPath !== rootPath) {\n\t\t// Check if any marker exists in the current directory\n\t\tconst hasMarker = markers.some((marker) => {\n\t\t\tconst markerPath = path.join(currentPath, marker);\n\t\t\treturn fs.existsSync(markerPath);\n\t\t});\n\n\t\tif (hasMarker) {\n\t\t\treturn currentPath;\n\t\t}\n\n\t\t// Move up one directory\n\t\tcurrentPath = path.dirname(currentPath);\n\t}\n\n\t// Check the root directory as well\n\tconst hasMarkerInRoot = markers.some((marker) => {\n\t\tconst markerPath = path.join(rootPath, marker);\n\t\treturn fs.existsSync(markerPath);\n\t});\n\n\treturn hasMarkerInRoot ? 
rootPath : null;\n}\n\n// --- Dynamic Configuration Function --- (REMOVED)\n\n// --- Logging and Utility Functions ---\n\n// Set up logging based on log level\nconst LOG_LEVELS = {\n\tdebug: 0,\n\tinfo: 1,\n\twarn: 2,\n\terror: 3,\n\tsuccess: 1 // Treat success like info level\n};\n\n/**\n * Returns the task manager module\n * @returns {Promise<Object>} The task manager module object\n */\nasync function getTaskManager() {\n\treturn import('./task-manager.js');\n}\n\n/**\n * Enable silent logging mode\n */\nfunction enableSilentMode() {\n\tsilentMode = true;\n}\n\n/**\n * Disable silent logging mode\n */\nfunction disableSilentMode() {\n\tsilentMode = false;\n}\n\n/**\n * Check if silent mode is enabled\n * @returns {boolean} True if silent mode is enabled\n */\nfunction isSilentMode() {\n\treturn silentMode;\n}\n\n/**\n * Logs a message at the specified level\n * @param {string} level - The log level (debug, info, warn, error)\n * @param {...any} args - Arguments to log\n */\nfunction log(level, ...args) {\n\t// Immediately return if silentMode is enabled\n\tif (isSilentMode()) {\n\t\treturn;\n\t}\n\n\t// GUARD: Prevent circular dependency during config loading\n\t// Use a simple fallback log level instead of calling getLogLevel()\n\tlet configLevel = 'info'; // Default fallback\n\ttry {\n\t\t// Only try to get config level if we're not in the middle of config loading\n\t\tconfigLevel = getLogLevel() || 'info';\n\t} catch (error) {\n\t\t// If getLogLevel() fails (likely due to circular dependency),\n\t\t// use default 'info' level and continue\n\t\tconfigLevel = 'info';\n\t}\n\n\t// Use text prefixes instead of emojis\n\tconst prefixes = {\n\t\tdebug: chalk.gray('[DEBUG]'),\n\t\tinfo: chalk.blue('[INFO]'),\n\t\twarn: chalk.yellow('[WARN]'),\n\t\terror: chalk.red('[ERROR]'),\n\t\tsuccess: chalk.green('[SUCCESS]')\n\t};\n\n\t// Ensure level exists, default to info if not\n\tconst currentLevel = LOG_LEVELS.hasOwnProperty(level) ? 
level : 'info';\n\n\t// Check log level configuration\n\tif (\n\t\tLOG_LEVELS[currentLevel] >= (LOG_LEVELS[configLevel] ?? LOG_LEVELS.info)\n\t) {\n\t\tconst prefix = prefixes[currentLevel] || '';\n\t\t// Use console.log for all levels, let chalk handle coloring\n\t\t// Construct the message properly\n\t\tconst message = args\n\t\t\t.map((arg) => (typeof arg === 'object' ? JSON.stringify(arg) : arg))\n\t\t\t.join(' ');\n\t\tconsole.log(`${prefix} ${message}`);\n\t}\n}\n\n/**\n * Checks if the data object has a tagged structure (contains tag objects with tasks arrays)\n * @param {Object} data - The data object to check\n * @returns {boolean} True if the data has a tagged structure\n */\nfunction hasTaggedStructure(data) {\n\tif (!data || typeof data !== 'object') {\n\t\treturn false;\n\t}\n\n\t// Check if any top-level properties are objects with tasks arrays\n\tfor (const key in data) {\n\t\tif (\n\t\t\tdata.hasOwnProperty(key) &&\n\t\t\ttypeof data[key] === 'object' &&\n\t\t\tArray.isArray(data[key].tasks)\n\t\t) {\n\t\t\treturn true;\n\t\t}\n\t}\n\treturn false;\n}\n\n/**\n * Reads and parses a JSON file\n * @param {string} filepath - Path to the JSON file\n * @param {string} [projectRoot] - Optional project root for tag resolution (used by MCP)\n * @param {string} [tag] - Optional tag to use instead of current tag resolution\n * @returns {Object|null} The parsed JSON data or null if error\n */\nfunction readJSON(filepath, projectRoot = null, tag = null) {\n\t// GUARD: Prevent circular dependency during config loading\n\tlet isDebug = false; // Default fallback\n\ttry {\n\t\t// Only try to get debug flag if we're not in the middle of config loading\n\t\tisDebug = getDebugFlag();\n\t} catch (error) {\n\t\t// If getDebugFlag() fails (likely due to circular dependency),\n\t\t// use default false and continue\n\t}\n\n\tif (isDebug) {\n\t\tconsole.log(\n\t\t\t`readJSON called with: ${filepath}, projectRoot: ${projectRoot}, tag: ${tag}`\n\t\t);\n\t}\n\n\tif (!filepath) 
{\n\t\treturn null;\n\t}\n\n\tlet data;\n\ttry {\n\t\tdata = JSON.parse(fs.readFileSync(filepath, 'utf8'));\n\t\tif (isDebug) {\n\t\t\tconsole.log(`Successfully read JSON from ${filepath}`);\n\t\t}\n\t} catch (err) {\n\t\tif (isDebug) {\n\t\t\tconsole.log(`Failed to read JSON from ${filepath}: ${err.message}`);\n\t\t}\n\t\treturn null;\n\t}\n\n\t// If it's not a tasks.json file, return as-is\n\tif (!filepath.includes('tasks.json') || !data) {\n\t\tif (isDebug) {\n\t\t\tconsole.log(`File is not tasks.json or data is null, returning as-is`);\n\t\t}\n\t\treturn data;\n\t}\n\n\t// Check if this is legacy format that needs migration\n\t// Only migrate if we have tasks at the ROOT level AND no tag-like structure\n\tif (\n\t\tArray.isArray(data.tasks) &&\n\t\t!data._rawTaggedData &&\n\t\t!hasTaggedStructure(data)\n\t) {\n\t\tif (isDebug) {\n\t\t\tconsole.log(`File is in legacy format, performing migration...`);\n\t\t}\n\n\t\t// This is legacy format - migrate it to tagged format\n\t\tconst migratedData = {\n\t\t\tmaster: {\n\t\t\t\ttasks: data.tasks,\n\t\t\t\tmetadata: data.metadata || {\n\t\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\t\tdescription: 'Tasks for master context'\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\n\t\t// Write the migrated data back to the file\n\t\ttry {\n\t\t\twriteJSON(filepath, migratedData);\n\t\t\tif (isDebug) {\n\t\t\t\tconsole.log(`Successfully migrated legacy format to tagged format`);\n\t\t\t}\n\n\t\t\t// Perform complete migration (config.json, state.json)\n\t\t\tperformCompleteTagMigration(filepath);\n\n\t\t\t// Check and auto-switch git tags if enabled (after migration)\n\t\t\t// This needs to run synchronously BEFORE tag resolution\n\t\t\tif (projectRoot) {\n\t\t\t\ttry {\n\t\t\t\t\t// Run git integration synchronously\n\t\t\t\t\tgitUtils.checkAndAutoSwitchGitTagSync(projectRoot, filepath);\n\t\t\t\t} catch (error) {\n\t\t\t\t\t// Silent fail - don't break normal 
operations\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Mark for migration notice\n\t\t\tmarkMigrationForNotice(filepath);\n\t\t} catch (writeError) {\n\t\t\tif (isDebug) {\n\t\t\t\tconsole.log(`Error writing migrated data: ${writeError.message}`);\n\t\t\t}\n\t\t\t// If write fails, continue with the original data\n\t\t}\n\n\t\t// Continue processing with the migrated data structure\n\t\tdata = migratedData;\n\t}\n\n\t// If we have tagged data, we need to resolve which tag to use\n\tif (typeof data === 'object' && !data.tasks) {\n\t\t// This is tagged format\n\t\tif (isDebug) {\n\t\t\tconsole.log(`File is in tagged format, resolving tag...`);\n\t\t}\n\n\t\t// Ensure all tags have proper metadata before proceeding\n\t\tfor (const tagName in data) {\n\t\t\tif (\n\t\t\t\tdata.hasOwnProperty(tagName) &&\n\t\t\t\ttypeof data[tagName] === 'object' &&\n\t\t\t\tdata[tagName].tasks\n\t\t\t) {\n\t\t\t\ttry {\n\t\t\t\t\tensureTagMetadata(data[tagName], {\n\t\t\t\t\t\tdescription: `Tasks for ${tagName} context`,\n\t\t\t\t\t\tskipUpdate: true // Don't update timestamp during read operations\n\t\t\t\t\t});\n\t\t\t\t} catch (error) {\n\t\t\t\t\t// If ensureTagMetadata fails, continue without metadata\n\t\t\t\t\tif (isDebug) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t`Failed to ensure metadata for tag ${tagName}: ${error.message}`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Store reference to the raw tagged data for functions that need it\n\t\tconst originalTaggedData = JSON.parse(JSON.stringify(data));\n\n\t\t// Check and auto-switch git tags if enabled (for existing tagged format)\n\t\t// This needs to run synchronously BEFORE tag resolution\n\t\tif (projectRoot) {\n\t\t\ttry {\n\t\t\t\t// Run git integration synchronously\n\t\t\t\tgitUtils.checkAndAutoSwitchGitTagSync(projectRoot, filepath);\n\t\t\t} catch (error) {\n\t\t\t\t// Silent fail - don't break normal operations\n\t\t\t}\n\t\t}\n\n\t\ttry {\n\t\t\t// Default to master tag if anything goes wrong\n\t\t\tlet 
resolvedTag = 'master';\n\n\t\t\t// Try to resolve the correct tag, but don't fail if it doesn't work\n\t\t\ttry {\n\t\t\t\t// If tag is provided, use it directly\n\t\t\t\tif (tag) {\n\t\t\t\t\tresolvedTag = tag;\n\t\t\t\t} else if (projectRoot) {\n\t\t\t\t\t// Use provided projectRoot\n\t\t\t\t\tresolvedTag = resolveTag({ projectRoot });\n\t\t\t\t} else {\n\t\t\t\t\t// Try to derive projectRoot from filepath\n\t\t\t\t\tconst derivedProjectRoot = findProjectRoot(path.dirname(filepath));\n\t\t\t\t\tif (derivedProjectRoot) {\n\t\t\t\t\t\tresolvedTag = resolveTag({ projectRoot: derivedProjectRoot });\n\t\t\t\t\t}\n\t\t\t\t\t// If derivedProjectRoot is null, stick with 'master'\n\t\t\t\t}\n\t\t\t} catch (tagResolveError) {\n\t\t\t\tif (isDebug) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t`Tag resolution failed, using master: ${tagResolveError.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\t// resolvedTag stays as 'master'\n\t\t\t}\n\n\t\t\tif (isDebug) {\n\t\t\t\tconsole.log(`Resolved tag: ${resolvedTag}`);\n\t\t\t}\n\n\t\t\t// Get the data for the resolved tag\n\t\t\tconst tagData = data[resolvedTag];\n\t\t\tif (tagData && tagData.tasks) {\n\t\t\t\t// Add the _rawTaggedData property and the resolved tag to the returned data\n\t\t\t\tconst result = {\n\t\t\t\t\t...tagData,\n\t\t\t\t\ttag: resolvedTag,\n\t\t\t\t\t_rawTaggedData: originalTaggedData\n\t\t\t\t};\n\t\t\t\tif (isDebug) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t`Returning data for tag '${resolvedTag}' with ${tagData.tasks.length} tasks`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\treturn result;\n\t\t\t} else {\n\t\t\t\t// If the resolved tag doesn't exist, fall back to master\n\t\t\t\tconst masterData = data.master;\n\t\t\t\tif (masterData && masterData.tasks) {\n\t\t\t\t\tif (isDebug) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t`Tag '${resolvedTag}' not found, falling back to master with ${masterData.tasks.length} tasks`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t\treturn {\n\t\t\t\t\t\t...masterData,\n\t\t\t\t\t\ttag: 
'master',\n\t\t\t\t\t\t_rawTaggedData: originalTaggedData\n\t\t\t\t\t};\n\t\t\t\t} else {\n\t\t\t\t\tif (isDebug) {\n\t\t\t\t\t\tconsole.log(`No valid tag data found, returning empty structure`);\n\t\t\t\t\t}\n\t\t\t\t\t// Return empty structure if no valid data\n\t\t\t\t\treturn {\n\t\t\t\t\t\ttasks: [],\n\t\t\t\t\t\ttag: 'master',\n\t\t\t\t\t\t_rawTaggedData: originalTaggedData\n\t\t\t\t\t};\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\tif (isDebug) {\n\t\t\t\tconsole.log(`Error during tag resolution: ${error.message}`);\n\t\t\t}\n\t\t\t// If anything goes wrong, try to return master or empty\n\t\t\tconst masterData = data.master;\n\t\t\tif (masterData && masterData.tasks) {\n\t\t\t\treturn {\n\t\t\t\t\t...masterData,\n\t\t\t\t\t_rawTaggedData: originalTaggedData\n\t\t\t\t};\n\t\t\t}\n\t\t\treturn {\n\t\t\t\ttasks: [],\n\t\t\t\t_rawTaggedData: originalTaggedData\n\t\t\t};\n\t\t}\n\t}\n\n\t// If we reach here, it's some other format\n\tif (isDebug) {\n\t\tconsole.log(`File format not recognized, returning as-is`);\n\t}\n\treturn data;\n}\n\n/**\n * Performs complete tag migration including config.json and state.json updates\n * @param {string} tasksJsonPath - Path to the tasks.json file that was migrated\n */\nfunction performCompleteTagMigration(tasksJsonPath) {\n\ttry {\n\t\t// Derive project root from tasks.json path\n\t\tconst projectRoot =\n\t\t\tfindProjectRoot(path.dirname(tasksJsonPath)) ||\n\t\t\tpath.dirname(tasksJsonPath);\n\n\t\t// 1. Migrate config.json - add defaultTag and tags section\n\t\tconst configPath = path.join(projectRoot, '.taskmaster', 'config.json');\n\t\tif (fs.existsSync(configPath)) {\n\t\t\tmigrateConfigJson(configPath);\n\t\t}\n\n\t\t// 2. 
Create state.json if it doesn't exist\n\t\tconst statePath = path.join(projectRoot, '.taskmaster', 'state.json');\n\t\tif (!fs.existsSync(statePath)) {\n\t\t\tcreateStateJson(statePath);\n\t\t}\n\n\t\tif (getDebugFlag()) {\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`Complete tag migration performed for project: ${projectRoot}`\n\t\t\t);\n\t\t}\n\t} catch (error) {\n\t\tif (getDebugFlag()) {\n\t\t\tlog('warn', `Error during complete tag migration: ${error.message}`);\n\t\t}\n\t}\n}\n\n/**\n * Migrates config.json to add tagged task system configuration\n * @param {string} configPath - Path to the config.json file\n */\nfunction migrateConfigJson(configPath) {\n\ttry {\n\t\tconst rawConfig = fs.readFileSync(configPath, 'utf8');\n\t\tconst config = JSON.parse(rawConfig);\n\t\tif (!config) return;\n\n\t\tlet modified = false;\n\n\t\t// Add global.defaultTag if missing\n\t\tif (!config.global) {\n\t\t\tconfig.global = {};\n\t\t}\n\t\tif (!config.global.defaultTag) {\n\t\t\tconfig.global.defaultTag = 'master';\n\t\t\tmodified = true;\n\t\t}\n\n\t\tif (modified) {\n\t\t\tfs.writeFileSync(configPath, JSON.stringify(config, null, 2), 'utf8');\n\t\t\tif (process.env.TASKMASTER_DEBUG === 'true') {\n\t\t\t\tconsole.log(\n\t\t\t\t\t'[DEBUG] Updated config.json with tagged task system settings'\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tif (process.env.TASKMASTER_DEBUG === 'true') {\n\t\t\tconsole.warn(`[WARN] Error migrating config.json: ${error.message}`);\n\t\t}\n\t}\n}\n\n/**\n * Creates initial state.json file for tagged task system\n * @param {string} statePath - Path where state.json should be created\n */\nfunction createStateJson(statePath) {\n\ttry {\n\t\tconst initialState = {\n\t\t\tcurrentTag: 'master',\n\t\t\tlastSwitched: new Date().toISOString(),\n\t\t\tbranchTagMapping: {},\n\t\t\tmigrationNoticeShown: false\n\t\t};\n\n\t\tfs.writeFileSync(statePath, JSON.stringify(initialState, null, 2), 'utf8');\n\t\tif (process.env.TASKMASTER_DEBUG === 'true') 
/**
 * Marks in state.json that a migration to the tagged format occurred so a
 * one-time notice can be shown. Idempotent: only initializes the
 * `migrationNoticeShown` flag when it has never been set.
 * @param {string} tasksJsonPath - Path to the tasks.json file
 */
function markMigrationForNotice(tasksJsonPath) {
	const debugEnabled = process.env.TASKMASTER_DEBUG === 'true';
	try {
		// tasks.json lives at <root>/.taskmaster/tasks.json, so going up two
		// levels yields the project root
		const projectRoot = path.dirname(path.dirname(tasksJsonPath));
		const statePath = path.join(projectRoot, '.taskmaster', 'state.json');

		// Ensure state.json exists before trying to update it
		if (!fs.existsSync(statePath)) {
			createStateJson(statePath);
		}

		try {
			const stateData = JSON.parse(fs.readFileSync(statePath, 'utf8')) || {};
			// Only set the flag on first migration; never clobber an existing value
			if (stateData.migrationNoticeShown === undefined) {
				stateData.migrationNoticeShown = false;
				fs.writeFileSync(
					statePath,
					JSON.stringify(stateData, null, 2),
					'utf8'
				);
			}
		} catch (stateError) {
			if (debugEnabled) {
				console.warn(
					`[WARN] Error updating state for migration notice: ${stateError.message}`
				);
			}
		}
	} catch (error) {
		if (debugEnabled) {
			console.warn(
				`[WARN] Error marking migration for notice: ${error.message}`
			);
		}
	}
}
/**
 * Writes and saves a JSON file. Handles tagged task lists properly.
 * When given resolved single-tag data it merges the tasks back into the full
 * tagged structure before writing, and always strips internal bookkeeping
 * properties (`_rawTaggedData`, `tag`) so they are never persisted.
 * Errors are logged, not thrown.
 * @param {string} filepath - Path to the JSON file
 * @param {Object} data - Data to write (can be resolved tag data or raw tagged data)
 * @param {string} projectRoot - Optional project root for tag context
 * @param {string} tag - Optional tag for tag context
 */
function writeJSON(filepath, data, projectRoot = null, tag = null) {
	const isDebug = process.env.TASKMASTER_DEBUG === 'true';

	try {
		let finalData = data;

		// If data represents resolved tag data but lost _rawTaggedData (edge-case observed in MCP path)
		if (
			!data._rawTaggedData &&
			projectRoot &&
			Array.isArray(data.tasks) &&
			!hasTaggedStructure(data)
		) {
			const resolvedTag = tag || getCurrentTag(projectRoot);

			if (isDebug) {
				console.log(
					`writeJSON: Detected resolved tag data missing _rawTaggedData. Re-reading raw data to prevent data loss for tag '${resolvedTag}'.`
				);
			}

			// Re-read the full file to get the complete tagged structure
			const rawFullData = JSON.parse(fs.readFileSync(filepath, 'utf8'));

			// Merge the updated data into the full structure
			finalData = {
				...rawFullData,
				[resolvedTag]: {
					// BUGFIX: the previous code spread the tag's metadata FIELDS into
					// the tag root (`...(rawFullData[resolvedTag]?.metadata || {})`),
					// which dropped the nested `metadata` object and left rogue
					// created/updated keys at the tag root. Preserve the whole
					// existing tag object, then apply overrides.
					...(rawFullData[resolvedTag] || {}),
					// An explicitly provided metadata object wins over the stored one
					...(data.metadata ? { metadata: data.metadata } : {}),
					tasks: data.tasks // The updated tasks array is the source of truth here
				}
			};
		}
		// If we have _rawTaggedData, this means we're working with resolved tag data
		// and need to merge it back into the full tagged structure
		else if (data && data._rawTaggedData && projectRoot) {
			const resolvedTag = tag || getCurrentTag(projectRoot);

			// Get the original tagged data
			const originalTaggedData = data._rawTaggedData;

			// Create a clean copy of the current resolved data (without internal properties)
			const { _rawTaggedData, tag: _, ...cleanResolvedData } = data;

			// Update the specific tag with the resolved data
			finalData = {
				...originalTaggedData,
				[resolvedTag]: cleanResolvedData
			};

			if (isDebug) {
				console.log(
					`writeJSON: Merging resolved data back into tag '${resolvedTag}'`
				);
			}
		}

		// Clean up any internal properties that shouldn't be persisted
		let cleanData = finalData;
		if (cleanData && typeof cleanData === 'object') {
			// Remove any _rawTaggedData or tag properties from root level
			const { _rawTaggedData, tag: tagProp, ...rootCleanData } = cleanData;
			cleanData = rootCleanData;

			// Additional cleanup for tag objects
			if (typeof cleanData === 'object' && !Array.isArray(cleanData)) {
				const finalCleanData = {};
				for (const [key, value] of Object.entries(cleanData)) {
					if (
						value &&
						typeof value === 'object' &&
						Array.isArray(value.tasks)
					) {
						// This is a tag object - clean up any rogue root-level properties
						const { created, description, ...cleanTagData } = value;

						// Only keep the description if there's no metadata.description
						if (
							description &&
							(!cleanTagData.metadata || !cleanTagData.metadata.description)
						) {
							cleanTagData.description = description;
						}

						finalCleanData[key] = cleanTagData;
					} else {
						finalCleanData[key] = value;
					}
				}
				cleanData = finalCleanData;
			}
		}

		fs.writeFileSync(filepath, JSON.stringify(cleanData, null, 2), 'utf8');

		if (isDebug) {
			console.log(`writeJSON: Successfully wrote to ${filepath}`);
		}
	} catch (error) {
		log('error', `Error writing JSON file ${filepath}:`, error.message);
		if (isDebug) {
			log('error', 'Full error details:', error);
		}
	}
}

/**
 * Sanitizes a prompt string for use in a shell command
 * @param {string} prompt The prompt to sanitize
 * @returns {string} Sanitized prompt
 */
function sanitizePrompt(prompt) {
	// Replace double quotes with escaped double quotes
	return prompt.replace(/"/g, '\\"');
}
/**
 * Finds a task analysis in the complexity report
 * @param {Object} report - The complexity report
 * @param {number} taskId - The task ID to find
 * @returns {Object|null} The task analysis or null if not found
 */
function findTaskInComplexityReport(report, taskId) {
	const analyses = report?.complexityAnalysis;
	if (!Array.isArray(analyses)) {
		return null;
	}
	return analyses.find((entry) => entry.taskId === taskId);
}

/**
 * Copies the complexityScore from the complexity report onto a task,
 * mutating it in place. Subtasks are scored using their parent's analysis.
 * @param {Object} task - Task or subtask object (mutated)
 * @param {Object} complexityReport - Parsed complexity report
 */
function addComplexityToTask(task, complexityReport) {
	// Subtasks inherit the parent's analysis; standalone tasks use their own id
	let lookupId;
	if (task.isSubtask) {
		lookupId = task.parentTask.id;
	} else {
		lookupId = task.parentId || task.id;
	}

	const analysis = findTaskInComplexityReport(complexityReport, lookupId);
	if (analysis) {
		task.complexityScore = analysis.complexityScore;
	}
}
/**
 * Formats a task ID as a string
 * @param {string|number} id - The task ID to format
 * @returns {string} The formatted task ID
 */
function formatTaskId(id) {
	// Numbers are stringified; strings (including dotted "1.2" subtask IDs)
	// and any other values pass through untouched.
	return typeof id === 'number' ? id.toString() : id;
}

/**
 * Finds a task by ID in the tasks array. Optionally filters subtasks by status.
 * @param {Array} tasks - The tasks array
 * @param {string|number} taskId - The task ID to find
 * @param {Object|null} complexityReport - Optional pre-loaded complexity report
 * @param {string} [statusFilter] - Optional status to filter subtasks by
 * @returns {{task: Object|null, originalSubtaskCount: number|null, originalSubtasks: Array|null}} The task object (potentially with filtered subtasks), the original subtask count, and original subtasks array if filtered, or nulls if not found.
 */
function findTaskById(
	tasks,
	taskId,
	complexityReport = null,
	statusFilter = null
) {
	if (!taskId || !tasks || !Array.isArray(tasks)) {
		return { task: null, originalSubtaskCount: null };
	}

	// Dotted IDs ("1.2") address a subtask; statusFilter does not apply there.
	if (typeof taskId === 'string' && taskId.includes('.')) {
		const [parentNum, subNum] = taskId
			.split('.')
			.map((part) => parseInt(part, 10));
		const parent = tasks.find((candidate) => candidate.id === parentNum);

		if (!parent || !parent.subtasks) {
			return { task: null, originalSubtaskCount: null, originalSubtasks: null };
		}

		const match = parent.subtasks.find((candidate) => candidate.id === subNum);
		if (match) {
			// Give the subtask enough parent context to stand alone (mutates it)
			match.parentTask = {
				id: parent.id,
				title: parent.title,
				status: parent.status
			};
			match.isSubtask = true;
			if (complexityReport) {
				addComplexityToTask(match, complexityReport);
			}
		}

		return {
			task: match || null,
			originalSubtaskCount: null,
			originalSubtasks: null
		};
	}

	const numericId = parseInt(taskId, 10);
	const found = tasks.find((candidate) => candidate.id === numericId);
	if (!found) {
		return { task: null, originalSubtaskCount: null, originalSubtasks: null };
	}

	let resultTask = found;
	let originalSubtaskCount = null;
	let originalSubtasks = null;

	// When a status filter is given, return a clone with filtered subtasks so
	// the caller's original task is never mutated.
	if (statusFilter && Array.isArray(found.subtasks)) {
		originalSubtasks = [...found.subtasks];
		originalSubtaskCount = found.subtasks.length;
		resultTask = {
			...found,
			subtasks: found.subtasks.filter(
				(candidate) =>
					candidate.status &&
					candidate.status.toLowerCase() === statusFilter.toLowerCase()
			)
		};
	}

	if (complexityReport) {
		addComplexityToTask(resultTask, complexityReport);
	}

	return { task: resultTask, originalSubtaskCount, originalSubtasks };
}
/**
 * Truncates text to a specified length
 * @param {string} text - The text to truncate
 * @param {number} maxLength - The maximum length
 * @returns {string} The truncated text (ends with "..." when shortened)
 */
function truncate(text, maxLength) {
	// Falsy input (null/undefined/'') is returned untouched
	if (!text) {
		return text;
	}
	return text.length <= maxLength
		? text
		: `${text.slice(0, maxLength - 3)}...`;
}

/**
 * Checks if array or object are empty
 * @param {*} value - The value to check
 * @returns {boolean} True if empty, false otherwise
 */
function isEmpty(value) {
	if (Array.isArray(value)) {
		return value.length === 0;
	}
	if (value !== null && typeof value === 'object') {
		return Object.keys(value).length === 0;
	}
	return false; // primitives and null are never "empty"
}
/**
 * Convert a string from camelCase to kebab-case
 * @param {string} str - The string to convert
 * @returns {string} The kebab-case version of the string
 */
const toKebabCase = (str) => {
	// Normalize common acronyms to single-capital form first so each becomes
	// one kebab segment ("API" -> "Api" -> "api") instead of one per letter.
	// Order matches the original implementation and is significant.
	const acronymPasses = [
		[/ID/g, 'Id'],
		[/API/g, 'Api'],
		[/UI/g, 'Ui'],
		[/URL/g, 'Url'],
		[/URI/g, 'Uri'],
		[/JSON/g, 'Json'],
		[/XML/g, 'Xml'],
		[/HTML/g, 'Html'],
		[/CSS/g, 'Css']
	];
	let normalized = str;
	for (const [pattern, replacement] of acronymPasses) {
		normalized = normalized.replace(pattern, replacement);
	}

	// Hyphenate remaining capitals, lowercase, and drop any leading hyphen
	return normalized
		.replace(/([A-Z])/g, '-$1')
		.toLowerCase()
		.replace(/^-/, '');
};
/**
 * Aggregates an array of telemetry objects into a single summary object.
 * Token counts and costs are summed; model/provider/currency collapse to a
 * single value when uniform, otherwise 'Multiple'.
 * @param {Array<Object>} telemetryArray - Array of telemetryData objects.
 * @param {string} overallCommandName - The name for the aggregated command.
 * @returns {Object|null} Aggregated telemetry object or null if input is empty.
 */
function aggregateTelemetry(telemetryArray, overallCommandName) {
	if (!telemetryArray || telemetryArray.length === 0) {
		return null;
	}

	const models = new Set();
	const providers = new Set();
	const currencies = new Set();

	let inputTokens = 0;
	let outputTokens = 0;
	let totalCost = 0;

	for (const entry of telemetryArray) {
		inputTokens += entry.inputTokens || 0;
		outputTokens += entry.outputTokens || 0;
		totalCost += entry.totalCost || 0;
		models.add(entry.modelUsed);
		providers.add(entry.providerName);
		currencies.add(entry.currency || 'USD');
	}

	return {
		timestamp: new Date().toISOString(), // aggregation time, not item time
		userId: telemetryArray[0].userId, // assumed consistent across items
		commandName: overallCommandName,
		modelUsed: models.size === 1 ? [...models][0] : 'Multiple',
		providerName: providers.size === 1 ? [...providers][0] : 'Multiple',
		inputTokens,
		outputTokens,
		totalTokens: inputTokens + outputTokens,
		totalCost: parseFloat(totalCost.toFixed(6)), // clamp float noise
		currency: currencies.size > 1 ? 'Multiple' : [...currencies][0]
	};
}

/**
 * @deprecated Use TaskMaster.getCurrentTag() instead
 * Gets the current tag from state.json or falls back to defaultTag from config
 * @param {string} projectRoot - The project root directory (required)
 * @returns {string} The current tag name
 * @throws {Error} When projectRoot is not provided
 */
function getCurrentTag(projectRoot) {
	if (!projectRoot) {
		throw new Error('projectRoot is required for getCurrentTag');
	}

	// 1) state.json is the authoritative source for the active tag
	try {
		const statePath = path.join(projectRoot, '.taskmaster', 'state.json');
		if (fs.existsSync(statePath)) {
			const state = JSON.parse(fs.readFileSync(statePath, 'utf8'));
			if (state?.currentTag) {
				return state.currentTag;
			}
		}
	} catch (error) {
		// Unreadable state is non-fatal; fall through to config
	}

	// 2) config.json may define a project-wide default tag
	try {
		const configPath = path.join(projectRoot, '.taskmaster', 'config.json');
		if (fs.existsSync(configPath)) {
			const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
			if (config?.global?.defaultTag) {
				return config.global.defaultTag;
			}
		}
	} catch (error) {
		// Ignore and use the hardcoded default
	}

	// 3) last resort
	return 'master';
}
/**
 * Gets the tasks array for a specific tag from tagged tasks.json data
 * @param {Object} data - The parsed tasks.json data (after migration)
 * @param {string} tagName - The tag name to get tasks for
 * @returns {Array} The tasks array for the specified tag, or empty array if not found
 */
function getTasksForTag(data, tagName) {
	if (!data || !tagName) {
		return [];
	}

	// Migrated format: { "master": { "tasks": [...] }, "otherTag": { ... } }
	const tagTasks = data[tagName]?.tasks;
	return Array.isArray(tagTasks) ? tagTasks : [];
}

/**
 * Sets the tasks array for a specific tag in the data structure
 * @param {Object} data - The tasks.json data object
 * @param {string} tagName - The tag name to set tasks for
 * @param {Array} tasks - The tasks array to set
 * @returns {Object} The updated data object
 */
function setTasksForTag(data, tagName, tasks) {
	// Falsy data is replaced with a fresh container (matches legacy behavior)
	const container = data || {};
	if (!container[tagName]) {
		container[tagName] = {};
	}
	container[tagName].tasks = tasks || [];
	return container;
}
exist\n\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\tfor (const subtask of task.subtasks) {\n\t\t\t\tflattened.push({\n\t\t\t\t\t...subtask,\n\t\t\t\t\tsearchableId: `${task.id}.${subtask.id}`, // Format: \"15.2\"\n\t\t\t\t\tisSubtask: true,\n\t\t\t\t\tparentId: task.id,\n\t\t\t\t\tparentTitle: task.title,\n\t\t\t\t\t// Enhance subtask context with parent information\n\t\t\t\t\ttitle: `${subtask.title} (subtask of: ${task.title})`,\n\t\t\t\t\tdescription: `${subtask.description} [Parent: ${task.description}]`\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\treturn flattened;\n}\n\n/**\n * Ensures the tag object has a metadata object with created/updated timestamps.\n * @param {Object} tagObj - The tag object (e.g., data['master'])\n * @param {Object} [opts] - Optional fields (e.g., description, skipUpdate)\n * @param {string} [opts.description] - Description for the tag\n * @param {boolean} [opts.skipUpdate] - If true, don't update the 'updated' timestamp\n * @returns {Object} The updated tag object (for chaining)\n */\nfunction ensureTagMetadata(tagObj, opts = {}) {\n\tif (!tagObj || typeof tagObj !== 'object') {\n\t\tthrow new Error('tagObj must be a valid object');\n\t}\n\n\tconst now = new Date().toISOString();\n\n\tif (!tagObj.metadata) {\n\t\t// Create new metadata object\n\t\ttagObj.metadata = {\n\t\t\tcreated: now,\n\t\t\tupdated: now,\n\t\t\t...(opts.description ? 
{ description: opts.description } : {})\n\t\t};\n\t} else {\n\t\t// Ensure existing metadata has required fields\n\t\tif (!tagObj.metadata.created) {\n\t\t\ttagObj.metadata.created = now;\n\t\t}\n\n\t\t// Update timestamp unless explicitly skipped\n\t\tif (!opts.skipUpdate) {\n\t\t\ttagObj.metadata.updated = now;\n\t\t}\n\n\t\t// Add description if provided and not already present\n\t\tif (opts.description && !tagObj.metadata.description) {\n\t\t\ttagObj.metadata.description = opts.description;\n\t\t}\n\t}\n\n\treturn tagObj;\n}\n\n// Export all utility functions and configuration\nexport {\n\tLOG_LEVELS,\n\tlog,\n\treadJSON,\n\twriteJSON,\n\tsanitizePrompt,\n\treadComplexityReport,\n\tfindTaskInComplexityReport,\n\ttaskExists,\n\tformatTaskId,\n\tfindTaskById,\n\ttruncate,\n\tisEmpty,\n\tfindCycles,\n\ttoKebabCase,\n\tdetectCamelCaseFlags,\n\tdisableSilentMode,\n\tenableSilentMode,\n\tgetTaskManager,\n\tisSilentMode,\n\taddComplexityToTask,\n\tresolveEnvVariable,\n\tfindProjectRoot,\n\tgetTagAwareFilePath,\n\tslugifyTagForFilePath,\n\taggregateTelemetry,\n\tgetCurrentTag,\n\tresolveTag,\n\tgetTasksForTag,\n\tsetTasksForTag,\n\tperformCompleteTagMigration,\n\tmigrateConfigJson,\n\tcreateStateJson,\n\tmarkMigrationForNotice,\n\tflattenTasksWithSubtasks,\n\tensureTagMetadata\n};\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/add-subtask.js", "/**\n * Direct function wrapper for addSubtask\n */\n\nimport { addSubtask } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Add a subtask to an existing task\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.id - Parent task ID\n * @param {string} [args.taskId] - Existing task ID to convert to subtask (optional)\n * @param {string} [args.title] - Title for new subtask (when creating a new subtask)\n 
/**
 * Direct function wrapper for addSubtask: validates arguments, normalizes
 * IDs and dependencies, then delegates to the core addSubtask implementation.
 * @param {Object} args - Function arguments
 * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.
 * @param {string} args.id - Parent task ID
 * @param {string} [args.taskId] - Existing task ID to convert to subtask (optional)
 * @param {string} [args.title] - Title for new subtask (when creating a new subtask)
 * @param {string} [args.description] - Description for new subtask
 * @param {string} [args.details] - Implementation details for new subtask
 * @param {string} [args.status] - Status for new subtask (default: 'pending')
 * @param {string} [args.dependencies] - Comma-separated list of dependency IDs
 * @param {boolean} [args.skipGenerate] - Skip regenerating task files
 * @param {string} [args.projectRoot] - Project root directory
 * @param {string} [args.tag] - Tag for the task
 * @param {Object} log - Logger object
 * @returns {Promise<{success: boolean, data?: Object, error?: Object}>}
 */
export async function addSubtaskDirect(args, log) {
	const {
		tasksJsonPath,
		id,
		taskId,
		title,
		description,
		details,
		status,
		dependencies: dependenciesStr,
		skipGenerate,
		projectRoot,
		tag
	} = args;

	try {
		log.info(`Adding subtask with args: ${JSON.stringify(args)}`);

		// Validate required arguments up front and fail fast
		if (!tasksJsonPath) {
			log.error('addSubtaskDirect called without tasksJsonPath');
			return {
				success: false,
				error: {
					code: 'MISSING_ARGUMENT',
					message: 'tasksJsonPath is required'
				}
			};
		}

		if (!id) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Parent task ID is required'
				}
			};
		}

		// Either an existing task to convert or a title for a new subtask
		if (!taskId && !title) {
			return {
				success: false,
				error: {
					code: 'INPUT_VALIDATION_ERROR',
					message: 'Either taskId or title must be provided'
				}
			};
		}

		const tasksPath = tasksJsonPath;

		// Dependencies may arrive as "1,2.3"; dot notation stays a string,
		// plain IDs become numbers
		const dependencies = dependenciesStr
			? dependenciesStr
					.split(',')
					.map((depId) =>
						depId.includes('.') ? depId.trim() : parseInt(depId.trim(), 10)
					)
			: [];

		const existingTaskId = taskId ? parseInt(taskId, 10) : null;
		const parentId = parseInt(id, 10);
		const generateFiles = !skipGenerate;

		// Silence console output so it cannot corrupt the JSON response
		enableSilentMode();

		const context = { projectRoot, tag };

		if (existingTaskId) {
			// Case 1: convert an existing task into a subtask of parentId
			log.info(`Converting task ${existingTaskId} to a subtask of ${parentId}`);
			const result = await addSubtask(
				tasksPath,
				parentId,
				existingTaskId,
				null,
				generateFiles,
				context
			);
			disableSilentMode();
			return {
				success: true,
				data: {
					message: `Task ${existingTaskId} successfully converted to a subtask of task ${parentId}`,
					subtask: result
				}
			};
		}

		// Case 2: create a brand-new subtask under parentId
		log.info(`Creating new subtask for parent task ${parentId}`);
		const newSubtaskData = {
			title: title,
			description: description || '',
			details: details || '',
			status: status || 'pending',
			dependencies: dependencies
		};
		const result = await addSubtask(
			tasksPath,
			parentId,
			null,
			newSubtaskData,
			generateFiles,
			context
		);
		disableSilentMode();
		return {
			success: true,
			data: {
				message: `New subtask ${parentId}.${result.id} successfully created`,
				subtask: result
			}
		};
	} catch (error) {
		// Restore normal logging even on failure so later output isn't swallowed
		disableSilentMode();
		log.error(`Error in addSubtaskDirect: ${error.message}`);
		return {
			success: false,
			error: {
				code: 'CORE_FUNCTION_ERROR',
				message: error.message
			}
		};
	}
}
...args),\n\t\twarn: (...args) => log('warn', ...args),\n\t\terror: (...args) => log('error', ...args),\n\t\tdebug: (...args) => log('debug', ...args),\n\t\tsuccess: (...args) => log('success', ...args)\n\t};\n\n\ttry {\n\t\t// Validate tag name\n\t\tif (!tagName || typeof tagName !== 'string') {\n\t\t\tthrow new Error('Tag name is required and must be a string');\n\t\t}\n\n\t\t// Validate tag name format (alphanumeric, hyphens, underscores only)\n\t\tif (!/^[a-zA-Z0-9_-]+$/.test(tagName)) {\n\t\t\tthrow new Error(\n\t\t\t\t'Tag name can only contain letters, numbers, hyphens, and underscores'\n\t\t\t);\n\t\t}\n\n\t\t// Reserved tag names\n\t\tconst reservedNames = ['master', 'main', 'default'];\n\t\tif (reservedNames.includes(tagName.toLowerCase())) {\n\t\t\tthrow new Error(`\"${tagName}\" is a reserved tag name`);\n\t\t}\n\n\t\tlogFn.info(`Creating new tag: ${tagName}`);\n\n\t\t// Read current tasks data\n\t\tconst data = readJSON(tasksPath, projectRoot);\n\t\tif (!data) {\n\t\t\tthrow new Error(`Could not read tasks file at ${tasksPath}`);\n\t\t}\n\n\t\t// Use raw tagged data for tag operations - ensure we get the actual tagged structure\n\t\tlet rawData;\n\t\tif (data._rawTaggedData) {\n\t\t\t// If we have _rawTaggedData, use it (this is the clean tagged structure)\n\t\t\trawData = data._rawTaggedData;\n\t\t} else if (data.tasks && !data.master) {\n\t\t\t// This is legacy format - create a master tag structure\n\t\t\trawData = {\n\t\t\t\tmaster: {\n\t\t\t\t\ttasks: data.tasks,\n\t\t\t\t\tmetadata: data.metadata || {\n\t\t\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\t\t\tdescription: 'Tasks live here by default'\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// This is already in tagged format, use it directly but exclude internal fields\n\t\t\trawData = {};\n\t\t\tfor (const [key, value] of Object.entries(data)) {\n\t\t\t\tif (key !== '_rawTaggedData' && key !== 'tag') {\n\t\t\t\t\trawData[key] = 
value;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Check if tag already exists\n\t\tif (rawData[tagName]) {\n\t\t\tthrow new Error(`Tag \"${tagName}\" already exists`);\n\t\t}\n\n\t\t// Determine source for copying tasks (only if explicitly requested)\n\t\tlet sourceTasks = [];\n\t\tif (copyFromCurrent || copyFromTag) {\n\t\t\tconst sourceTag = copyFromTag || getCurrentTag(projectRoot);\n\t\t\tsourceTasks = getTasksForTag(rawData, sourceTag);\n\n\t\t\tif (copyFromTag && sourceTasks.length === 0) {\n\t\t\t\tlogFn.warn(`Source tag \"${copyFromTag}\" not found or has no tasks`);\n\t\t\t}\n\n\t\t\tlogFn.info(`Copying ${sourceTasks.length} tasks from tag \"${sourceTag}\"`);\n\t\t} else {\n\t\t\tlogFn.info('Creating empty tag (no tasks copied)');\n\t\t}\n\n\t\t// Create the new tag structure in raw data\n\t\trawData[tagName] = {\n\t\t\ttasks: [...sourceTasks], // Create a copy of the tasks array\n\t\t\tmetadata: {\n\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\tdescription:\n\t\t\t\t\tdescription || `Tag created on ${new Date().toLocaleDateString()}`\n\t\t\t}\n\t\t};\n\n\t\t// Create clean data for writing (exclude _rawTaggedData to prevent corruption)\n\t\tconst cleanData = {};\n\t\tfor (const [key, value] of Object.entries(rawData)) {\n\t\t\tif (key !== '_rawTaggedData') {\n\t\t\t\tcleanData[key] = value;\n\t\t\t}\n\t\t}\n\n\t\t// Write the clean data back to file with proper context to avoid tag corruption\n\t\twriteJSON(tasksPath, cleanData, projectRoot);\n\n\t\tlogFn.success(`Successfully created tag \"${tagName}\"`);\n\n\t\t// For JSON output, return structured data\n\t\tif (outputFormat === 'json') {\n\t\t\treturn {\n\t\t\t\ttagName,\n\t\t\t\tcreated: true,\n\t\t\t\ttasksCopied: sourceTasks.length,\n\t\t\t\tsourceTag:\n\t\t\t\t\tcopyFromCurrent || copyFromTag\n\t\t\t\t\t\t? 
copyFromTag || getCurrentTag(projectRoot)\n\t\t\t\t\t\t: null,\n\t\t\t\tdescription:\n\t\t\t\t\tdescription || `Tag created on ${new Date().toLocaleDateString()}`\n\t\t\t};\n\t\t}\n\n\t\t// For text output, display success message\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.green.bold('✓ Tag Created Successfully') +\n\t\t\t\t\t\t`\\n\\nTag Name: ${chalk.cyan(tagName)}` +\n\t\t\t\t\t\t`\\nTasks Copied: ${chalk.yellow(sourceTasks.length)}` +\n\t\t\t\t\t\t(copyFromCurrent || copyFromTag\n\t\t\t\t\t\t\t? `\\nSource Tag: ${chalk.cyan(copyFromTag || getCurrentTag(projectRoot))}`\n\t\t\t\t\t\t\t: '') +\n\t\t\t\t\t\t(description ? `\\nDescription: ${chalk.gray(description)}` : ''),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\treturn {\n\t\t\ttagName,\n\t\t\tcreated: true,\n\t\t\ttasksCopied: sourceTasks.length,\n\t\t\tsourceTag:\n\t\t\t\tcopyFromCurrent || copyFromTag\n\t\t\t\t\t? 
/**
 * Delete an existing tag and all of its tasks.
 *
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} tagName - Name of the tag to delete (never "master")
 * @param {Object} [options] - Options object
 * @param {boolean} [options.yes=false] - Skip interactive confirmation prompts
 * @param {Object} [context] - Context containing projectRoot and optional mcpLog
 * @param {string} [context.projectRoot] - Project root path
 * @param {Object} [context.mcpLog] - MCP logger object (optional)
 * @param {string} [outputFormat='text'] - Output format ('text' or 'json')
 * @returns {Promise<Object>} Details of the deletion
 * @throws {Error} On missing/reserved tag, unreadable file, or user cancellation
 */
async function deleteTag(
	tasksPath,
	tagName,
	options = {},
	context = {},
	outputFormat = 'text'
) {
	const { mcpLog, projectRoot } = context;
	const { yes = false } = options;

	// Route log output through MCP when available, CLI logger otherwise.
	const logger = mcpLog || {
		info: (...args) => log('info', ...args),
		warn: (...args) => log('warn', ...args),
		error: (...args) => log('error', ...args),
		debug: (...args) => log('debug', ...args),
		success: (...args) => log('success', ...args)
	};

	try {
		if (!tagName || typeof tagName !== 'string') {
			throw new Error('Tag name is required and must be a string');
		}

		// The default tag is never deletable.
		if (tagName === 'master') {
			throw new Error('Cannot delete the "master" tag');
		}

		logger.info(`Deleting tag: ${tagName}`);

		const fileData = readJSON(tasksPath, projectRoot);
		if (!fileData) {
			throw new Error(`Could not read tasks file at ${tasksPath}`);
		}

		// Normalize to the raw tagged structure regardless of input shape.
		let taggedData;
		if (fileData._rawTaggedData) {
			taggedData = fileData._rawTaggedData;
		} else if (fileData.tasks && !fileData.master) {
			// Legacy flat format: wrap it under a synthetic master tag.
			taggedData = {
				master: {
					tasks: fileData.tasks,
					metadata: fileData.metadata || {
						created: new Date().toISOString(),
						updated: new Date().toISOString(),
						description: 'Tasks live here by default'
					}
				}
			};
		} else {
			// Already tagged: drop internal bookkeeping fields.
			taggedData = Object.fromEntries(
				Object.entries(fileData).filter(
					([key]) => key !== '_rawTaggedData' && key !== 'tag'
				)
			);
		}

		if (!taggedData[tagName]) {
			throw new Error(`Tag "${tagName}" does not exist`);
		}

		// Track whether the active tag is being removed.
		const currentTag = getCurrentTag(projectRoot);
		const isCurrentTag = currentTag === tagName;

		const taskCount = getTasksForTag(taggedData, tagName).length;

		// Interactive double-confirmation unless --yes or nothing is at risk.
		if (!yes && taskCount > 0 && outputFormat === 'text') {
			console.log(
				boxen(
					chalk.yellow.bold('⚠ WARNING: Tag Deletion') +
						`\n\nYou are about to delete tag "${chalk.cyan(tagName)}"` +
						`\nThis will permanently delete ${chalk.red.bold(taskCount)} tasks` +
						'\n\nThis action cannot be undone!',
					{
						padding: 1,
						borderColor: 'yellow',
						borderStyle: 'round',
						margin: { top: 1, bottom: 1 }
					}
				)
			);

			const { proceed } = await inquirer.prompt([
				{
					type: 'confirm',
					name: 'proceed',
					message: `Are you sure you want to delete tag "${tagName}" and its ${taskCount} tasks?`,
					default: false
				}
			]);

			if (!proceed) {
				logger.info('Tag deletion cancelled by user');
				throw new Error('Tag deletion cancelled');
			}

			// Second confirmation: the user must retype the tag name.
			const { tagNameConfirm } = await inquirer.prompt([
				{
					type: 'input',
					name: 'tagNameConfirm',
					message: `To confirm deletion, please type the tag name "${tagName}":`,
					validate: (input) => {
						if (input === tagName) {
							return true;
						}
						return `Please type exactly "${tagName}" to confirm deletion`;
					}
				}
			]);

			if (tagNameConfirm !== tagName) {
				logger.info('Tag deletion cancelled - incorrect tag name confirmation');
				throw new Error('Tag deletion cancelled');
			}

			logger.info('Double confirmation received, proceeding with deletion...');
		}

		delete taggedData[tagName];

		// Deleting the active tag forces a switch back to master.
		if (isCurrentTag) {
			await switchCurrentTag(projectRoot, 'master');
			logger.info('Switched current tag to "master"');
		}

		// Strip _rawTaggedData before persisting to prevent file corruption.
		const persistable = Object.fromEntries(
			Object.entries(taggedData).filter(([key]) => key !== '_rawTaggedData')
		);
		writeJSON(tasksPath, persistable, projectRoot);

		logger.success(`Successfully deleted tag "${tagName}"`);

		const result = {
			tagName,
			deleted: true,
			tasksDeleted: taskCount,
			wasCurrentTag: isCurrentTag,
			switchedToMaster: isCurrentTag
		};

		if (outputFormat === 'json') {
			return result;
		}

		if (outputFormat === 'text') {
			console.log(
				boxen(
					chalk.red.bold('✓ Tag Deleted Successfully') +
						`\n\nTag Name: ${chalk.cyan(tagName)}` +
						`\nTasks Deleted: ${chalk.yellow(taskCount)}` +
						(isCurrentTag
							? `\n${chalk.yellow('⚠ Switched current tag to "master"')}`
							: ''),
					{
						padding: 1,
						borderColor: 'red',
						borderStyle: 'round',
						margin: { top: 1, bottom: 1 }
					}
				)
			);
		}

		return result;
	} catch (error) {
		logger.error(`Error deleting tag: ${error.message}`);
		throw error;
	}
}
/**
 * Backfill missing metadata (created/updated/description) on existing tags.
 * Writes the file back only when something was actually added. Failures are
 * logged as warnings and never thrown - enhancement is best-effort.
 *
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {Object} rawData - The raw tagged data (mutated in place)
 * @param {Object} [context] - Context object (projectRoot, optional mcpLog)
 * @returns {Promise<boolean>} True if any tags were enhanced
 */
async function enhanceTagsWithMetadata(tasksPath, rawData, context = {}) {
	let enhanced = false;

	try {
		// Fall back to the file's own timestamps for a plausible creation date.
		let fallbackCreated;
		try {
			const stats = fs.statSync(tasksPath);
			fallbackCreated =
				stats.birthtime < stats.mtime ? stats.birthtime : stats.mtime;
		} catch (error) {
			fallbackCreated = new Date();
		}

		for (const [tagName, tagData] of Object.entries(rawData)) {
			// Only treat entries with a tasks array as real tags; skip
			// legacy/internal properties.
			const isRealTag =
				tagName !== 'tasks' &&
				tagName !== 'tag' &&
				tagName !== '_rawTaggedData' &&
				tagData &&
				typeof tagData === 'object' &&
				Array.isArray(tagData.tasks);
			if (!isRealTag) {
				continue;
			}

			if (!tagData.metadata) {
				tagData.metadata = {};
				enhanced = true;
			}

			if (!tagData.metadata.created) {
				tagData.metadata.created = fallbackCreated.toISOString();
				enhanced = true;
			}

			if (!tagData.metadata.description) {
				tagData.metadata.description =
					tagName === 'master'
						? 'Tasks live here by default'
						: `Tag created on ${new Date(tagData.metadata.created).toLocaleDateString()}`;
				enhanced = true;
			}

			// Initialize updated to the creation date when absent.
			if (!tagData.metadata.updated) {
				tagData.metadata.updated = tagData.metadata.created;
				enhanced = true;
			}
		}

		// Persist only when something changed, without _rawTaggedData.
		if (enhanced) {
			const persistable = Object.fromEntries(
				Object.entries(rawData).filter(([key]) => key !== '_rawTaggedData')
			);
			writeJSON(tasksPath, persistable, context.projectRoot);
		}
	} catch (error) {
		// Non-fatal: log and continue.
		const logger = context.mcpLog || {
			warn: (...args) => log('warn', ...args)
		};
		logger.warn(`Could not enhance tag metadata: ${error.message}`);
	}

	return enhanced;
}
/**
 * List all available tags with metadata.
 *
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {Object} [options] - Options object
 * @param {boolean} [options.showTaskCounts=true] - Whether to show task counts
 * @param {boolean} [options.showMetadata=false] - Whether to show metadata
 * @param {Object} [context] - Context containing projectRoot and optional mcpLog
 * @param {string} [context.projectRoot] - Project root path
 * @param {Object} [context.mcpLog] - MCP logger object (optional)
 * @param {string} [outputFormat='text'] - Output format ('text' or 'json')
 * @returns {Promise<Object>} Result object with tags list
 * @throws {Error} When the tasks file cannot be read
 */
async function tags(
	tasksPath,
	options = {},
	context = {},
	outputFormat = 'text'
) {
	const { mcpLog, projectRoot } = context;
	const { showTaskCounts = true, showMetadata = false } = options;

	// Route log output through MCP when available, CLI logger otherwise.
	const logger = mcpLog || {
		info: (...args) => log('info', ...args),
		warn: (...args) => log('warn', ...args),
		error: (...args) => log('error', ...args),
		debug: (...args) => log('debug', ...args),
		success: (...args) => log('success', ...args)
	};

	try {
		logger.info('Listing available tags');

		const fileData = readJSON(tasksPath, projectRoot);
		if (!fileData) {
			throw new Error(`Could not read tasks file at ${tasksPath}`);
		}

		const currentTag = getCurrentTag(projectRoot);

		// Prefer the attached raw tagged structure when present.
		const taggedData = fileData._rawTaggedData || fileData;

		// Opportunistically backfill missing tag metadata.
		await enhanceTagsWithMetadata(tasksPath, taggedData, context);

		// Collect every real tag entry (skip legacy/internal properties).
		const tagList = [];
		for (const [name, entry] of Object.entries(taggedData)) {
			const isRealTag =
				name !== 'tasks' &&
				name !== 'tag' &&
				name !== '_rawTaggedData' &&
				entry &&
				typeof entry === 'object' &&
				Array.isArray(entry.tasks);
			if (!isRealTag) {
				continue;
			}

			const tagTasks = entry.tasks || [];
			const meta = entry.metadata || {};

			tagList.push({
				name,
				isCurrent: name === currentTag,
				completedTasks: tagTasks.filter(
					(t) => t.status === 'done' || t.status === 'completed'
				).length,
				tasks: tagTasks,
				created: meta.created || 'Unknown',
				description: meta.description || 'No description'
			});
		}

		// Current tag first, then alphabetical.
		tagList.sort((a, b) => {
			if (a.isCurrent) return -1;
			if (b.isCurrent) return 1;
			return a.name.localeCompare(b.name);
		});

		logger.success(`Found ${tagList.length} tags`);

		if (outputFormat === 'json') {
			return {
				tags: tagList,
				currentTag,
				totalTags: tagList.length
			};
		}

		if (outputFormat === 'text') {
			if (tagList.length === 0) {
				console.log(
					boxen(chalk.yellow('No tags found'), {
						padding: 1,
						borderColor: 'yellow',
						borderStyle: 'round',
						margin: { top: 1, bottom: 1 }
					})
				);
				return { tags: [], currentTag, totalTags: 0 };
			}

			// Build table headers according to requested columns.
			const headers = [chalk.cyan.bold('Tag Name')];
			if (showTaskCounts) {
				headers.push(chalk.cyan.bold('Tasks'));
				headers.push(chalk.cyan.bold('Completed'));
			}
			if (showMetadata) {
				headers.push(chalk.cyan.bold('Created'));
				headers.push(chalk.cyan.bold('Description'));
			}

			const table = new Table({
				head: headers,
				colWidths: showMetadata ? [20, 10, 12, 15, 50] : [25, 10, 12]
			});

			for (const tag of tagList) {
				const row = [];

				// Mark the current tag with a bullet and label.
				row.push(
					tag.isCurrent
						? `${chalk.green('●')} ${chalk.green.bold(tag.name)} ${chalk.gray('(current)')}`
						: ` ${tag.name}`
				);

				if (showTaskCounts) {
					row.push(chalk.white(tag.tasks.length.toString()));
					row.push(chalk.green(tag.completedTasks.toString()));
				}

				if (showMetadata) {
					row.push(
						chalk.gray(
							tag.created !== 'Unknown'
								? new Date(tag.created).toLocaleDateString()
								: 'Unknown'
						)
					);
					row.push(chalk.gray(truncate(tag.description, 50)));
				}

				table.push(row);
			}

			console.log(table.toString());
		}

		return {
			tags: tagList,
			currentTag,
			totalTags: tagList.length
		};
	} catch (error) {
		logger.error(`Error listing tags: ${error.message}`);
		throw error;
	}
}
/**
 * Switch the active tag context.
 *
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} tagName - Name of the tag to switch to
 * @param {Object} [options] - Options object (currently unused)
 * @param {Object} [context] - Context containing projectRoot and optional mcpLog
 * @param {string} [context.projectRoot] - Project root path
 * @param {Object} [context.mcpLog] - MCP logger object (optional)
 * @param {string} [outputFormat='text'] - Output format ('text' or 'json')
 * @returns {Promise<Object>} Switch details including the next suggested task
 * @throws {Error} When the tag does not exist or the file cannot be read
 */
async function useTag(
	tasksPath,
	tagName,
	options = {},
	context = {},
	outputFormat = 'text'
) {
	const { mcpLog, projectRoot } = context;

	// Route log output through MCP when available, CLI logger otherwise.
	const logger = mcpLog || {
		info: (...args) => log('info', ...args),
		warn: (...args) => log('warn', ...args),
		error: (...args) => log('error', ...args),
		debug: (...args) => log('debug', ...args),
		success: (...args) => log('success', ...args)
	};

	try {
		if (!tagName || typeof tagName !== 'string') {
			throw new Error('Tag name is required and must be a string');
		}

		logger.info(`Switching to tag: ${tagName}`);

		// Verify the tag exists before touching any state.
		const fileData = readJSON(tasksPath, projectRoot);
		if (!fileData) {
			throw new Error(`Could not read tasks file at ${tasksPath}`);
		}

		const taggedData = fileData._rawTaggedData || fileData;
		if (!taggedData[tagName]) {
			throw new Error(`Tag "${tagName}" does not exist`);
		}

		const previousTag = getCurrentTag(projectRoot);

		await switchCurrentTag(projectRoot, tagName);

		// Re-read scoped to the new tag to count its tasks.
		const tagView = readJSON(tasksPath, projectRoot, tagName);
		const tagTasks = tagView ? tagView.tasks || [] : [];
		const taskCount = tagTasks.length;

		// Suggest what to work on next in this context.
		const nextTask = findNextTask(tagTasks);

		logger.success(`Successfully switched to tag "${tagName}"`);

		const result = {
			previousTag,
			currentTag: tagName,
			switched: true,
			taskCount,
			nextTask
		};

		if (outputFormat === 'json') {
			return result;
		}

		if (outputFormat === 'text') {
			const nextTaskInfo = nextTask
				? `\nNext Task: ${chalk.cyan(`#${nextTask.id}`)} - ${chalk.white(nextTask.title)}`
				: `\nNext Task: ${chalk.gray('No eligible tasks available')}`;

			console.log(
				boxen(
					chalk.green.bold('✓ Tag Switched Successfully') +
						`\n\nPrevious Tag: ${chalk.cyan(previousTag)}` +
						`\nCurrent Tag: ${chalk.green.bold(tagName)}` +
						`\nAvailable Tasks: ${chalk.yellow(taskCount)}` +
						nextTaskInfo,
					{
						padding: 1,
						borderColor: 'green',
						borderStyle: 'round',
						margin: { top: 1, bottom: 1 }
					}
				)
			);
		}

		return result;
	} catch (error) {
		logger.error(`Error switching tag: ${error.message}`);
		throw error;
	}
}
/**
 * Rename an existing tag.
 *
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} oldName - Current name of the tag (never "master")
 * @param {string} newName - New name for the tag
 * @param {Object} [options] - Options object (currently unused)
 * @param {Object} [context] - Context containing projectRoot and optional mcpLog
 * @param {string} [context.projectRoot] - Project root path
 * @param {Object} [context.mcpLog] - MCP logger object (optional)
 * @param {string} [outputFormat='text'] - Output format ('text' or 'json')
 * @returns {Promise<Object>} Rename details
 * @throws {Error} On invalid/reserved names, missing source, or name collision
 */
async function renameTag(
	tasksPath,
	oldName,
	newName,
	options = {},
	context = {},
	outputFormat = 'text'
) {
	const { mcpLog, projectRoot } = context;

	// Route log output through MCP when available, CLI logger otherwise.
	const logger = mcpLog || {
		info: (...args) => log('info', ...args),
		warn: (...args) => log('warn', ...args),
		error: (...args) => log('error', ...args),
		debug: (...args) => log('debug', ...args),
		success: (...args) => log('success', ...args)
	};

	try {
		if (!oldName || typeof oldName !== 'string') {
			throw new Error('Old tag name is required and must be a string');
		}
		if (!newName || typeof newName !== 'string') {
			throw new Error('New tag name is required and must be a string');
		}

		// Alphanumeric plus hyphen/underscore only
		if (!/^[a-zA-Z0-9_-]+$/.test(newName)) {
			throw new Error(
				'New tag name can only contain letters, numbers, hyphens, and underscores'
			);
		}

		// The default tag can never be renamed.
		if (oldName === 'master') {
			throw new Error('Cannot rename the "master" tag');
		}

		const reservedNames = ['master', 'main', 'default'];
		if (reservedNames.includes(newName.toLowerCase())) {
			throw new Error(`"${newName}" is a reserved tag name`);
		}

		logger.info(`Renaming tag from "${oldName}" to "${newName}"`);

		const fileData = readJSON(tasksPath, projectRoot);
		if (!fileData) {
			throw new Error(`Could not read tasks file at ${tasksPath}`);
		}

		const taggedData = fileData._rawTaggedData || fileData;

		if (!taggedData[oldName]) {
			throw new Error(`Tag "${oldName}" does not exist`);
		}
		if (taggedData[newName]) {
			throw new Error(`Tag "${newName}" already exists`);
		}

		// Track whether the active tag is being renamed.
		const currentTag = getCurrentTag(projectRoot);
		const isCurrentTag = currentTag === oldName;

		// Copy the tag under its new name, then remove the old entry.
		taggedData[newName] = { ...taggedData[oldName] };

		// Record the rename in metadata when metadata exists.
		if (taggedData[newName].metadata) {
			taggedData[newName].metadata.renamed = {
				from: oldName,
				date: new Date().toISOString()
			};
		}

		delete taggedData[oldName];

		// Keep the current-tag pointer in sync.
		if (isCurrentTag) {
			await switchCurrentTag(projectRoot, newName);
			logger.info(`Updated current tag reference to "${newName}"`);
		}

		// Strip _rawTaggedData before persisting to prevent file corruption.
		const persistable = Object.fromEntries(
			Object.entries(taggedData).filter(([key]) => key !== '_rawTaggedData')
		);
		writeJSON(tasksPath, persistable, projectRoot);

		const taskCount = getTasksForTag(taggedData, newName).length;

		logger.success(`Successfully renamed tag from "${oldName}" to "${newName}"`);

		const result = {
			oldName,
			newName,
			renamed: true,
			taskCount,
			wasCurrentTag: isCurrentTag,
			isCurrentTag: isCurrentTag
		};

		if (outputFormat === 'json') {
			return result;
		}

		if (outputFormat === 'text') {
			console.log(
				boxen(
					chalk.green.bold('✓ Tag Renamed Successfully') +
						`\n\nOld Name: ${chalk.cyan(oldName)}` +
						`\nNew Name: ${chalk.green.bold(newName)}` +
						`\nTasks: ${chalk.yellow(taskCount)}` +
						(isCurrentTag ? `\n${chalk.green('✓ Current tag updated')}` : ''),
					{
						padding: 1,
						borderColor: 'green',
						borderStyle: 'round',
						margin: { top: 1, bottom: 1 }
					}
				)
			);
		}

		return result;
	} catch (error) {
		logger.error(`Error renaming tag: ${error.message}`);
		throw error;
	}
}
/**
 * Copy an existing tag to create a new tag with the same tasks.
 *
 * @param {string} tasksPath - Path to the tasks.json file
 * @param {string} sourceName - Name of the source tag to copy from
 * @param {string} targetName - Name of the new tag to create
 * @param {Object} [options] - Options object
 * @param {string} [options.description] - Optional description for the new tag
 * @param {Object} [context] - Context containing projectRoot and optional mcpLog
 * @param {string} [context.projectRoot] - Project root path
 * @param {Object} [context.mcpLog] - MCP logger object (optional)
 * @param {string} [outputFormat='text'] - Output format ('text' or 'json')
 * @returns {Promise<Object>} Copy details
 * @throws {Error} On invalid/reserved names, missing source, or name collision
 */
async function copyTag(
	tasksPath,
	sourceName,
	targetName,
	options = {},
	context = {},
	outputFormat = 'text'
) {
	const { mcpLog, projectRoot } = context;
	const { description } = options;

	// Route log output through MCP when available, CLI logger otherwise.
	const logger = mcpLog || {
		info: (...args) => log('info', ...args),
		warn: (...args) => log('warn', ...args),
		error: (...args) => log('error', ...args),
		debug: (...args) => log('debug', ...args),
		success: (...args) => log('success', ...args)
	};

	try {
		if (!sourceName || typeof sourceName !== 'string') {
			throw new Error('Source tag name is required and must be a string');
		}
		if (!targetName || typeof targetName !== 'string') {
			throw new Error('Target tag name is required and must be a string');
		}

		// Alphanumeric plus hyphen/underscore only
		if (!/^[a-zA-Z0-9_-]+$/.test(targetName)) {
			throw new Error(
				'Target tag name can only contain letters, numbers, hyphens, and underscores'
			);
		}

		const reservedNames = ['master', 'main', 'default'];
		if (reservedNames.includes(targetName.toLowerCase())) {
			throw new Error(`"${targetName}" is a reserved tag name`);
		}

		logger.info(`Copying tag from "${sourceName}" to "${targetName}"`);

		const fileData = readJSON(tasksPath, projectRoot);
		if (!fileData) {
			throw new Error(`Could not read tasks file at ${tasksPath}`);
		}

		const taggedData = fileData._rawTaggedData || fileData;

		if (!taggedData[sourceName]) {
			throw new Error(`Source tag "${sourceName}" does not exist`);
		}
		if (taggedData[targetName]) {
			throw new Error(`Target tag "${targetName}" already exists`);
		}

		const sourceTasks = getTasksForTag(taggedData, sourceName);

		// Deep-copy the tasks so the two tags never share task objects.
		taggedData[targetName] = {
			tasks: JSON.parse(JSON.stringify(sourceTasks)),
			metadata: {
				created: new Date().toISOString(),
				updated: new Date().toISOString(),
				description:
					description ||
					`Copy of "${sourceName}" created on ${new Date().toLocaleDateString()}`,
				copiedFrom: {
					tag: sourceName,
					date: new Date().toISOString()
				}
			}
		};

		// Strip _rawTaggedData before persisting to prevent file corruption.
		const persistable = Object.fromEntries(
			Object.entries(taggedData).filter(([key]) => key !== '_rawTaggedData')
		);
		writeJSON(tasksPath, persistable, projectRoot);

		logger.success(
			`Successfully copied tag from "${sourceName}" to "${targetName}"`
		);

		const result = {
			sourceName,
			targetName,
			copied: true,
			description:
				description ||
				`Copy of "${sourceName}" created on ${new Date().toLocaleDateString()}`
		};

		if (outputFormat === 'json') {
			return result;
		}

		if (outputFormat === 'text') {
			console.log(
				boxen(
					chalk.green.bold('✓ Tag Copied Successfully') +
						`\n\nSource Tag: ${chalk.cyan(sourceName)}` +
						`\nTarget Tag: ${chalk.green.bold(targetName)}` +
						`\nTasks Copied: ${chalk.yellow(sourceTasks.length)}` +
						(description ? `\nDescription: ${chalk.gray(description)}` : ''),
					{
						padding: 1,
						borderColor: 'green',
						borderStyle: 'round',
						margin: { top: 1, bottom: 1 }
					}
				)
			);
		}

		return result;
	} catch (error) {
		logger.error(`Error copying tag: ${error.message}`);
		throw error;
	}
}
`\\nDescription: ${chalk.gray(description)}` : ''),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\treturn {\n\t\t\tsourceName,\n\t\t\ttargetName,\n\t\t\tcopied: true,\n\t\t\tdescription:\n\t\t\t\tdescription ||\n\t\t\t\t`Copy of \"${sourceName}\" created on ${new Date().toLocaleDateString()}`\n\t\t};\n\t} catch (error) {\n\t\tlogFn.error(`Error copying tag: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Helper function to switch the current tag in state.json\n * @param {string} projectRoot - Project root directory\n * @param {string} tagName - Name of the tag to switch to\n * @returns {Promise<void>}\n */\nasync function switchCurrentTag(projectRoot, tagName) {\n\ttry {\n\t\tconst statePath = path.join(projectRoot, '.taskmaster', 'state.json');\n\n\t\t// Read current state or create default\n\t\tlet state = {};\n\t\tif (fs.existsSync(statePath)) {\n\t\t\tconst rawState = fs.readFileSync(statePath, 'utf8');\n\t\t\tstate = JSON.parse(rawState);\n\t\t}\n\n\t\t// Update current tag and timestamp\n\t\tstate.currentTag = tagName;\n\t\tstate.lastSwitched = new Date().toISOString();\n\n\t\t// Ensure other required state properties exist\n\t\tif (!state.branchTagMapping) {\n\t\t\tstate.branchTagMapping = {};\n\t\t}\n\t\tif (state.migrationNoticeShown === undefined) {\n\t\t\tstate.migrationNoticeShown = false;\n\t\t}\n\n\t\t// Write updated state\n\t\tfs.writeFileSync(statePath, JSON.stringify(state, null, 2), 'utf8');\n\t} catch (error) {\n\t\tlog('warn', `Could not update current tag in state.json: ${error.message}`);\n\t\t// Don't throw - this is not critical for tag operations\n\t}\n}\n\n/**\n * Update branch-tag mapping in state.json\n * @param {string} projectRoot - Project root directory\n * @param {string} branchName - Git branch name\n * @param {string} tagName - Tag name to map to\n * @returns 
{Promise<void>}\n */\nasync function updateBranchTagMapping(projectRoot, branchName, tagName) {\n\ttry {\n\t\tconst statePath = path.join(projectRoot, '.taskmaster', 'state.json');\n\n\t\t// Read current state or create default\n\t\tlet state = {};\n\t\tif (fs.existsSync(statePath)) {\n\t\t\tconst rawState = fs.readFileSync(statePath, 'utf8');\n\t\t\tstate = JSON.parse(rawState);\n\t\t}\n\n\t\t// Ensure branchTagMapping exists\n\t\tif (!state.branchTagMapping) {\n\t\t\tstate.branchTagMapping = {};\n\t\t}\n\n\t\t// Update the mapping\n\t\tstate.branchTagMapping[branchName] = tagName;\n\n\t\t// Write updated state\n\t\tfs.writeFileSync(statePath, JSON.stringify(state, null, 2), 'utf8');\n\t} catch (error) {\n\t\tlog('warn', `Could not update branch-tag mapping: ${error.message}`);\n\t\t// Don't throw - this is not critical for tag operations\n\t}\n}\n\n/**\n * Get tag name for a git branch from state.json mapping\n * @param {string} projectRoot - Project root directory\n * @param {string} branchName - Git branch name\n * @returns {Promise<string|null>} Mapped tag name or null if not found\n */\nasync function getTagForBranch(projectRoot, branchName) {\n\ttry {\n\t\tconst statePath = path.join(projectRoot, '.taskmaster', 'state.json');\n\n\t\tif (!fs.existsSync(statePath)) {\n\t\t\treturn null;\n\t\t}\n\n\t\tconst rawState = fs.readFileSync(statePath, 'utf8');\n\t\tconst state = JSON.parse(rawState);\n\n\t\treturn state.branchTagMapping?.[branchName] || null;\n\t} catch (error) {\n\t\treturn null;\n\t}\n}\n\n/**\n * Create a tag from a git branch name\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} branchName - Git branch name to create tag from\n * @param {Object} options - Options object\n * @param {boolean} [options.copyFromCurrent] - Copy tasks from current tag\n * @param {string} [options.copyFromTag] - Copy tasks from specific tag\n * @param {string} [options.description] - Custom description for the tag\n * @param {boolean} 
[options.autoSwitch] - Automatically switch to the new tag\n * @param {Object} context - Context object containing session and projectRoot\n * @param {string} [context.projectRoot] - Project root path\n * @param {Object} [context.mcpLog] - MCP logger object (optional)\n * @param {string} outputFormat - Output format (text or json)\n * @returns {Promise<Object>} Result object with creation details\n */\nasync function createTagFromBranch(\n\ttasksPath,\n\tbranchName,\n\toptions = {},\n\tcontext = {},\n\toutputFormat = 'text'\n) {\n\tconst { mcpLog, projectRoot } = context;\n\tconst { copyFromCurrent, copyFromTag, description, autoSwitch } = options;\n\n\t// Import git utilities\n\tconst { sanitizeBranchNameForTag, isValidBranchForTag } = await import(\n\t\t'../utils/git-utils.js'\n\t);\n\n\t// Create a consistent logFn object regardless of context\n\tconst logFn = mcpLog || {\n\t\tinfo: (...args) => log('info', ...args),\n\t\twarn: (...args) => log('warn', ...args),\n\t\terror: (...args) => log('error', ...args),\n\t\tdebug: (...args) => log('debug', ...args),\n\t\tsuccess: (...args) => log('success', ...args)\n\t};\n\n\ttry {\n\t\t// Validate branch name\n\t\tif (!branchName || typeof branchName !== 'string') {\n\t\t\tthrow new Error('Branch name is required and must be a string');\n\t\t}\n\n\t\t// Check if branch name is valid for tag creation\n\t\tif (!isValidBranchForTag(branchName)) {\n\t\t\tthrow new Error(\n\t\t\t\t`Branch \"${branchName}\" cannot be converted to a valid tag name`\n\t\t\t);\n\t\t}\n\n\t\t// Sanitize branch name to create tag name\n\t\tconst tagName = sanitizeBranchNameForTag(branchName);\n\n\t\tlogFn.info(`Creating tag \"${tagName}\" from git branch \"${branchName}\"`);\n\n\t\t// Create the tag using existing createTag function\n\t\tconst createResult = await createTag(\n\t\t\ttasksPath,\n\t\t\ttagName,\n\t\t\t{\n\t\t\t\tcopyFromCurrent,\n\t\t\t\tcopyFromTag,\n\t\t\t\tdescription:\n\t\t\t\t\tdescription || `Tag created from git branch 
\"${branchName}\"`\n\t\t\t},\n\t\t\tcontext,\n\t\t\toutputFormat\n\t\t);\n\n\t\t// Update branch-tag mapping\n\t\tawait updateBranchTagMapping(projectRoot, branchName, tagName);\n\t\tlogFn.info(`Updated branch-tag mapping: ${branchName} -> ${tagName}`);\n\n\t\t// Auto-switch to the new tag if requested\n\t\tif (autoSwitch) {\n\t\t\tawait switchCurrentTag(projectRoot, tagName);\n\t\t\tlogFn.info(`Automatically switched to tag \"${tagName}\"`);\n\t\t}\n\n\t\t// For JSON output, return structured data\n\t\tif (outputFormat === 'json') {\n\t\t\treturn {\n\t\t\t\t...createResult,\n\t\t\t\tbranchName,\n\t\t\t\ttagName,\n\t\t\t\tmappingUpdated: true,\n\t\t\t\tautoSwitched: autoSwitch || false\n\t\t\t};\n\t\t}\n\n\t\t// For text output, the createTag function already handles display\n\t\treturn {\n\t\t\tbranchName,\n\t\t\ttagName,\n\t\t\tcreated: true,\n\t\t\tmappingUpdated: true,\n\t\t\tautoSwitched: autoSwitch || false\n\t\t};\n\t} catch (error) {\n\t\tlogFn.error(`Error creating tag from branch: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Automatically switch tag based on current git branch\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {Object} options - Options object\n * @param {boolean} [options.createIfMissing] - Create tag if it doesn't exist\n * @param {boolean} [options.copyFromCurrent] - Copy tasks when creating new tag\n * @param {Object} context - Context object containing session and projectRoot\n * @param {string} [context.projectRoot] - Project root path\n * @param {Object} [context.mcpLog] - MCP logger object (optional)\n * @param {string} outputFormat - Output format (text or json)\n * @returns {Promise<Object>} Result object with switch details\n */\nasync function autoSwitchTagForBranch(\n\ttasksPath,\n\toptions = {},\n\tcontext = {},\n\toutputFormat = 'text'\n) {\n\tconst { mcpLog, projectRoot } = context;\n\tconst { createIfMissing, copyFromCurrent } = options;\n\n\t// Import git utilities\n\tconst 
{\n\t\tgetCurrentBranch,\n\t\tisGitRepository,\n\t\tsanitizeBranchNameForTag,\n\t\tisValidBranchForTag\n\t} = await import('../utils/git-utils.js');\n\n\t// Create a consistent logFn object regardless of context\n\tconst logFn = mcpLog || {\n\t\tinfo: (...args) => log('info', ...args),\n\t\twarn: (...args) => log('warn', ...args),\n\t\terror: (...args) => log('error', ...args),\n\t\tdebug: (...args) => log('debug', ...args),\n\t\tsuccess: (...args) => log('success', ...args)\n\t};\n\n\ttry {\n\t\t// Check if we're in a git repository\n\t\tif (!(await isGitRepository(projectRoot))) {\n\t\t\tlogFn.warn('Not in a git repository, cannot auto-switch tags');\n\t\t\treturn { switched: false, reason: 'not_git_repo' };\n\t\t}\n\n\t\t// Get current git branch\n\t\tconst currentBranch = await getCurrentBranch(projectRoot);\n\t\tif (!currentBranch) {\n\t\t\tlogFn.warn('Could not determine current git branch');\n\t\t\treturn { switched: false, reason: 'no_current_branch' };\n\t\t}\n\n\t\tlogFn.info(`Current git branch: ${currentBranch}`);\n\n\t\t// Check if branch is valid for tag creation\n\t\tif (!isValidBranchForTag(currentBranch)) {\n\t\t\tlogFn.info(`Branch \"${currentBranch}\" is not suitable for tag creation`);\n\t\t\treturn {\n\t\t\t\tswitched: false,\n\t\t\t\treason: 'invalid_branch_for_tag',\n\t\t\t\tbranchName: currentBranch\n\t\t\t};\n\t\t}\n\n\t\t// Check if there's already a mapping for this branch\n\t\tlet tagName = await getTagForBranch(projectRoot, currentBranch);\n\n\t\tif (!tagName) {\n\t\t\t// No mapping exists, create tag name from branch\n\t\t\ttagName = sanitizeBranchNameForTag(currentBranch);\n\t\t}\n\n\t\t// Check if tag exists\n\t\tconst data = readJSON(tasksPath, projectRoot);\n\t\tconst rawData = data._rawTaggedData || data;\n\t\tconst tagExists = rawData[tagName];\n\n\t\tif (!tagExists && createIfMissing) {\n\t\t\t// Create the tag from branch\n\t\t\tlogFn.info(`Creating new tag \"${tagName}\" for branch \"${currentBranch}\"`);\n\n\t\t\tconst 
createResult = await createTagFromBranch(\n\t\t\t\ttasksPath,\n\t\t\t\tcurrentBranch,\n\t\t\t\t{\n\t\t\t\t\tcopyFromCurrent,\n\t\t\t\t\tautoSwitch: true\n\t\t\t\t},\n\t\t\t\tcontext,\n\t\t\t\toutputFormat\n\t\t\t);\n\n\t\t\treturn {\n\t\t\t\tswitched: true,\n\t\t\t\tcreated: true,\n\t\t\t\tbranchName: currentBranch,\n\t\t\t\ttagName,\n\t\t\t\t...createResult\n\t\t\t};\n\t\t} else if (tagExists) {\n\t\t\t// Tag exists, switch to it\n\t\t\tlogFn.info(\n\t\t\t\t`Switching to existing tag \"${tagName}\" for branch \"${currentBranch}\"`\n\t\t\t);\n\n\t\t\tconst switchResult = await useTag(\n\t\t\t\ttasksPath,\n\t\t\t\ttagName,\n\t\t\t\t{},\n\t\t\t\tcontext,\n\t\t\t\toutputFormat\n\t\t\t);\n\n\t\t\t// Update mapping if it didn't exist\n\t\t\tif (!(await getTagForBranch(projectRoot, currentBranch))) {\n\t\t\t\tawait updateBranchTagMapping(projectRoot, currentBranch, tagName);\n\t\t\t}\n\n\t\t\treturn {\n\t\t\t\tswitched: true,\n\t\t\t\tcreated: false,\n\t\t\t\tbranchName: currentBranch,\n\t\t\t\ttagName,\n\t\t\t\t...switchResult\n\t\t\t};\n\t\t} else {\n\t\t\t// Tag doesn't exist and createIfMissing is false\n\t\t\tlogFn.warn(\n\t\t\t\t`Tag \"${tagName}\" for branch \"${currentBranch}\" does not exist`\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tswitched: false,\n\t\t\t\treason: 'tag_not_found',\n\t\t\t\tbranchName: currentBranch,\n\t\t\t\ttagName\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\tlogFn.error(`Error in auto-switch tag for branch: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Check git workflow configuration and perform auto-switch if enabled\n * @param {string} projectRoot - Project root directory\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {Object} context - Context object\n * @returns {Promise<Object|null>} Switch result or null if not enabled\n */\nasync function checkAndAutoSwitchTag(projectRoot, tasksPath, context = {}) {\n\ttry {\n\t\t// Read configuration\n\t\tconst configPath = path.join(projectRoot, '.taskmaster', 
'config.json');\n\t\tif (!fs.existsSync(configPath)) {\n\t\t\treturn null;\n\t\t}\n\n\t\tconst rawConfig = fs.readFileSync(configPath, 'utf8');\n\t\tconst config = JSON.parse(rawConfig);\n\n\t\t// Git workflow has been removed - return null to disable auto-switching\n\t\treturn null;\n\n\t\t// Perform auto-switch\n\t\treturn await autoSwitchTagForBranch(\n\t\t\ttasksPath,\n\t\t\t{ createIfMissing: true, copyFromCurrent: false },\n\t\t\tcontext,\n\t\t\t'json'\n\t\t);\n\t} catch (error) {\n\t\t// Silently fail - this is not critical\n\t\treturn null;\n\t}\n}\n\n// Export all tag management functions\nexport {\n\tcreateTag,\n\tdeleteTag,\n\ttags,\n\tuseTag,\n\trenameTag,\n\tcopyTag,\n\tswitchCurrentTag,\n\tupdateBranchTagMapping,\n\tgetTagForBranch,\n\tcreateTagFromBranch,\n\tautoSwitchTagForBranch,\n\tcheckAndAutoSwitchTag\n};\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/update-tasks.js", "/**\n * update-tasks.js\n * Direct function implementation for updating tasks based on new context\n */\n\nimport path from 'path';\nimport { updateTasks } from '../../../../scripts/modules/task-manager.js';\nimport { createLogWrapper } from '../../tools/utils.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for updating tasks based on new context.\n *\n * @param {Object} args - Command arguments containing projectRoot, from, prompt, research options.\n * @param {string} args.from - The ID of the task to update.\n * @param {string} args.prompt - The prompt to update the task with.\n * @param {boolean} args.research - Whether to use research mode.\n * @param {string} args.tasksJsonPath - Path to the tasks.json file.\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object.\n * @param {Object} context - Context object containing session data.\n * @returns 
{Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function updateTasksDirect(args, log, context = {}) {\n\tconst { session } = context;\n\tconst { from, prompt, research, tasksJsonPath, projectRoot, tag } = args;\n\n\t// Create the standard logger wrapper\n\tconst logWrapper = createLogWrapper(log);\n\n\t// --- Input Validation ---\n\tif (!projectRoot) {\n\t\tlogWrapper.error('updateTasksDirect requires a projectRoot argument.');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'projectRoot is required.'\n\t\t\t}\n\t\t};\n\t}\n\n\tif (!from) {\n\t\tlogWrapper.error('updateTasksDirect called without from ID');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'Starting task ID (from) is required'\n\t\t\t}\n\t\t};\n\t}\n\n\tif (!prompt) {\n\t\tlogWrapper.error('updateTasksDirect called without prompt');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'Update prompt is required'\n\t\t\t}\n\t\t};\n\t}\n\n\tlogWrapper.info(\n\t\t`Updating tasks via direct function. 
From: ${from}, Research: ${research}, File: ${tasksJsonPath}, ProjectRoot: ${projectRoot}`\n\t);\n\n\tenableSilentMode(); // Enable silent mode\n\ttry {\n\t\t// Call the core updateTasks function\n\t\tconst result = await updateTasks(\n\t\t\ttasksJsonPath,\n\t\t\tfrom,\n\t\t\tprompt,\n\t\t\tresearch,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog: logWrapper,\n\t\t\t\tprojectRoot,\n\t\t\t\ttag\n\t\t\t},\n\t\t\t'json'\n\t\t);\n\n\t\tif (result && result.success && Array.isArray(result.updatedTasks)) {\n\t\t\tlogWrapper.success(\n\t\t\t\t`Successfully updated ${result.updatedTasks.length} tasks.`\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Successfully updated ${result.updatedTasks.length} tasks.`,\n\t\t\t\t\ttasksPath: tasksJsonPath,\n\t\t\t\t\tupdatedCount: result.updatedTasks.length,\n\t\t\t\t\ttelemetryData: result.telemetryData,\n\t\t\t\t\ttagInfo: result.tagInfo\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// Handle case where core function didn't return expected success structure\n\t\t\tlogWrapper.error(\n\t\t\t\t'Core updateTasks function did not return a successful structure.'\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\t\tmessage:\n\t\t\t\t\t\tresult?.message ||\n\t\t\t\t\t\t'Core function failed to update tasks or returned unexpected result.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\tlogWrapper.error(`Error executing core updateTasks: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'UPDATE_TASKS_CORE_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown error updating tasks'\n\t\t\t}\n\t\t};\n\t} finally {\n\t\tdisableSilentMode(); // Ensure silent mode is disabled\n\t}\n}\n"], ["/claude-task-master/scripts/modules/task-manager/update-task-by-id.js", "import fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport Table from 'cli-table3';\nimport { z } 
from 'zod'; // Keep Zod for post-parse validation\n\nimport {\n\tlog as consoleLog,\n\treadJSON,\n\twriteJSON,\n\ttruncate,\n\tisSilentMode,\n\tflattenTasksWithSubtasks,\n\tfindProjectRoot\n} from '../utils.js';\n\nimport {\n\tgetStatusWithColor,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayAiUsageSummary\n} from '../ui.js';\n\nimport { generateTextService } from '../ai-services-unified.js';\nimport { getDebugFlag, isApiKeySet } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport { ContextGatherer } from '../utils/contextGatherer.js';\nimport { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';\n\n// Zod schema for post-parsing validation of the updated task object\nconst updatedTaskSchema = z\n\t.object({\n\t\tid: z.number().int(),\n\t\ttitle: z.string(), // Title should be preserved, but check it exists\n\t\tdescription: z.string(),\n\t\tstatus: z.string(),\n\t\tdependencies: z.array(z.union([z.number().int(), z.string()])),\n\t\tpriority: z.string().nullable().default('medium'),\n\t\tdetails: z.string().nullable().default(''),\n\t\ttestStrategy: z.string().nullable().default(''),\n\t\tsubtasks: z\n\t\t\t.array(\n\t\t\t\tz.object({\n\t\t\t\t\tid: z\n\t\t\t\t\t\t.number()\n\t\t\t\t\t\t.int()\n\t\t\t\t\t\t.positive()\n\t\t\t\t\t\t.describe('Sequential subtask ID starting from 1'),\n\t\t\t\t\ttitle: z.string(),\n\t\t\t\t\tdescription: z.string(),\n\t\t\t\t\tstatus: z.string(),\n\t\t\t\t\tdependencies: z.array(z.number().int()).nullable().default([]),\n\t\t\t\t\tdetails: z.string().nullable().default(''),\n\t\t\t\t\ttestStrategy: z.string().nullable().default('')\n\t\t\t\t})\n\t\t\t)\n\t\t\t.nullable()\n\t\t\t.default([])\n\t})\n\t.strip(); // Allows parsing even if AI adds extra fields, but validation focuses on schema\n\n/**\n * Parses a single updated task object from AI's text response.\n * @param {string} text - Response text from AI.\n * @param {number} expectedTaskId - The ID of the task expected.\n 
* @param {Function | Object} logFn - Logging function or MCP logger.\n * @param {boolean} isMCP - Flag indicating MCP context.\n * @returns {Object} Parsed and validated task object.\n * @throws {Error} If parsing or validation fails.\n */\nfunction parseUpdatedTaskFromText(text, expectedTaskId, logFn, isMCP) {\n\t// Report helper consistent with the established pattern\n\tconst report = (level, ...args) => {\n\t\tif (isMCP) {\n\t\t\tif (typeof logFn[level] === 'function') logFn[level](...args);\n\t\t\telse logFn.info(...args);\n\t\t} else if (!isSilentMode()) {\n\t\t\tlogFn(level, ...args);\n\t\t}\n\t};\n\n\treport(\n\t\t'info',\n\t\t'Attempting to parse updated task object from text response...'\n\t);\n\tif (!text || text.trim() === '')\n\t\tthrow new Error('AI response text is empty.');\n\n\tlet cleanedResponse = text.trim();\n\tconst originalResponseForDebug = cleanedResponse;\n\tlet parseMethodUsed = 'raw'; // Keep track of which method worked\n\n\t// --- NEW Step 1: Try extracting between {} first ---\n\tconst firstBraceIndex = cleanedResponse.indexOf('{');\n\tconst lastBraceIndex = cleanedResponse.lastIndexOf('}');\n\tlet potentialJsonFromBraces = null;\n\n\tif (firstBraceIndex !== -1 && lastBraceIndex > firstBraceIndex) {\n\t\tpotentialJsonFromBraces = cleanedResponse.substring(\n\t\t\tfirstBraceIndex,\n\t\t\tlastBraceIndex + 1\n\t\t);\n\t\tif (potentialJsonFromBraces.length <= 2) {\n\t\t\tpotentialJsonFromBraces = null; // Ignore empty braces {}\n\t\t}\n\t}\n\n\t// If {} extraction yielded something, try parsing it immediately\n\tif (potentialJsonFromBraces) {\n\t\ttry {\n\t\t\tconst testParse = JSON.parse(potentialJsonFromBraces);\n\t\t\t// It worked! Use this as the primary cleaned response.\n\t\t\tcleanedResponse = potentialJsonFromBraces;\n\t\t\tparseMethodUsed = 'braces';\n\t\t} catch (e) {\n\t\t\treport(\n\t\t\t\t'info',\n\t\t\t\t'Content between {} looked promising but failed initial parse. 
Proceeding to other methods.'\n\t\t\t);\n\t\t\t// Reset cleanedResponse to original if brace parsing failed\n\t\t\tcleanedResponse = originalResponseForDebug;\n\t\t}\n\t}\n\n\t// --- Step 2: If brace parsing didn't work or wasn't applicable, try code block extraction ---\n\tif (parseMethodUsed === 'raw') {\n\t\tconst codeBlockMatch = cleanedResponse.match(\n\t\t\t/```(?:json|javascript)?\\s*([\\s\\S]*?)\\s*```/i\n\t\t);\n\t\tif (codeBlockMatch) {\n\t\t\tcleanedResponse = codeBlockMatch[1].trim();\n\t\t\tparseMethodUsed = 'codeblock';\n\t\t\treport('info', 'Extracted JSON content from Markdown code block.');\n\t\t} else {\n\t\t\t// --- Step 3: If code block failed, try stripping prefixes ---\n\t\t\tconst commonPrefixes = [\n\t\t\t\t'json\\n',\n\t\t\t\t'javascript\\n'\n\t\t\t\t// ... other prefixes ...\n\t\t\t];\n\t\t\tlet prefixFound = false;\n\t\t\tfor (const prefix of commonPrefixes) {\n\t\t\t\tif (cleanedResponse.toLowerCase().startsWith(prefix)) {\n\t\t\t\t\tcleanedResponse = cleanedResponse.substring(prefix.length).trim();\n\t\t\t\t\tparseMethodUsed = 'prefix';\n\t\t\t\t\treport('info', `Stripped prefix: \"${prefix.trim()}\"`);\n\t\t\t\t\tprefixFound = true;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (!prefixFound) {\n\t\t\t\treport(\n\t\t\t\t\t'warn',\n\t\t\t\t\t'Response does not appear to contain {}, code block, or known prefix. 
Attempting raw parse.'\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t}\n\n\t// --- Step 4: Attempt final parse ---\n\tlet parsedTask;\n\ttry {\n\t\tparsedTask = JSON.parse(cleanedResponse);\n\t} catch (parseError) {\n\t\treport('error', `Failed to parse JSON object: ${parseError.message}`);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`\n\t\t);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`\n\t\t);\n\t\tthrow new Error(\n\t\t\t`Failed to parse JSON response object: ${parseError.message}`\n\t\t);\n\t}\n\n\tif (!parsedTask || typeof parsedTask !== 'object') {\n\t\treport(\n\t\t\t'error',\n\t\t\t`Parsed content is not an object. Type: ${typeof parsedTask}`\n\t\t);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Parsed content sample: ${JSON.stringify(parsedTask).substring(0, 200)}`\n\t\t);\n\t\tthrow new Error('Parsed AI response is not a valid JSON object.');\n\t}\n\n\t// Preprocess the task to ensure subtasks have proper structure\n\tconst preprocessedTask = {\n\t\t...parsedTask,\n\t\tstatus: parsedTask.status || 'pending',\n\t\tdependencies: Array.isArray(parsedTask.dependencies)\n\t\t\t? parsedTask.dependencies\n\t\t\t: [],\n\t\tdetails:\n\t\t\ttypeof parsedTask.details === 'string'\n\t\t\t\t? parsedTask.details\n\t\t\t\t: String(parsedTask.details || ''),\n\t\ttestStrategy:\n\t\t\ttypeof parsedTask.testStrategy === 'string'\n\t\t\t\t? parsedTask.testStrategy\n\t\t\t\t: String(parsedTask.testStrategy || ''),\n\t\t// Ensure subtasks is an array and each subtask has required fields\n\t\tsubtasks: Array.isArray(parsedTask.subtasks)\n\t\t\t? parsedTask.subtasks.map((subtask) => ({\n\t\t\t\t\t...subtask,\n\t\t\t\t\ttitle: subtask.title || '',\n\t\t\t\t\tdescription: subtask.description || '',\n\t\t\t\t\tstatus: subtask.status || 'pending',\n\t\t\t\t\tdependencies: Array.isArray(subtask.dependencies)\n\t\t\t\t\t\t? 
subtask.dependencies\n\t\t\t\t\t\t: [],\n\t\t\t\t\tdetails:\n\t\t\t\t\t\ttypeof subtask.details === 'string'\n\t\t\t\t\t\t\t? subtask.details\n\t\t\t\t\t\t\t: String(subtask.details || ''),\n\t\t\t\t\ttestStrategy:\n\t\t\t\t\t\ttypeof subtask.testStrategy === 'string'\n\t\t\t\t\t\t\t? subtask.testStrategy\n\t\t\t\t\t\t\t: String(subtask.testStrategy || '')\n\t\t\t\t}))\n\t\t\t: []\n\t};\n\n\t// Validate the parsed task object using Zod\n\tconst validationResult = updatedTaskSchema.safeParse(preprocessedTask);\n\tif (!validationResult.success) {\n\t\treport('error', 'Parsed task object failed Zod validation.');\n\t\tvalidationResult.error.errors.forEach((err) => {\n\t\t\treport('error', ` - Field '${err.path.join('.')}': ${err.message}`);\n\t\t});\n\t\tthrow new Error(\n\t\t\t`AI response failed task structure validation: ${validationResult.error.message}`\n\t\t);\n\t}\n\n\t// Final check: ensure ID matches expected ID (AI might hallucinate)\n\tif (validationResult.data.id !== expectedTaskId) {\n\t\treport(\n\t\t\t'warn',\n\t\t\t`AI returned task with ID ${validationResult.data.id}, but expected ${expectedTaskId}. 
Overwriting ID.`\n\t\t);\n\t\tvalidationResult.data.id = expectedTaskId; // Enforce correct ID\n\t}\n\n\treport('info', 'Successfully validated updated task structure.');\n\treturn validationResult.data; // Return the validated task data\n}\n\n/**\n * Update a task by ID with new information using the unified AI service.\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {number} taskId - ID of the task to update\n * @param {string} prompt - Prompt for generating updated task information\n * @param {boolean} [useResearch=false] - Whether to use the research AI role.\n * @param {Object} context - Context object containing session and mcpLog.\n * @param {Object} [context.session] - Session object from MCP server.\n * @param {Object} [context.mcpLog] - MCP logger object.\n * @param {string} [context.projectRoot] - Project root path.\n * @param {string} [context.tag] - Tag for the task\n * @param {string} [outputFormat='text'] - Output format ('text' or 'json').\n * @param {boolean} [appendMode=false] - If true, append to details instead of full update.\n * @returns {Promise<Object|null>} - The updated task or null if update failed.\n */\nasync function updateTaskById(\n\ttasksPath,\n\ttaskId,\n\tprompt,\n\tuseResearch = false,\n\tcontext = {},\n\toutputFormat = 'text',\n\tappendMode = false\n) {\n\tconst { session, mcpLog, projectRoot: providedProjectRoot, tag } = context;\n\tconst logFn = mcpLog || consoleLog;\n\tconst isMCP = !!mcpLog;\n\n\t// Use report helper for logging\n\tconst report = (level, ...args) => {\n\t\tif (isMCP) {\n\t\t\tif (typeof logFn[level] === 'function') logFn[level](...args);\n\t\t\telse logFn.info(...args);\n\t\t} else if (!isSilentMode()) {\n\t\t\tlogFn(level, ...args);\n\t\t}\n\t};\n\n\ttry {\n\t\treport('info', `Updating single task ${taskId} with prompt: \"${prompt}\"`);\n\n\t\t// --- Input Validations (Keep existing) ---\n\t\tif (!Number.isInteger(taskId) || taskId <= 0)\n\t\t\tthrow new Error(\n\t\t\t\t`Invalid task 
ID: ${taskId}. Task ID must be a positive integer.`\n\t\t\t);\n\t\tif (!prompt || typeof prompt !== 'string' || prompt.trim() === '')\n\t\t\tthrow new Error('Prompt cannot be empty.');\n\t\tif (useResearch && !isApiKeySet('perplexity', session)) {\n\t\t\treport(\n\t\t\t\t'warn',\n\t\t\t\t'Perplexity research requested but API key not set. Falling back.'\n\t\t\t);\n\t\t\tif (outputFormat === 'text')\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow('Perplexity AI not available. Falling back to main AI.')\n\t\t\t\t);\n\t\t\tuseResearch = false;\n\t\t}\n\t\tif (!fs.existsSync(tasksPath))\n\t\t\tthrow new Error(`Tasks file not found: ${tasksPath}`);\n\t\t// --- End Input Validations ---\n\n\t\t// Determine project root\n\t\tconst projectRoot = providedProjectRoot || findProjectRoot();\n\t\tif (!projectRoot) {\n\t\t\tthrow new Error('Could not determine project root directory');\n\t\t}\n\n\t\t// --- Task Loading and Status Check (Keep existing) ---\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!data || !data.tasks)\n\t\t\tthrow new Error(`No valid tasks found in ${tasksPath}.`);\n\t\tconst taskIndex = data.tasks.findIndex((task) => task.id === taskId);\n\t\tif (taskIndex === -1) throw new Error(`Task with ID ${taskId} not found.`);\n\t\tconst taskToUpdate = data.tasks[taskIndex];\n\t\tif (taskToUpdate.status === 'done' || taskToUpdate.status === 'completed') {\n\t\t\treport(\n\t\t\t\t'warn',\n\t\t\t\t`Task ${taskId} is already marked as done and cannot be updated`\n\t\t\t);\n\n\t\t\t// Only show warning box for text output (CLI)\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`Task ${taskId} is already marked as ${taskToUpdate.status} and cannot be updated.`\n\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t\t'Completed tasks are locked to maintain consistency. 
To modify a completed task, you must first:'\n\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t\t'1. Change its status to \"pending\" or \"in-progress\"'\n\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\tchalk.white('2. Then run the update-task command'),\n\t\t\t\t\t\t{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t\treturn null;\n\t\t}\n\t\t// --- End Task Loading ---\n\n\t\t// --- Context Gathering ---\n\t\tlet gatheredContext = '';\n\t\ttry {\n\t\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\t\t\tconst allTasksFlat = flattenTasksWithSubtasks(data.tasks);\n\t\t\tconst fuzzySearch = new FuzzyTaskSearch(allTasksFlat, 'update-task');\n\t\t\tconst searchQuery = `${taskToUpdate.title} ${taskToUpdate.description} ${prompt}`;\n\t\t\tconst searchResults = fuzzySearch.findRelevantTasks(searchQuery, {\n\t\t\t\tmaxResults: 5,\n\t\t\t\tincludeSelf: true\n\t\t\t});\n\t\t\tconst relevantTaskIds = fuzzySearch.getTaskIds(searchResults);\n\n\t\t\tconst finalTaskIds = [\n\t\t\t\t...new Set([taskId.toString(), ...relevantTaskIds])\n\t\t\t];\n\n\t\t\tif (finalTaskIds.length > 0) {\n\t\t\t\tconst contextResult = await contextGatherer.gather({\n\t\t\t\t\ttasks: finalTaskIds,\n\t\t\t\t\tformat: 'research'\n\t\t\t\t});\n\t\t\t\tgatheredContext = contextResult.context || '';\n\t\t\t}\n\t\t} catch (contextError) {\n\t\t\treport('warn', `Could not gather context: ${contextError.message}`);\n\t\t}\n\t\t// --- End Context Gathering ---\n\n\t\t// --- Display Task Info (CLI Only - Keep existing) ---\n\t\tif (outputFormat === 'text') {\n\t\t\t// Show the task that will be updated\n\t\t\tconst table = new Table({\n\t\t\t\thead: [\n\t\t\t\t\tchalk.cyan.bold('ID'),\n\t\t\t\t\tchalk.cyan.bold('Title'),\n\t\t\t\t\tchalk.cyan.bold('Status')\n\t\t\t\t],\n\t\t\t\tcolWidths: [5, 60, 10]\n\t\t\t});\n\n\t\t\ttable.push([\n\t\t\t\ttaskToUpdate.id,\n\t\t\t\ttruncate(taskToUpdate.title, 
57),\n\t\t\t\tgetStatusWithColor(taskToUpdate.status)\n\t\t\t]);\n\n\t\t\tconsole.log(\n\t\t\t\tboxen(chalk.white.bold(`Updating Task #${taskId}`), {\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t})\n\t\t\t);\n\n\t\t\tconsole.log(table.toString());\n\n\t\t\t// Display a message about how completed subtasks are handled\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.cyan.bold('How Completed Subtasks Are Handled:') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• Subtasks marked as \"done\" or \"completed\" will be preserved\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• New subtasks will build upon what has already been completed\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• If completed work needs revision, a new subtask will be created instead of modifying done items\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• This approach maintains a clear record of completed work and new requirements'\n\t\t\t\t\t\t),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\t// --- Build Prompts using PromptManager ---\n\t\tconst promptManager = getPromptManager();\n\n\t\tconst promptParams = {\n\t\t\ttask: taskToUpdate,\n\t\t\ttaskJson: JSON.stringify(taskToUpdate, null, 2),\n\t\t\tupdatePrompt: prompt,\n\t\t\tappendMode: appendMode,\n\t\t\tuseResearch: useResearch,\n\t\t\tcurrentDetails: taskToUpdate.details || '(No existing details)',\n\t\t\tgatheredContext: gatheredContext || ''\n\t\t};\n\n\t\tconst variantKey = appendMode\n\t\t\t? 'append'\n\t\t\t: useResearch\n\t\t\t\t? 
'research'\n\t\t\t\t: 'default';\n\n\t\treport(\n\t\t\t'info',\n\t\t\t`Loading prompt template with variant: ${variantKey}, appendMode: ${appendMode}, useResearch: ${useResearch}`\n\t\t);\n\n\t\tlet systemPrompt;\n\t\tlet userPrompt;\n\t\ttry {\n\t\t\tconst promptResult = await promptManager.loadPrompt(\n\t\t\t\t'update-task',\n\t\t\t\tpromptParams,\n\t\t\t\tvariantKey\n\t\t\t);\n\t\t\treport(\n\t\t\t\t'info',\n\t\t\t\t`Prompt result type: ${typeof promptResult}, keys: ${promptResult ? Object.keys(promptResult).join(', ') : 'null'}`\n\t\t\t);\n\n\t\t\t// Extract prompts - loadPrompt returns { systemPrompt, userPrompt, metadata }\n\t\t\tsystemPrompt = promptResult.systemPrompt;\n\t\t\tuserPrompt = promptResult.userPrompt;\n\n\t\t\treport(\n\t\t\t\t'info',\n\t\t\t\t`Loaded prompts - systemPrompt length: ${systemPrompt?.length}, userPrompt length: ${userPrompt?.length}`\n\t\t\t);\n\t\t} catch (error) {\n\t\t\treport('error', `Failed to load prompt template: ${error.message}`);\n\t\t\tthrow new Error(`Failed to load prompt template: ${error.message}`);\n\t\t}\n\n\t\t// If prompts are still not set, throw an error\n\t\tif (!systemPrompt || !userPrompt) {\n\t\t\tthrow new Error(\n\t\t\t\t`Failed to load prompts: systemPrompt=${!!systemPrompt}, userPrompt=${!!userPrompt}`\n\t\t\t);\n\t\t}\n\t\t// --- End Build Prompts ---\n\n\t\tlet loadingIndicator = null;\n\t\tlet aiServiceResponse = null;\n\n\t\tif (!isMCP && outputFormat === 'text') {\n\t\t\tloadingIndicator = startLoadingIndicator(\n\t\t\t\tuseResearch ? 'Updating task with research...\\n' : 'Updating task...\\n'\n\t\t\t);\n\t\t}\n\n\t\ttry {\n\t\t\tconst serviceRole = useResearch ? 'research' : 'main';\n\t\t\taiServiceResponse = await generateTextService({\n\t\t\t\trole: serviceRole,\n\t\t\t\tsession: session,\n\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\tsystemPrompt: systemPrompt,\n\t\t\t\tprompt: userPrompt,\n\t\t\t\tcommandName: 'update-task',\n\t\t\t\toutputType: isMCP ? 
'mcp' : 'cli'\n\t\t\t});\n\n\t\t\tif (loadingIndicator)\n\t\t\t\tstopLoadingIndicator(loadingIndicator, 'AI update complete.');\n\n\t\t\tif (appendMode) {\n\t\t\t\t// Append mode: handle as plain text\n\t\t\t\tconst generatedContentString = aiServiceResponse.mainResult;\n\t\t\t\tlet newlyAddedSnippet = '';\n\n\t\t\t\tif (generatedContentString && generatedContentString.trim()) {\n\t\t\t\t\tconst timestamp = new Date().toISOString();\n\t\t\t\t\tconst formattedBlock = `<info added on ${timestamp}>\\n${generatedContentString.trim()}\\n</info added on ${timestamp}>`;\n\t\t\t\t\tnewlyAddedSnippet = formattedBlock;\n\n\t\t\t\t\t// Append to task details\n\t\t\t\t\ttaskToUpdate.details =\n\t\t\t\t\t\t(taskToUpdate.details ? taskToUpdate.details + '\\n' : '') +\n\t\t\t\t\t\tformattedBlock;\n\t\t\t\t} else {\n\t\t\t\t\treport(\n\t\t\t\t\t\t'warn',\n\t\t\t\t\t\t'AI response was empty or whitespace after trimming. Original details remain unchanged.'\n\t\t\t\t\t);\n\t\t\t\t\tnewlyAddedSnippet = 'No new details were added by the AI.';\n\t\t\t\t}\n\n\t\t\t\t// Update description with timestamp if prompt is short\n\t\t\t\tif (prompt.length < 100) {\n\t\t\t\t\tif (taskToUpdate.description) {\n\t\t\t\t\t\ttaskToUpdate.description += ` [Updated: ${new Date().toLocaleDateString()}]`;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Write the updated task back to file\n\t\t\t\tdata.tasks[taskIndex] = taskToUpdate;\n\t\t\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\t\t\t\treport('success', `Successfully appended to task ${taskId}`);\n\n\t\t\t\t// Display success message for CLI\n\t\t\t\tif (outputFormat === 'text') {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tboxen(\n\t\t\t\t\t\t\tchalk.green(`Successfully appended to task #${taskId}`) +\n\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\tchalk.white.bold('Title:') +\n\t\t\t\t\t\t\t\t' ' +\n\t\t\t\t\t\t\t\ttaskToUpdate.title +\n\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\tchalk.white.bold('Newly Added Content:') +\n\t\t\t\t\t\t\t\t'\\n' 
+\n\t\t\t\t\t\t\t\tchalk.white(newlyAddedSnippet),\n\t\t\t\t\t\t\t{ padding: 1, borderColor: 'green', borderStyle: 'round' }\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Display AI usage telemetry for CLI users\n\t\t\t\tif (outputFormat === 'text' && aiServiceResponse.telemetryData) {\n\t\t\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t\t\t}\n\n\t\t\t\t// Return the updated task\n\t\t\t\treturn {\n\t\t\t\t\tupdatedTask: taskToUpdate,\n\t\t\t\t\ttelemetryData: aiServiceResponse.telemetryData,\n\t\t\t\t\ttagInfo: aiServiceResponse.tagInfo\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Full update mode: Use mainResult (text) for parsing\n\t\t\tconst updatedTask = parseUpdatedTaskFromText(\n\t\t\t\taiServiceResponse.mainResult,\n\t\t\t\ttaskId,\n\t\t\t\tlogFn,\n\t\t\t\tisMCP\n\t\t\t);\n\n\t\t\t// --- Task Validation/Correction (Keep existing logic) ---\n\t\t\tif (!updatedTask || typeof updatedTask !== 'object')\n\t\t\t\tthrow new Error('Received invalid task object from AI.');\n\t\t\tif (!updatedTask.title || !updatedTask.description)\n\t\t\t\tthrow new Error('Updated task missing required fields.');\n\t\t\t// Preserve ID if AI changed it\n\t\t\tif (updatedTask.id !== taskId) {\n\t\t\t\treport('warn', `AI changed task ID. Restoring original ID ${taskId}.`);\n\t\t\t\tupdatedTask.id = taskId;\n\t\t\t}\n\t\t\t// Preserve status if AI changed it\n\t\t\tif (\n\t\t\t\tupdatedTask.status !== taskToUpdate.status &&\n\t\t\t\t!prompt.toLowerCase().includes('status')\n\t\t\t) {\n\t\t\t\treport(\n\t\t\t\t\t'warn',\n\t\t\t\t\t`AI changed task status. 
Restoring original status '${taskToUpdate.status}'.`\n\t\t\t\t);\n\t\t\t\tupdatedTask.status = taskToUpdate.status;\n\t\t\t}\n\t\t\t// Fix subtask IDs if they exist (ensure they are numeric and sequential)\n\t\t\tif (updatedTask.subtasks && Array.isArray(updatedTask.subtasks)) {\n\t\t\t\tlet currentSubtaskId = 1;\n\t\t\t\tupdatedTask.subtasks = updatedTask.subtasks.map((subtask) => {\n\t\t\t\t\t// Fix AI-generated subtask IDs that might be strings or use parent ID as prefix\n\t\t\t\t\tconst correctedSubtask = {\n\t\t\t\t\t\t...subtask,\n\t\t\t\t\t\tid: currentSubtaskId, // Override AI-generated ID with correct sequential ID\n\t\t\t\t\t\tdependencies: Array.isArray(subtask.dependencies)\n\t\t\t\t\t\t\t? subtask.dependencies\n\t\t\t\t\t\t\t\t\t.map((dep) =>\n\t\t\t\t\t\t\t\t\t\ttypeof dep === 'string' ? parseInt(dep, 10) : dep\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t.filter(\n\t\t\t\t\t\t\t\t\t\t(depId) =>\n\t\t\t\t\t\t\t\t\t\t\t!Number.isNaN(depId) &&\n\t\t\t\t\t\t\t\t\t\t\tdepId >= 1 &&\n\t\t\t\t\t\t\t\t\t\t\tdepId < currentSubtaskId\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t: [],\n\t\t\t\t\t\tstatus: subtask.status || 'pending'\n\t\t\t\t\t};\n\t\t\t\t\tcurrentSubtaskId++;\n\t\t\t\t\treturn correctedSubtask;\n\t\t\t\t});\n\t\t\t\treport(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Fixed ${updatedTask.subtasks.length} subtask IDs to be sequential numeric IDs.`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Preserve completed subtasks (Keep existing logic)\n\t\t\tif (taskToUpdate.subtasks?.length > 0) {\n\t\t\t\tif (!updatedTask.subtasks) {\n\t\t\t\t\treport(\n\t\t\t\t\t\t'warn',\n\t\t\t\t\t\t'Subtasks removed by AI. 
Restoring original subtasks.'\n\t\t\t\t\t);\n\t\t\t\t\tupdatedTask.subtasks = taskToUpdate.subtasks;\n\t\t\t\t} else {\n\t\t\t\t\tconst completedOriginal = taskToUpdate.subtasks.filter(\n\t\t\t\t\t\t(st) => st.status === 'done' || st.status === 'completed'\n\t\t\t\t\t);\n\t\t\t\t\tcompletedOriginal.forEach((compSub) => {\n\t\t\t\t\t\tconst updatedSub = updatedTask.subtasks.find(\n\t\t\t\t\t\t\t(st) => st.id === compSub.id\n\t\t\t\t\t\t);\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t!updatedSub ||\n\t\t\t\t\t\t\tJSON.stringify(updatedSub) !== JSON.stringify(compSub)\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\treport(\n\t\t\t\t\t\t\t\t'warn',\n\t\t\t\t\t\t\t\t`Completed subtask ${compSub.id} was modified or removed. Restoring.`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t// Remove potentially modified version\n\t\t\t\t\t\t\tupdatedTask.subtasks = updatedTask.subtasks.filter(\n\t\t\t\t\t\t\t\t(st) => st.id !== compSub.id\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t// Add back original\n\t\t\t\t\t\t\tupdatedTask.subtasks.push(compSub);\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t\t// Deduplicate just in case\n\t\t\t\t\tconst subtaskIds = new Set();\n\t\t\t\t\tupdatedTask.subtasks = updatedTask.subtasks.filter((st) => {\n\t\t\t\t\t\tif (!subtaskIds.has(st.id)) {\n\t\t\t\t\t\t\tsubtaskIds.add(st.id);\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t}\n\t\t\t\t\t\treport('warn', `Duplicate subtask ID ${st.id} removed.`);\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\t\t\t// --- End Task Validation/Correction ---\n\n\t\t\t// --- Update Task Data (Keep existing) ---\n\t\t\tdata.tasks[taskIndex] = updatedTask;\n\t\t\t// --- End Update Task Data ---\n\n\t\t\t// --- Write File and Generate (Unchanged) ---\n\t\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\t\t\treport('success', `Successfully updated task ${taskId}`);\n\t\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath));\n\t\t\t// --- End Write File ---\n\n\t\t\t// --- Display CLI Telemetry ---\n\t\t\tif (outputFormat === 'text' && 
aiServiceResponse.telemetryData) {\n\t\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli'); // <<< ADD display\n\t\t\t}\n\n\t\t\t// --- Return Success with Telemetry ---\n\t\t\treturn {\n\t\t\t\tupdatedTask: updatedTask, // Return the updated task object\n\t\t\t\ttelemetryData: aiServiceResponse.telemetryData, // <<< ADD telemetryData\n\t\t\t\ttagInfo: aiServiceResponse.tagInfo\n\t\t\t};\n\t\t} catch (error) {\n\t\t\t// Catch errors from generateTextService\n\t\t\tif (loadingIndicator) stopLoadingIndicator(loadingIndicator);\n\t\t\treport('error', `Error during AI service call: ${error.message}`);\n\t\t\tif (error.message.includes('API key')) {\n\t\t\t\treport('error', 'Please ensure API keys are configured correctly.');\n\t\t\t}\n\t\t\tthrow error; // Re-throw error\n\t\t}\n\t} catch (error) {\n\t\t// General error catch\n\t\t// --- General Error Handling (Keep existing) ---\n\t\treport('error', `Error updating task: ${error.message}`);\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\t// ... 
helpful hints ...\n\t\t\tif (getDebugFlag(session)) console.error(error);\n\t\t\tprocess.exit(1);\n\t\t} else {\n\t\t\tthrow error; // Re-throw for MCP\n\t\t}\n\t\treturn null; // Indicate failure in CLI case if process doesn't exit\n\t\t// --- End General Error Handling ---\n\t}\n}\n\nexport default updateTaskById;\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/next-task.js", "/**\n * next-task.js\n * Direct function implementation for finding the next task to work on\n */\n\nimport { findNextTask } from '../../../../scripts/modules/task-manager.js';\nimport {\n\treadJSON,\n\treadComplexityReport\n} from '../../../../scripts/modules/utils.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for finding the next task to work on with error handling and caching.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.reportPath - Path to the report file.\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @returns {Promise<Object>} - Next task result { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nexport async function nextTaskDirect(args, log, context = {}) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, reportPath, projectRoot, tag } = args;\n\tconst { session } = context;\n\n\tif (!tasksJsonPath) {\n\t\tlog.error('nextTaskDirect called without tasksJsonPath');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t}\n\t\t};\n\t}\n\n\t// Define the action function to be executed on cache miss\n\tconst coreNextTaskAction = async () => {\n\t\ttry {\n\t\t\t// Enable silent mode to prevent console logs from 
interfering with JSON response\n\t\t\tenableSilentMode();\n\n\t\t\tlog.info(`Finding next task from ${tasksJsonPath}`);\n\n\t\t\t// Read tasks data using the provided path\n\t\t\tconst data = readJSON(tasksJsonPath, projectRoot, tag);\n\t\t\tif (!data || !data.tasks) {\n\t\t\t\tdisableSilentMode(); // Disable before return\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'INVALID_TASKS_FILE',\n\t\t\t\t\t\tmessage: `No valid tasks found in ${tasksJsonPath}`\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Read the complexity report\n\t\t\tconst complexityReport = readComplexityReport(reportPath);\n\n\t\t\t// Find the next task\n\t\t\tconst nextTask = findNextTask(data.tasks, complexityReport);\n\n\t\t\tif (!nextTask) {\n\t\t\t\tlog.info(\n\t\t\t\t\t'No eligible next task found. All tasks are either completed or have unsatisfied dependencies'\n\t\t\t\t);\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: true,\n\t\t\t\t\tdata: {\n\t\t\t\t\t\tmessage:\n\t\t\t\t\t\t\t'No eligible next task found. All tasks are either completed or have unsatisfied dependencies',\n\t\t\t\t\t\tnextTask: null\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Check if it's a subtask\n\t\t\tconst isSubtask =\n\t\t\t\ttypeof nextTask.id === 'string' && nextTask.id.includes('.');\n\n\t\t\tconst taskOrSubtask = isSubtask ? 'subtask' : 'task';\n\n\t\t\tconst additionalAdvice = isSubtask\n\t\t\t\t? 'Subtasks can be updated with timestamped details as you implement them. This is useful for tracking progress, marking milestones and insights (of successful or successive falures in attempting to implement the subtask). Research can be used when updating the subtask to collect up-to-date information, and can be helpful to solve a repeating problem the agent is unable to solve. 
It is a good idea to get-task the parent task to collect the overall context of the task, and to get-task the subtask to collect the specific details of the subtask.'\n\t\t\t\t: 'Tasks can be updated to reflect a change in the direction of the task, or to reformulate the task per your prompt. Research can be used when updating the task to collect up-to-date information. It is best to update subtasks as you work on them, and to update the task for more high-level changes that may affect pending subtasks or the general direction of the task.';\n\n\t\t\t// Restore normal logging\n\t\t\tdisableSilentMode();\n\n\t\t\t// Return the next task data with the full tasks array for reference\n\t\t\tlog.info(\n\t\t\t\t`Successfully found next task ${nextTask.id}: ${nextTask.title}. Is subtask: ${isSubtask}`\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tnextTask,\n\t\t\t\t\tisSubtask,\n\t\t\t\t\tnextSteps: `When ready to work on the ${taskOrSubtask}, use set-status to set the status to \"in progress\" ${additionalAdvice}`\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\t// Make sure to restore normal logging even if there's an error\n\t\t\tdisableSilentMode();\n\n\t\t\tlog.error(`Error finding next task: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\t\tmessage: error.message || 'Failed to find next task'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t};\n\n\t// Use the caching utility\n\ttry {\n\t\tconst result = await coreNextTaskAction();\n\t\tlog.info('nextTaskDirect completed.');\n\t\treturn result;\n\t} catch (error) {\n\t\tlog.error(`Unexpected error during nextTask: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'UNEXPECTED_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/ui.js", "/**\n * ui.js\n * User interface functions for the Task Master CLI\n */\n\nimport chalk from 
'chalk';\nimport figlet from 'figlet';\nimport boxen from 'boxen';\nimport ora from 'ora';\nimport Table from 'cli-table3';\nimport gradient from 'gradient-string';\nimport readline from 'readline';\nimport {\n\tlog,\n\tfindTaskById,\n\treadJSON,\n\ttruncate,\n\tisSilentMode\n} from './utils.js';\nimport fs from 'fs';\nimport {\n\tfindNextTask,\n\tanalyzeTaskComplexity,\n\treadComplexityReport\n} from './task-manager.js';\nimport { getProjectName, getDefaultSubtasks } from './config-manager.js';\nimport { TASK_STATUS_OPTIONS } from '../../src/constants/task-status.js';\nimport {\n\tTASKMASTER_CONFIG_FILE,\n\tTASKMASTER_TASKS_FILE\n} from '../../src/constants/paths.js';\nimport { getTaskMasterVersion } from '../../src/utils/getVersion.js';\n\n// Create a color gradient for the banner\nconst coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);\nconst warmGradient = gradient(['#fb8b24', '#e36414', '#9a031e']);\n\n/**\n * Display FYI notice about tagged task lists (only if migration occurred)\n * @param {Object} data - Data object that may contain _migrationHappened flag\n */\nfunction displayTaggedTasksFYI(data) {\n\tif (isSilentMode() || !data || !data._migrationHappened) return;\n\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white.bold('FYI: ') +\n\t\t\t\tchalk.gray('Taskmaster now supports separate task lists per tag. 
') +\n\t\t\t\tchalk.cyan(\n\t\t\t\t\t'Use the --tag flag to create/read/update/filter tasks by tag.'\n\t\t\t\t),\n\t\t\t{\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 2, right: 2 },\n\t\t\t\tborderColor: 'cyan',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t}\n\t\t)\n\t);\n}\n\n/**\n * Display a small, non-intrusive indicator showing the current tag context\n * @param {string} tagName - The tag name to display\n * @param {Object} options - Display options\n * @param {boolean} [options.skipIfMaster=false] - Don't show indicator if tag is 'master'\n * @param {boolean} [options.dim=false] - Use dimmed styling\n */\nfunction displayCurrentTagIndicator(tag, options = {}) {\n\tif (isSilentMode()) return;\n\n\tconst { skipIfMaster = false, dim = false } = options;\n\n\t// Skip display for master tag only if explicitly requested\n\tif (skipIfMaster && tag === 'master') return;\n\n\t// Create a small, tasteful tag indicator\n\tconst tagIcon = '🏷️';\n\tconst tagText = dim\n\t\t? 
chalk.gray(`${tagIcon} tag: ${tag}`)\n\t\t: chalk.dim(`${tagIcon} tag: `) + chalk.cyan(tag);\n\n\tconsole.log(tagText);\n}\n\n/**\n * Display a fancy banner for the CLI\n */\nfunction displayBanner() {\n\tif (isSilentMode()) return;\n\n\t// console.clear(); // Removing this to avoid clearing the terminal per command\n\tconst bannerText = figlet.textSync('Task Master', {\n\t\tfont: 'Standard',\n\t\thorizontalLayout: 'default',\n\t\tverticalLayout: 'default'\n\t});\n\n\tconsole.log(coolGradient(bannerText));\n\n\t// Add creator credit line below the banner\n\tconsole.log(\n\t\tchalk.dim('by ') + chalk.cyan.underline('https://x.com/eyaltoledano')\n\t);\n\n\t// Read version directly from package.json\n\tconst version = getTaskMasterVersion();\n\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white(\n\t\t\t\t`${chalk.bold('Version:')} ${version} ${chalk.bold('Project:')} ${getProjectName(null)}`\n\t\t\t),\n\t\t\t{\n\t\t\t\tpadding: 1,\n\t\t\t\tmargin: { top: 0, bottom: 1 },\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tborderColor: 'cyan'\n\t\t\t}\n\t\t)\n\t);\n}\n\n/**\n * Start a loading indicator with an animated spinner\n * @param {string} message - Message to display next to the spinner\n * @returns {Object} Spinner object\n */\nfunction startLoadingIndicator(message) {\n\tif (isSilentMode()) return null;\n\n\tconst spinner = ora({\n\t\ttext: message,\n\t\tcolor: 'cyan'\n\t}).start();\n\n\treturn spinner;\n}\n\n/**\n * Stop a loading indicator (basic stop, no success/fail indicator)\n * @param {Object} spinner - Spinner object to stop\n */\nfunction stopLoadingIndicator(spinner) {\n\tif (spinner && typeof spinner.stop === 'function') {\n\t\tspinner.stop();\n\t}\n}\n\n/**\n * Complete a loading indicator with success (shows checkmark)\n * @param {Object} spinner - Spinner object to complete\n * @param {string} message - Optional success message (defaults to current text)\n */\nfunction succeedLoadingIndicator(spinner, message = null) {\n\tif (spinner && typeof spinner.succeed 
=== 'function') {\n\t\tif (message) {\n\t\t\tspinner.succeed(message);\n\t\t} else {\n\t\t\tspinner.succeed();\n\t\t}\n\t}\n}\n\n/**\n * Complete a loading indicator with failure (shows X)\n * @param {Object} spinner - Spinner object to fail\n * @param {string} message - Optional failure message (defaults to current text)\n */\nfunction failLoadingIndicator(spinner, message = null) {\n\tif (spinner && typeof spinner.fail === 'function') {\n\t\tif (message) {\n\t\t\tspinner.fail(message);\n\t\t} else {\n\t\t\tspinner.fail();\n\t\t}\n\t}\n}\n\n/**\n * Complete a loading indicator with warning (shows warning symbol)\n * @param {Object} spinner - Spinner object to warn\n * @param {string} message - Optional warning message (defaults to current text)\n */\nfunction warnLoadingIndicator(spinner, message = null) {\n\tif (spinner && typeof spinner.warn === 'function') {\n\t\tif (message) {\n\t\t\tspinner.warn(message);\n\t\t} else {\n\t\t\tspinner.warn();\n\t\t}\n\t}\n}\n\n/**\n * Complete a loading indicator with info (shows info symbol)\n * @param {Object} spinner - Spinner object to complete with info\n * @param {string} message - Optional info message (defaults to current text)\n */\nfunction infoLoadingIndicator(spinner, message = null) {\n\tif (spinner && typeof spinner.info === 'function') {\n\t\tif (message) {\n\t\t\tspinner.info(message);\n\t\t} else {\n\t\t\tspinner.info();\n\t\t}\n\t}\n}\n\n/**\n * Create a colored progress bar\n * @param {number} percent - The completion percentage\n * @param {number} length - The total length of the progress bar in characters\n * @param {Object} statusBreakdown - Optional breakdown of non-complete statuses (e.g., {pending: 20, 'in-progress': 10})\n * @returns {string} The formatted progress bar\n */\nfunction createProgressBar(percent, length = 30, statusBreakdown = null) {\n\t// Adjust the percent to treat deferred and cancelled as complete\n\tconst effectivePercent = statusBreakdown\n\t\t? 
Math.min(\n\t\t\t\t100,\n\t\t\t\tpercent +\n\t\t\t\t\t(statusBreakdown.deferred || 0) +\n\t\t\t\t\t(statusBreakdown.cancelled || 0)\n\t\t\t)\n\t\t: percent;\n\n\t// Calculate how many characters to fill for \"true completion\"\n\tconst trueCompletedFilled = Math.round((percent * length) / 100);\n\n\t// Calculate how many characters to fill for \"effective completion\" (including deferred/cancelled)\n\tconst effectiveCompletedFilled = Math.round(\n\t\t(effectivePercent * length) / 100\n\t);\n\n\t// The \"deferred/cancelled\" section (difference between true and effective)\n\tconst deferredCancelledFilled =\n\t\teffectiveCompletedFilled - trueCompletedFilled;\n\n\t// Set the empty section (remaining after effective completion)\n\tconst empty = length - effectiveCompletedFilled;\n\n\t// Determine color based on percentage for the completed section\n\tlet completedColor;\n\tif (percent < 25) {\n\t\tcompletedColor = chalk.red;\n\t} else if (percent < 50) {\n\t\tcompletedColor = chalk.hex('#FFA500'); // Orange\n\t} else if (percent < 75) {\n\t\tcompletedColor = chalk.yellow;\n\t} else if (percent < 100) {\n\t\tcompletedColor = chalk.green;\n\t} else {\n\t\tcompletedColor = chalk.hex('#006400'); // Dark green\n\t}\n\n\t// Create colored sections\n\tconst completedSection = completedColor('█'.repeat(trueCompletedFilled));\n\n\t// Gray section for deferred/cancelled items\n\tconst deferredCancelledSection = chalk.gray(\n\t\t'█'.repeat(deferredCancelledFilled)\n\t);\n\n\t// If we have a status breakdown, create a multi-colored remaining section\n\tlet remainingSection = '';\n\n\tif (statusBreakdown && empty > 0) {\n\t\t// Status colors (matching the statusConfig colors in getStatusWithColor)\n\t\tconst statusColors = {\n\t\t\tpending: chalk.yellow,\n\t\t\t'in-progress': chalk.hex('#FFA500'), // Orange\n\t\t\tblocked: chalk.red,\n\t\t\treview: chalk.magenta\n\t\t\t// Deferred and cancelled are treated as part of the completed section\n\t\t};\n\n\t\t// Calculate proportions 
for each status\n\t\tconst totalRemaining = Object.entries(statusBreakdown)\n\t\t\t.filter(\n\t\t\t\t([status]) =>\n\t\t\t\t\t!['deferred', 'cancelled', 'done', 'completed'].includes(status)\n\t\t\t)\n\t\t\t.reduce((sum, [_, val]) => sum + val, 0);\n\n\t\t// If no remaining tasks with tracked statuses, just use gray\n\t\tif (totalRemaining <= 0) {\n\t\t\tremainingSection = chalk.gray('░'.repeat(empty));\n\t\t} else {\n\t\t\t// Track how many characters we've added\n\t\t\tlet addedChars = 0;\n\n\t\t\t// Add each status section proportionally\n\t\t\tfor (const [status, percentage] of Object.entries(statusBreakdown)) {\n\t\t\t\t// Skip statuses that are considered complete\n\t\t\t\tif (['deferred', 'cancelled', 'done', 'completed'].includes(status))\n\t\t\t\t\tcontinue;\n\n\t\t\t\t// Calculate how many characters this status should fill\n\t\t\t\tconst statusChars = Math.round((percentage / totalRemaining) * empty);\n\n\t\t\t\t// Make sure we don't exceed the total length due to rounding\n\t\t\t\tconst actualChars = Math.min(statusChars, empty - addedChars);\n\n\t\t\t\t// Add colored section for this status\n\t\t\t\tconst colorFn = statusColors[status] || chalk.gray;\n\t\t\t\tremainingSection += colorFn('░'.repeat(actualChars));\n\n\t\t\t\taddedChars += actualChars;\n\t\t\t}\n\n\t\t\t// If we have any remaining space due to rounding, fill with gray\n\t\t\tif (addedChars < empty) {\n\t\t\t\tremainingSection += chalk.gray('░'.repeat(empty - addedChars));\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// Default to gray for the empty section if no breakdown provided\n\t\tremainingSection = chalk.gray('░'.repeat(empty));\n\t}\n\n\t// Effective percentage text color should reflect the highest category\n\tconst percentTextColor =\n\t\tpercent === 100\n\t\t\t? chalk.hex('#006400') // Dark green for 100%\n\t\t\t: effectivePercent === 100\n\t\t\t\t? 
chalk.gray // Gray for 100% with deferred/cancelled\n\t\t\t\t: completedColor; // Otherwise match the completed color\n\n\t// Build the complete progress bar\n\treturn `${completedSection}${deferredCancelledSection}${remainingSection} ${percentTextColor(`${effectivePercent.toFixed(0)}%`)}`;\n}\n\n/**\n * Get a colored status string based on the status value\n * @param {string} status - Task status (e.g., \"done\", \"pending\", \"in-progress\")\n * @param {boolean} forTable - Whether the status is being displayed in a table\n * @returns {string} Colored status string\n */\nfunction getStatusWithColor(status, forTable = false) {\n\tif (!status) {\n\t\treturn chalk.gray('❓ unknown');\n\t}\n\n\tconst statusConfig = {\n\t\tdone: { color: chalk.green, icon: '✓', tableIcon: '✓' },\n\t\tcompleted: { color: chalk.green, icon: '✓', tableIcon: '✓' },\n\t\tpending: { color: chalk.yellow, icon: '○', tableIcon: '⏱' },\n\t\t'in-progress': { color: chalk.hex('#FFA500'), icon: '🔄', tableIcon: '►' },\n\t\tdeferred: { color: chalk.gray, icon: 'x', tableIcon: '⏱' },\n\t\tblocked: { color: chalk.red, icon: '!', tableIcon: '✗' },\n\t\treview: { color: chalk.magenta, icon: '?', tableIcon: '?' },\n\t\tcancelled: { color: chalk.gray, icon: '❌', tableIcon: 'x' }\n\t};\n\n\tconst config = statusConfig[status.toLowerCase()] || {\n\t\tcolor: chalk.red,\n\t\ticon: '❌',\n\t\ttableIcon: '✗'\n\t};\n\n\t// Use simpler icons for table display to prevent border issues\n\tif (forTable) {\n\t\t// Use ASCII characters instead of Unicode for completely stable display\n\t\tconst simpleIcons = {\n\t\t\tdone: '✓',\n\t\t\tcompleted: '✓',\n\t\t\tpending: '○',\n\t\t\t'in-progress': '►',\n\t\t\tdeferred: 'x',\n\t\t\tblocked: '!', // Using plain x character for better compatibility\n\t\t\treview: '?' 
// Using circled dot symbol\n\t\t};\n\t\tconst simpleIcon = simpleIcons[status.toLowerCase()] || 'x';\n\t\treturn config.color(`${simpleIcon} ${status}`);\n\t}\n\n\treturn config.color(`${config.icon} ${status}`);\n}\n\n/**\n * Format dependencies list with status indicators\n * @param {Array} dependencies - Array of dependency IDs\n * @param {Array} allTasks - Array of all tasks\n * @param {boolean} forConsole - Whether the output is for console display\n * @param {Object|null} complexityReport - Optional pre-loaded complexity report\n * @returns {string} Formatted dependencies string\n */\nfunction formatDependenciesWithStatus(\n\tdependencies,\n\tallTasks,\n\tforConsole = false,\n\tcomplexityReport = null // Add complexityReport parameter\n) {\n\tif (\n\t\t!dependencies ||\n\t\t!Array.isArray(dependencies) ||\n\t\tdependencies.length === 0\n\t) {\n\t\treturn forConsole ? chalk.gray('None') : 'None';\n\t}\n\n\tconst formattedDeps = dependencies.map((depId) => {\n\t\tconst depIdStr = depId.toString(); // Ensure string format for display\n\n\t\t// Check if it's already a fully qualified subtask ID (like \"22.1\")\n\t\tif (depIdStr.includes('.')) {\n\t\t\tconst [parentId, subtaskId] = depIdStr\n\t\t\t\t.split('.')\n\t\t\t\t.map((id) => parseInt(id, 10));\n\n\t\t\t// Find the parent task\n\t\t\tconst parentTask = allTasks.find((t) => t.id === parentId);\n\t\t\tif (!parentTask || !parentTask.subtasks) {\n\t\t\t\treturn forConsole\n\t\t\t\t\t? chalk.red(`${depIdStr} (Not found)`)\n\t\t\t\t\t: `${depIdStr} (Not found)`;\n\t\t\t}\n\n\t\t\t// Find the subtask\n\t\t\tconst subtask = parentTask.subtasks.find((st) => st.id === subtaskId);\n\t\t\tif (!subtask) {\n\t\t\t\treturn forConsole\n\t\t\t\t\t? 
chalk.red(`${depIdStr} (Not found)`)\n\t\t\t\t\t: `${depIdStr} (Not found)`;\n\t\t\t}\n\n\t\t\t// Format with status\n\t\t\tconst status = subtask.status || 'pending';\n\t\t\tconst isDone =\n\t\t\t\tstatus.toLowerCase() === 'done' || status.toLowerCase() === 'completed';\n\t\t\tconst isInProgress = status.toLowerCase() === 'in-progress';\n\n\t\t\tif (forConsole) {\n\t\t\t\tif (isDone) {\n\t\t\t\t\treturn chalk.green.bold(depIdStr);\n\t\t\t\t} else if (isInProgress) {\n\t\t\t\t\treturn chalk.hex('#FFA500').bold(depIdStr);\n\t\t\t\t} else {\n\t\t\t\t\treturn chalk.red.bold(depIdStr);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// For plain text output (task files), return just the ID without any formatting or emoji\n\t\t\treturn depIdStr;\n\t\t}\n\n\t\t// If depId is a number less than 100, it's likely a reference to a subtask ID in the current task\n\t\t// This case is typically handled elsewhere (in task-specific code) before calling this function\n\n\t\t// For regular task dependencies (not subtasks)\n\t\t// Convert string depId to number if needed\n\t\tconst numericDepId =\n\t\t\ttypeof depId === 'string' ? parseInt(depId, 10) : depId;\n\n\t\t// Look up the task using the numeric ID\n\t\tconst depTaskResult = findTaskById(\n\t\t\tallTasks,\n\t\t\tnumericDepId,\n\t\t\tcomplexityReport\n\t\t);\n\t\tconst depTask = depTaskResult.task; // Access the task object from the result\n\n\t\tif (!depTask) {\n\t\t\treturn forConsole\n\t\t\t\t? 
chalk.red(`${depIdStr} (Not found)`)\n\t\t\t\t: `${depIdStr} (Not found)`;\n\t\t}\n\n\t\t// Format with status\n\t\tconst status = depTask.status || 'pending';\n\t\tconst isDone =\n\t\t\tstatus.toLowerCase() === 'done' || status.toLowerCase() === 'completed';\n\t\tconst isInProgress = status.toLowerCase() === 'in-progress';\n\n\t\tif (forConsole) {\n\t\t\tif (isDone) {\n\t\t\t\treturn chalk.green.bold(depIdStr);\n\t\t\t} else if (isInProgress) {\n\t\t\t\treturn chalk.yellow.bold(depIdStr);\n\t\t\t} else {\n\t\t\t\treturn chalk.red.bold(depIdStr);\n\t\t\t}\n\t\t}\n\n\t\t// For plain text output (task files), return just the ID without any formatting or emoji\n\t\treturn depIdStr;\n\t});\n\n\treturn formattedDeps.join(', ');\n}\n\n/**\n * Display a comprehensive help guide\n */\nfunction displayHelp() {\n\t// Get terminal width - moved to top of function to make it available throughout\n\tconst terminalWidth = process.stdout.columns || 100; // Default to 100 if can't detect\n\n\tconsole.log(\n\t\tboxen(chalk.white.bold('Task Master CLI'), {\n\t\t\tpadding: 1,\n\t\t\tborderColor: 'blue',\n\t\t\tborderStyle: 'round',\n\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t})\n\t);\n\n\t// Command categories\n\tconst commandCategories = [\n\t\t{\n\t\t\ttitle: 'Project Setup & Configuration',\n\t\t\tcolor: 'blue',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'init',\n\t\t\t\t\targs: '[--name=<name>] [--description=<desc>] [-y]',\n\t\t\t\t\tdesc: 'Initialize a new project with Task Master structure'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'models',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'View current AI model configuration and available models'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'models --setup',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'Run interactive setup to configure AI models'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'models --set-main',\n\t\t\t\t\targs: '<model_id>',\n\t\t\t\t\tdesc: 'Set the primary model for task generation'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'models 
--set-research',\n\t\t\t\t\targs: '<model_id>',\n\t\t\t\t\tdesc: 'Set the model for research operations'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'models --set-fallback',\n\t\t\t\t\targs: '<model_id>',\n\t\t\t\t\tdesc: 'Set the fallback model (optional)'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Task Generation',\n\t\t\tcolor: 'cyan',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'parse-prd',\n\t\t\t\t\targs: '--input=<file.txt> [--num-tasks=10]',\n\t\t\t\t\tdesc: 'Generate tasks from a PRD document'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'generate',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'Create individual task files from tasks.json'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Task Management',\n\t\t\tcolor: 'green',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'list',\n\t\t\t\t\targs: '[--status=<status>] [--with-subtasks]',\n\t\t\t\t\tdesc: 'List all tasks with their status'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'set-status',\n\t\t\t\t\targs: '--id=<id> --status=<status>',\n\t\t\t\t\tdesc: `Update task status (${TASK_STATUS_OPTIONS.join(', ')})`\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'sync-readme',\n\t\t\t\t\targs: '[--with-subtasks] [--status=<status>]',\n\t\t\t\t\tdesc: 'Export tasks to README.md with professional formatting'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'update',\n\t\t\t\t\targs: '--from=<id> --prompt=\"<context>\"',\n\t\t\t\t\tdesc: 'Update multiple tasks based on new requirements'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'update-task',\n\t\t\t\t\targs: '--id=<id> --prompt=\"<context>\"',\n\t\t\t\t\tdesc: 'Update a single specific task with new information'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'update-subtask',\n\t\t\t\t\targs: '--id=<parentId.subtaskId> --prompt=\"<context>\"',\n\t\t\t\t\tdesc: 'Append additional information to a subtask'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'add-task',\n\t\t\t\t\targs: '--prompt=\"<text>\" [--dependencies=<ids>] [--priority=<priority>]',\n\t\t\t\t\tdesc: 'Add a new task using 
AI'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'remove-task',\n\t\t\t\t\targs: '--id=<id> [-y]',\n\t\t\t\t\tdesc: 'Permanently remove a task or subtask'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Subtask Management',\n\t\t\tcolor: 'yellow',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'add-subtask',\n\t\t\t\t\targs: '--parent=<id> --title=\"<title>\" [--description=\"<desc>\"]',\n\t\t\t\t\tdesc: 'Add a new subtask to a parent task'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'add-subtask',\n\t\t\t\t\targs: '--parent=<id> --task-id=<id>',\n\t\t\t\t\tdesc: 'Convert an existing task into a subtask'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'remove-subtask',\n\t\t\t\t\targs: '--id=<parentId.subtaskId> [--convert]',\n\t\t\t\t\tdesc: 'Remove a subtask (optionally convert to standalone task)'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'clear-subtasks',\n\t\t\t\t\targs: '--id=<id>',\n\t\t\t\t\tdesc: 'Remove all subtasks from specified tasks'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'clear-subtasks --all',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'Remove subtasks from all tasks'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Task Analysis & Breakdown',\n\t\t\tcolor: 'magenta',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'analyze-complexity',\n\t\t\t\t\targs: '[--research] [--threshold=5]',\n\t\t\t\t\tdesc: 'Analyze tasks and generate expansion recommendations'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'complexity-report',\n\t\t\t\t\targs: '[--file=<path>]',\n\t\t\t\t\tdesc: 'Display the complexity analysis report'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'expand',\n\t\t\t\t\targs: '--id=<id> [--num=5] [--research] [--prompt=\"<context>\"]',\n\t\t\t\t\tdesc: 'Break down tasks into detailed subtasks'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'expand --all',\n\t\t\t\t\targs: '[--force] [--research]',\n\t\t\t\t\tdesc: 'Expand all pending tasks with subtasks'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'research',\n\t\t\t\t\targs: '\"<prompt>\" [-i=<task_ids>] [-f=<file_paths>] 
[-c=\"<context>\"] [--tree] [-s=<save_file>] [-d=<detail_level>]',\n\t\t\t\t\tdesc: 'Perform AI-powered research queries with project context'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Task Navigation & Viewing',\n\t\t\tcolor: 'cyan',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'next',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'Show the next task to work on based on dependencies'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'show',\n\t\t\t\t\targs: '<id>',\n\t\t\t\t\tdesc: 'Display detailed information about a specific task'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Tag Management',\n\t\t\tcolor: 'magenta',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'tags',\n\t\t\t\t\targs: '[--show-metadata]',\n\t\t\t\t\tdesc: 'List all available tags with task counts'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'add-tag',\n\t\t\t\t\targs: '<tagName> [--copy-from-current] [--copy-from=<tag>] [-d=\"<desc>\"]',\n\t\t\t\t\tdesc: 'Create a new tag context for organizing tasks'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'use-tag',\n\t\t\t\t\targs: '<tagName>',\n\t\t\t\t\tdesc: 'Switch to a different tag context'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'delete-tag',\n\t\t\t\t\targs: '<tagName> [--yes]',\n\t\t\t\t\tdesc: 'Delete an existing tag and all its tasks'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'rename-tag',\n\t\t\t\t\targs: '<oldName> <newName>',\n\t\t\t\t\tdesc: 'Rename an existing tag'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'copy-tag',\n\t\t\t\t\targs: '<sourceName> <targetName> [-d=\"<desc>\"]',\n\t\t\t\t\tdesc: 'Copy an existing tag to create a new tag with the same tasks'\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\ttitle: 'Dependency Management',\n\t\t\tcolor: 'blue',\n\t\t\tcommands: [\n\t\t\t\t{\n\t\t\t\t\tname: 'add-dependency',\n\t\t\t\t\targs: '--id=<id> --depends-on=<id>',\n\t\t\t\t\tdesc: 'Add a dependency to a task'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'remove-dependency',\n\t\t\t\t\targs: '--id=<id> --depends-on=<id>',\n\t\t\t\t\tdesc: 'Remove a dependency 
from a task'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'validate-dependencies',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'Identify invalid dependencies without fixing them'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: 'fix-dependencies',\n\t\t\t\t\targs: '',\n\t\t\t\t\tdesc: 'Fix invalid dependencies automatically'\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t];\n\n\t// Display each category\n\tcommandCategories.forEach((category) => {\n\t\tconsole.log(\n\t\t\tboxen(chalk[category.color].bold(category.title), {\n\t\t\t\tpadding: { left: 2, right: 2, top: 0, bottom: 0 },\n\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\tborderColor: category.color,\n\t\t\t\tborderStyle: 'round'\n\t\t\t})\n\t\t);\n\n\t\t// Calculate dynamic column widths - adjust ratios as needed\n\t\tconst nameWidth = Math.max(25, Math.floor(terminalWidth * 0.2)); // 20% of width but min 25\n\t\tconst argsWidth = Math.max(40, Math.floor(terminalWidth * 0.35)); // 35% of width but min 40\n\t\tconst descWidth = Math.max(45, Math.floor(terminalWidth * 0.45) - 10); // 45% of width but min 45, minus some buffer\n\n\t\tconst commandTable = new Table({\n\t\t\tcolWidths: [nameWidth, argsWidth, descWidth],\n\t\t\tchars: {\n\t\t\t\ttop: '',\n\t\t\t\t'top-mid': '',\n\t\t\t\t'top-left': '',\n\t\t\t\t'top-right': '',\n\t\t\t\tbottom: '',\n\t\t\t\t'bottom-mid': '',\n\t\t\t\t'bottom-left': '',\n\t\t\t\t'bottom-right': '',\n\t\t\t\tleft: '',\n\t\t\t\t'left-mid': '',\n\t\t\t\tmid: '',\n\t\t\t\t'mid-mid': '',\n\t\t\t\tright: '',\n\t\t\t\t'right-mid': '',\n\t\t\t\tmiddle: ' '\n\t\t\t},\n\t\t\tstyle: { border: [], 'padding-left': 4 },\n\t\t\twordWrap: true\n\t\t});\n\n\t\tcategory.commands.forEach((cmd, index) => {\n\t\t\tcommandTable.push([\n\t\t\t\t`${chalk.yellow.bold(cmd.name)}${chalk.reset('')}`,\n\t\t\t\t`${chalk.white(cmd.args)}${chalk.reset('')}`,\n\t\t\t\t`${chalk.dim(cmd.desc)}${chalk.reset('')}`\n\t\t\t]);\n\t\t});\n\n\t\tconsole.log(commandTable.toString());\n\t\tconsole.log('');\n\t});\n\n\t// Display configuration 
section\n\tconsole.log(\n\t\tboxen(chalk.cyan.bold('Configuration'), {\n\t\t\tpadding: { left: 2, right: 2, top: 0, bottom: 0 },\n\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\tborderColor: 'cyan',\n\t\t\tborderStyle: 'round'\n\t\t})\n\t);\n\n\t// Get terminal width if not already defined\n\tconst configTerminalWidth = terminalWidth || process.stdout.columns || 100;\n\n\t// Calculate dynamic column widths for config table\n\tconst configKeyWidth = Math.max(30, Math.floor(configTerminalWidth * 0.25));\n\tconst configDescWidth = Math.max(50, Math.floor(configTerminalWidth * 0.45));\n\tconst configValueWidth = Math.max(\n\t\t30,\n\t\tMath.floor(configTerminalWidth * 0.3) - 10\n\t);\n\n\tconst configTable = new Table({\n\t\tcolWidths: [configKeyWidth, configDescWidth, configValueWidth],\n\t\tchars: {\n\t\t\ttop: '',\n\t\t\t'top-mid': '',\n\t\t\t'top-left': '',\n\t\t\t'top-right': '',\n\t\t\tbottom: '',\n\t\t\t'bottom-mid': '',\n\t\t\t'bottom-left': '',\n\t\t\t'bottom-right': '',\n\t\t\tleft: '',\n\t\t\t'left-mid': '',\n\t\t\tmid: '',\n\t\t\t'mid-mid': '',\n\t\t\tright: '',\n\t\t\t'right-mid': '',\n\t\t\tmiddle: ' '\n\t\t},\n\t\tstyle: { border: [], 'padding-left': 4 },\n\t\twordWrap: true\n\t});\n\n\tconfigTable.push(\n\t\t[\n\t\t\t`${chalk.yellow(TASKMASTER_CONFIG_FILE)}${chalk.reset('')}`,\n\t\t\t`${chalk.white('AI model configuration file (project root)')}${chalk.reset('')}`,\n\t\t\t`${chalk.dim('Managed by models cmd')}${chalk.reset('')}`\n\t\t],\n\t\t[\n\t\t\t`${chalk.yellow('API Keys (.env)')}${chalk.reset('')}`,\n\t\t\t`${chalk.white('API keys for AI providers (ANTHROPIC_API_KEY, etc.)')}${chalk.reset('')}`,\n\t\t\t`${chalk.dim('Required in .env file')}${chalk.reset('')}`\n\t\t],\n\t\t[\n\t\t\t`${chalk.yellow('MCP Keys (mcp.json)')}${chalk.reset('')}`,\n\t\t\t`${chalk.white('API keys for Cursor integration')}${chalk.reset('')}`,\n\t\t\t`${chalk.dim('Required in 
.cursor/')}${chalk.reset('')}`\n\t\t]\n\t);\n\n\tconsole.log(configTable.toString());\n\tconsole.log('');\n\n\t// Show helpful hints\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white.bold('Quick Start:') +\n\t\t\t\t'\\n\\n' +\n\t\t\t\tchalk.cyan('1. Create Project: ') +\n\t\t\t\tchalk.white('task-master init') +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan('2. Setup Models: ') +\n\t\t\t\tchalk.white('task-master models --setup') +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan('3. Parse PRD: ') +\n\t\t\t\tchalk.white('task-master parse-prd --input=<prd-file>') +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan('4. List Tasks: ') +\n\t\t\t\tchalk.white('task-master list') +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.cyan('5. Find Next Task: ') +\n\t\t\t\tchalk.white('task-master next'),\n\t\t\t{\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'yellow',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1 },\n\t\t\t\twidth: Math.min(configTerminalWidth - 10, 100) // Limit width to terminal width minus padding, max 100\n\t\t\t}\n\t\t)\n\t);\n}\n\n/**\n * Get colored complexity score\n * @param {number} score - Complexity score (1-10)\n * @returns {string} Colored complexity score\n */\nfunction getComplexityWithColor(score) {\n\tif (score <= 3) return chalk.green(`● ${score}`);\n\tif (score <= 6) return chalk.yellow(`● ${score}`);\n\treturn chalk.red(`● ${score}`);\n}\n\n/**\n * Truncate a string to a maximum length and add ellipsis if needed\n * @param {string} str - The string to truncate\n * @param {number} maxLength - Maximum length\n * @returns {string} Truncated string\n */\nfunction truncateString(str, maxLength) {\n\tif (!str) return '';\n\tif (str.length <= maxLength) return str;\n\treturn str.substring(0, maxLength - 3) + '...';\n}\n\n/**\n * Display the next task to work on\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} complexityReportPath - Path to the complexity report file\n * @param {string} tag - Optional tag to override current tag resolution\n */\nasync 
function displayNextTask(\n\ttasksPath,\n\tcomplexityReportPath = null,\n\tcontext = {}\n) {\n\t// Extract parameters from context\n\tconst { projectRoot, tag } = context;\n\n\t// Read the tasks file with proper projectRoot for tag resolution\n\tconst data = readJSON(tasksPath, projectRoot, tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found.');\n\t\tprocess.exit(1);\n\t}\n\n\t// Read complexity report once\n\tconst complexityReport = readComplexityReport(complexityReportPath);\n\n\t// Find the next task\n\tconst nextTask = findNextTask(data.tasks, complexityReport);\n\n\tif (!nextTask) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.yellow('No eligible tasks found!\\n\\n') +\n\t\t\t\t\t'All pending tasks have unsatisfied dependencies, or all tasks are completed.',\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t\treturn;\n\t}\n\n\t// Display the task in a nice format\n\tconsole.log(\n\t\tboxen(chalk.white.bold(`Next Task: #${nextTask.id} - ${nextTask.title}`), {\n\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\tborderColor: 'blue',\n\t\t\tborderStyle: 'round',\n\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t})\n\t);\n\n\t// Create a table with task details\n\tconst taskTable = new Table({\n\t\tstyle: {\n\t\t\thead: [],\n\t\t\tborder: [],\n\t\t\t'padding-top': 0,\n\t\t\t'padding-bottom': 0,\n\t\t\tcompact: true\n\t\t},\n\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },\n\t\tcolWidths: [15, Math.min(75, process.stdout.columns - 20 || 60)],\n\t\twordWrap: true\n\t});\n\n\t// Priority with color\n\tconst priorityColors = {\n\t\thigh: chalk.red.bold,\n\t\tmedium: chalk.yellow,\n\t\tlow: chalk.gray\n\t};\n\tconst priorityColor =\n\t\tpriorityColors[nextTask.priority || 'medium'] || chalk.white;\n\n\t// Add task details to 
table\n\ttaskTable.push(\n\t\t[chalk.cyan.bold('ID:'), nextTask.id.toString()],\n\t\t[chalk.cyan.bold('Title:'), nextTask.title],\n\t\t[\n\t\t\tchalk.cyan.bold('Priority:'),\n\t\t\tpriorityColor(nextTask.priority || 'medium')\n\t\t],\n\t\t[\n\t\t\tchalk.cyan.bold('Dependencies:'),\n\t\t\tformatDependenciesWithStatus(\n\t\t\t\tnextTask.dependencies,\n\t\t\t\tdata.tasks,\n\t\t\t\ttrue,\n\t\t\t\tcomplexityReport\n\t\t\t)\n\t\t],\n\t\t[\n\t\t\tchalk.cyan.bold('Complexity:'),\n\t\t\tnextTask.complexityScore\n\t\t\t\t? getComplexityWithColor(nextTask.complexityScore)\n\t\t\t\t: chalk.gray('N/A')\n\t\t],\n\t\t[chalk.cyan.bold('Description:'), nextTask.description]\n\t);\n\n\tconsole.log(taskTable.toString());\n\n\t// If task has details, show them in a separate box\n\tif (nextTask.details && nextTask.details.trim().length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Implementation Details:') + '\\n\\n' + nextTask.details,\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n\n\t// Determine if the nextTask is a subtask\n\tconst isSubtask = !!nextTask.parentId;\n\n\t// Show subtasks if they exist (only for parent tasks)\n\tif (!isSubtask && nextTask.subtasks && nextTask.subtasks.length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.white.bold('Subtasks'), {\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\tborderColor: 'magenta',\n\t\t\t\tborderStyle: 'round'\n\t\t\t})\n\t\t);\n\n\t\t// Calculate available width for the subtask table\n\t\tconst availableWidth = process.stdout.columns - 10 || 100; // Default to 100 if can't detect\n\n\t\t// Define percentage-based column widths\n\t\tconst idWidthPct = 8;\n\t\tconst statusWidthPct = 15;\n\t\tconst depsWidthPct = 25;\n\t\tconst titleWidthPct = 100 - idWidthPct - statusWidthPct - 
depsWidthPct;\n\n\t\t// Calculate actual column widths\n\t\tconst idWidth = Math.floor(availableWidth * (idWidthPct / 100));\n\t\tconst statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));\n\t\tconst depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));\n\t\tconst titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));\n\n\t\t// Create a table for subtasks with improved handling\n\t\tconst subtaskTable = new Table({\n\t\t\thead: [\n\t\t\t\tchalk.magenta.bold('ID'),\n\t\t\t\tchalk.magenta.bold('Status'),\n\t\t\t\tchalk.magenta.bold('Title'),\n\t\t\t\tchalk.magenta.bold('Deps')\n\t\t\t],\n\t\t\tcolWidths: [idWidth, statusWidth, titleWidth, depsWidth],\n\t\t\tstyle: {\n\t\t\t\thead: [],\n\t\t\t\tborder: [],\n\t\t\t\t'padding-top': 0,\n\t\t\t\t'padding-bottom': 0,\n\t\t\t\tcompact: true\n\t\t\t},\n\t\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },\n\t\t\twordWrap: true\n\t\t});\n\n\t\t// Add subtasks to table\n\t\tnextTask.subtasks.forEach((st) => {\n\t\t\tconst statusColor =\n\t\t\t\t{\n\t\t\t\t\tdone: chalk.green,\n\t\t\t\t\tcompleted: chalk.green,\n\t\t\t\t\tpending: chalk.yellow,\n\t\t\t\t\t'in-progress': chalk.blue\n\t\t\t\t}[st.status || 'pending'] || chalk.white;\n\n\t\t\t// Format subtask dependencies\n\t\t\tlet subtaskDeps = 'None';\n\t\t\tif (st.dependencies && st.dependencies.length > 0) {\n\t\t\t\t// Format dependencies with correct notation\n\t\t\t\tconst formattedDeps = st.dependencies.map((depId) => {\n\t\t\t\t\tif (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\tconst foundSubtask = nextTask.subtasks.find(\n\t\t\t\t\t\t\t(st) => st.id === depId\n\t\t\t\t\t\t);\n\t\t\t\t\t\tif (foundSubtask) {\n\t\t\t\t\t\t\tconst isDone =\n\t\t\t\t\t\t\t\tfoundSubtask.status === 'done' ||\n\t\t\t\t\t\t\t\tfoundSubtask.status === 'completed';\n\t\t\t\t\t\t\tconst isInProgress = foundSubtask.status === 'in-progress';\n\n\t\t\t\t\t\t\t// Use consistent color formatting instead of emojis\n\t\t\t\t\t\t\tif 
(isDone) {\n\t\t\t\t\t\t\t\treturn chalk.green.bold(`${nextTask.id}.${depId}`);\n\t\t\t\t\t\t\t} else if (isInProgress) {\n\t\t\t\t\t\t\t\treturn chalk.hex('#FFA500').bold(`${nextTask.id}.${depId}`);\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\treturn chalk.red.bold(`${nextTask.id}.${depId}`);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn chalk.red(`${nextTask.id}.${depId} (Not found)`);\n\t\t\t\t\t}\n\t\t\t\t\treturn depId;\n\t\t\t\t});\n\n\t\t\t\t// Join the formatted dependencies directly instead of passing to formatDependenciesWithStatus again\n\t\t\t\tsubtaskDeps =\n\t\t\t\t\tformattedDeps.length === 1\n\t\t\t\t\t\t? formattedDeps[0]\n\t\t\t\t\t\t: formattedDeps.join(chalk.white(', '));\n\t\t\t}\n\n\t\t\tsubtaskTable.push([\n\t\t\t\t`${nextTask.id}.${st.id}`,\n\t\t\t\tstatusColor(st.status || 'pending'),\n\t\t\t\tst.title,\n\t\t\t\tsubtaskDeps\n\t\t\t]);\n\t\t});\n\n\t\tconsole.log(subtaskTable.toString());\n\t}\n\n\t// Suggest expanding if no subtasks (only for parent tasks without subtasks)\n\tif (!isSubtask && (!nextTask.subtasks || nextTask.subtasks.length === 0)) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.yellow('No subtasks found. 
Consider breaking down this task:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t`Run: ${chalk.cyan(`task-master expand --id=${nextTask.id}`)}`\n\t\t\t\t\t),\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n\n\t// Show action suggestions\n\tlet suggestedActionsContent = chalk.white.bold('Suggested Actions:') + '\\n';\n\tif (isSubtask) {\n\t\t// Suggested actions for a subtask\n\t\tsuggestedActionsContent +=\n\t\t\t`${chalk.cyan('1.')} Mark as in-progress: ${chalk.yellow(`task-master set-status --id=${nextTask.id} --status=in-progress`)}\\n` +\n\t\t\t`${chalk.cyan('2.')} Mark as done when completed: ${chalk.yellow(`task-master set-status --id=${nextTask.id} --status=done`)}\\n` +\n\t\t\t`${chalk.cyan('3.')} View parent task: ${chalk.yellow(`task-master show --id=${nextTask.parentId}`)}`;\n\t} else {\n\t\t// Suggested actions for a parent task\n\t\tsuggestedActionsContent +=\n\t\t\t`${chalk.cyan('1.')} Mark as in-progress: ${chalk.yellow(`task-master set-status --id=${nextTask.id} --status=in-progress`)}\\n` +\n\t\t\t`${chalk.cyan('2.')} Mark as done when completed: ${chalk.yellow(`task-master set-status --id=${nextTask.id} --status=done`)}\\n` +\n\t\t\t(nextTask.subtasks && nextTask.subtasks.length > 0\n\t\t\t\t? 
`${chalk.cyan('3.')} Update subtask status: ${chalk.yellow(`task-master set-status --id=${nextTask.id}.1 --status=done`)}` // Example: first subtask\n\t\t\t\t: `${chalk.cyan('3.')} Break down into subtasks: ${chalk.yellow(`task-master expand --id=${nextTask.id}`)}`);\n\t}\n\n\tconsole.log(\n\t\tboxen(suggestedActionsContent, {\n\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\tborderColor: 'green',\n\t\t\tborderStyle: 'round',\n\t\t\tmargin: { top: 1 }\n\t\t})\n\t);\n\n\t// Show FYI notice if migration occurred\n\tdisplayTaggedTasksFYI(data);\n}\n\n/**\n * Display a specific task by ID\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string|number} taskId - The ID of the task to display\n * @param {string} complexityReportPath - Path to the complexity report file\n * @param {string} [statusFilter] - Optional status to filter subtasks by\n * @param {object} context - Context object containing projectRoot and tag\n * @param {string} context.projectRoot - Project root path\n * @param {string} context.tag - Tag for the task\n */\nasync function displayTaskById(\n\ttasksPath,\n\ttaskId,\n\tcomplexityReportPath = null,\n\tstatusFilter = null,\n\tcontext = {}\n) {\n\tconst { projectRoot, tag } = context;\n\n\t// Read the tasks file with proper projectRoot for tag resolution\n\tconst data = readJSON(tasksPath, projectRoot, tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found.');\n\t\tprocess.exit(1);\n\t}\n\n\t// Read complexity report once\n\tconst complexityReport = readComplexityReport(complexityReportPath);\n\n\t// Find the task by ID, applying the status filter if provided\n\t// Returns { task, originalSubtaskCount, originalSubtasks }\n\tconst { task, originalSubtaskCount, originalSubtasks } = findTaskById(\n\t\tdata.tasks,\n\t\ttaskId,\n\t\tcomplexityReport,\n\t\tstatusFilter\n\t);\n\n\tif (!task) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.yellow(`Task with ID ${taskId} not found!`), {\n\t\t\t\tpadding: { top: 
0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tborderColor: 'yellow',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1 }\n\t\t\t})\n\t\t);\n\t\treturn;\n\t}\n\n\t// Handle subtask display specially (This logic remains the same)\n\tif (task.isSubtask || task.parentTask) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold(\n\t\t\t\t\t`Subtask: #${task.parentTask.id}.${task.id} - ${task.title}`\n\t\t\t\t),\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'magenta',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\n\t\tconst subtaskTable = new Table({\n\t\t\tstyle: {\n\t\t\t\thead: [],\n\t\t\t\tborder: [],\n\t\t\t\t'padding-top': 0,\n\t\t\t\t'padding-bottom': 0,\n\t\t\t\tcompact: true\n\t\t\t},\n\t\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },\n\t\t\tcolWidths: [15, Math.min(75, process.stdout.columns - 20 || 60)],\n\t\t\twordWrap: true\n\t\t});\n\t\tsubtaskTable.push(\n\t\t\t[chalk.cyan.bold('ID:'), `${task.parentTask.id}.${task.id}`],\n\t\t\t[\n\t\t\t\tchalk.cyan.bold('Parent Task:'),\n\t\t\t\t`#${task.parentTask.id} - ${task.parentTask.title}`\n\t\t\t],\n\t\t\t[chalk.cyan.bold('Title:'), task.title],\n\t\t\t[\n\t\t\t\tchalk.cyan.bold('Status:'),\n\t\t\t\tgetStatusWithColor(task.status || 'pending', true)\n\t\t\t],\n\t\t\t[\n\t\t\t\tchalk.cyan.bold('Complexity:'),\n\t\t\t\ttask.complexityScore\n\t\t\t\t\t? 
getComplexityWithColor(task.complexityScore)\n\t\t\t\t\t: chalk.gray('N/A')\n\t\t\t],\n\t\t\t[\n\t\t\t\tchalk.cyan.bold('Description:'),\n\t\t\t\ttask.description || 'No description provided.'\n\t\t\t]\n\t\t);\n\t\tconsole.log(subtaskTable.toString());\n\n\t\tif (task.details && task.details.trim().length > 0) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.white.bold('Implementation Details:') + '\\n\\n' + task.details,\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Suggested Actions:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t`${chalk.cyan('1.')} Mark as in-progress: ${chalk.yellow(`task-master set-status --id=${task.parentTask.id}.${task.id} --status=in-progress`)}\\n` +\n\t\t\t\t\t`${chalk.cyan('2.')} Mark as done when completed: ${chalk.yellow(`task-master set-status --id=${task.parentTask.id}.${task.id} --status=done`)}\\n` +\n\t\t\t\t\t`${chalk.cyan('3.')} View parent task: ${chalk.yellow(`task-master show --id=${task.parentTask.id}`)}`,\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t\treturn; // Exit after displaying subtask details\n\t}\n\n\t// --- Display Regular Task Details ---\n\tconsole.log(\n\t\tboxen(chalk.white.bold(`Task: #${task.id} - ${task.title}`), {\n\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\tborderColor: 'blue',\n\t\t\tborderStyle: 'round',\n\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t})\n\t);\n\n\tconst taskTable = new Table({\n\t\tstyle: {\n\t\t\thead: [],\n\t\t\tborder: [],\n\t\t\t'padding-top': 0,\n\t\t\t'padding-bottom': 0,\n\t\t\tcompact: true\n\t\t},\n\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' 
},\n\t\tcolWidths: [15, Math.min(75, process.stdout.columns - 20 || 60)],\n\t\twordWrap: true\n\t});\n\tconst priorityColors = {\n\t\thigh: chalk.red.bold,\n\t\tmedium: chalk.yellow,\n\t\tlow: chalk.gray\n\t};\n\tconst priorityColor =\n\t\tpriorityColors[task.priority || 'medium'] || chalk.white;\n\ttaskTable.push(\n\t\t[chalk.cyan.bold('ID:'), task.id.toString()],\n\t\t[chalk.cyan.bold('Title:'), task.title],\n\t\t[\n\t\t\tchalk.cyan.bold('Status:'),\n\t\t\tgetStatusWithColor(task.status || 'pending', true)\n\t\t],\n\t\t[chalk.cyan.bold('Priority:'), priorityColor(task.priority || 'medium')],\n\t\t[\n\t\t\tchalk.cyan.bold('Dependencies:'),\n\t\t\tformatDependenciesWithStatus(\n\t\t\t\ttask.dependencies,\n\t\t\t\tdata.tasks,\n\t\t\t\ttrue,\n\t\t\t\tcomplexityReport\n\t\t\t)\n\t\t],\n\t\t[\n\t\t\tchalk.cyan.bold('Complexity:'),\n\t\t\ttask.complexityScore\n\t\t\t\t? getComplexityWithColor(task.complexityScore)\n\t\t\t\t: chalk.gray('N/A')\n\t\t],\n\t\t[chalk.cyan.bold('Description:'), task.description]\n\t);\n\tconsole.log(taskTable.toString());\n\n\tif (task.details && task.details.trim().length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Implementation Details:') + '\\n\\n' + task.details,\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n\tif (task.testStrategy && task.testStrategy.trim().length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.white.bold('Test Strategy:') + '\\n\\n' + task.testStrategy, {\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tborderColor: 'cyan',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t})\n\t\t);\n\t}\n\n\t// --- Subtask Table Display (uses filtered list: task.subtasks) ---\n\tif (task.subtasks && task.subtasks.length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.white.bold('Subtasks'), 
{\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\tborderColor: 'magenta',\n\t\t\t\tborderStyle: 'round'\n\t\t\t})\n\t\t);\n\n\t\tconst availableWidth = process.stdout.columns - 10 || 100;\n\t\tconst idWidthPct = 10;\n\t\tconst statusWidthPct = 15;\n\t\tconst depsWidthPct = 25;\n\t\tconst titleWidthPct = 100 - idWidthPct - statusWidthPct - depsWidthPct;\n\t\tconst idWidth = Math.floor(availableWidth * (idWidthPct / 100));\n\t\tconst statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));\n\t\tconst depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));\n\t\tconst titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));\n\n\t\tconst subtaskTable = new Table({\n\t\t\thead: [\n\t\t\t\tchalk.magenta.bold('ID'),\n\t\t\t\tchalk.magenta.bold('Status'),\n\t\t\t\tchalk.magenta.bold('Title'),\n\t\t\t\tchalk.magenta.bold('Deps')\n\t\t\t],\n\t\t\tcolWidths: [idWidth, statusWidth, titleWidth, depsWidth],\n\t\t\tstyle: {\n\t\t\t\thead: [],\n\t\t\t\tborder: [],\n\t\t\t\t'padding-top': 0,\n\t\t\t\t'padding-bottom': 0,\n\t\t\t\tcompact: true\n\t\t\t},\n\t\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },\n\t\t\twordWrap: true\n\t\t});\n\n\t\t// Populate table with the potentially filtered subtasks\n\t\ttask.subtasks.forEach((st) => {\n\t\t\tconst statusColorMap = {\n\t\t\t\tdone: chalk.green,\n\t\t\t\tcompleted: chalk.green,\n\t\t\t\tpending: chalk.yellow,\n\t\t\t\t'in-progress': chalk.blue\n\t\t\t};\n\t\t\tconst statusColor = statusColorMap[st.status || 'pending'] || chalk.white;\n\t\t\tlet subtaskDeps = 'None';\n\t\t\tif (st.dependencies && st.dependencies.length > 0) {\n\t\t\t\tconst formattedDeps = st.dependencies.map((depId) => {\n\t\t\t\t\t// Use the original, unfiltered list for dependency status lookup\n\t\t\t\t\tconst sourceListForDeps = originalSubtasks || task.subtasks;\n\t\t\t\t\tconst foundDepSubtask =\n\t\t\t\t\t\ttypeof depId === 'number' && depId < 
100\n\t\t\t\t\t\t\t? sourceListForDeps.find((sub) => sub.id === depId)\n\t\t\t\t\t\t\t: null;\n\n\t\t\t\t\tif (foundDepSubtask) {\n\t\t\t\t\t\tconst isDone =\n\t\t\t\t\t\t\tfoundDepSubtask.status === 'done' ||\n\t\t\t\t\t\t\tfoundDepSubtask.status === 'completed';\n\t\t\t\t\t\tconst isInProgress = foundDepSubtask.status === 'in-progress';\n\t\t\t\t\t\tconst color = isDone\n\t\t\t\t\t\t\t? chalk.green.bold\n\t\t\t\t\t\t\t: isInProgress\n\t\t\t\t\t\t\t\t? chalk.hex('#FFA500').bold\n\t\t\t\t\t\t\t\t: chalk.red.bold;\n\t\t\t\t\t\treturn color(`${task.id}.${depId}`);\n\t\t\t\t\t} else if (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\treturn chalk.red(`${task.id}.${depId} (Not found)`);\n\t\t\t\t\t}\n\t\t\t\t\treturn depId; // Assume it's a top-level task ID if not a number < 100\n\t\t\t\t});\n\t\t\t\tsubtaskDeps =\n\t\t\t\t\tformattedDeps.length === 1\n\t\t\t\t\t\t? formattedDeps[0]\n\t\t\t\t\t\t: formattedDeps.join(chalk.white(', '));\n\t\t\t}\n\t\t\tsubtaskTable.push([\n\t\t\t\t`${task.id}.${st.id}`,\n\t\t\t\tstatusColor(st.status || 'pending'),\n\t\t\t\tst.title,\n\t\t\t\tsubtaskDeps\n\t\t\t]);\n\t\t});\n\t\tconsole.log(subtaskTable.toString());\n\n\t\t// Display filter summary line *immediately after the table* if a filter was applied\n\t\tif (statusFilter && originalSubtaskCount !== null) {\n\t\t\tconsole.log(\n\t\t\t\tchalk.cyan(\n\t\t\t\t\t` Filtered by status: ${chalk.bold(statusFilter)}. 
Showing ${chalk.bold(task.subtasks.length)} of ${chalk.bold(originalSubtaskCount)} subtasks.`\n\t\t\t\t)\n\t\t\t);\n\t\t\t// Add a newline for spacing before the progress bar if the filter line was shown\n\t\t\tconsole.log();\n\t\t}\n\t\t// --- Conditional Messages for No Subtasks Shown ---\n\t} else if (statusFilter && originalSubtaskCount === 0) {\n\t\t// Case where filter applied, but the parent task had 0 subtasks originally\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.yellow(\n\t\t\t\t\t`No subtasks found matching status: ${statusFilter} (Task has no subtasks)`\n\t\t\t\t),\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\tborderStyle: 'round'\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t} else if (\n\t\tstatusFilter &&\n\t\toriginalSubtaskCount > 0 &&\n\t\ttask.subtasks.length === 0\n\t) {\n\t\t// Case where filter applied, original subtasks existed, but none matched\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.yellow(\n\t\t\t\t\t`No subtasks found matching status: ${statusFilter} (out of ${originalSubtaskCount} total)`\n\t\t\t\t),\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\tborderStyle: 'round'\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t} else if (\n\t\t!statusFilter &&\n\t\t(!originalSubtasks || originalSubtasks.length === 0)\n\t) {\n\t\t// Case where NO filter applied AND the task genuinely has no subtasks\n\t\t// Use the authoritative originalSubtasks if it exists (from filtering), else check task.subtasks\n\t\tconst actualSubtasks = originalSubtasks || task.subtasks;\n\t\tif (!actualSubtasks || actualSubtasks.length === 0) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.yellow('No subtasks found. 
Consider breaking down this task:') +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t`Run: ${chalk.cyan(`task-master expand --id=${task.id}`)}`\n\t\t\t\t\t\t),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\t}\n\n\t// --- Subtask Progress Bar Display (uses originalSubtasks or task.subtasks) ---\n\t// Determine the list to use for progress calculation (always the original if available and filtering happened)\n\tconst subtasksForProgress = originalSubtasks || task.subtasks; // Use original if filtering occurred, else the potentially empty task.subtasks\n\n\t// Only show progress if there are actually subtasks\n\tif (subtasksForProgress && subtasksForProgress.length > 0) {\n\t\tconst totalSubtasks = subtasksForProgress.length;\n\t\tconst completedSubtasks = subtasksForProgress.filter(\n\t\t\t(st) => st.status === 'done' || st.status === 'completed'\n\t\t).length;\n\n\t\t// Count other statuses from the original/complete list\n\t\tconst inProgressSubtasks = subtasksForProgress.filter(\n\t\t\t(st) => st.status === 'in-progress'\n\t\t).length;\n\t\tconst pendingSubtasks = subtasksForProgress.filter(\n\t\t\t(st) => st.status === 'pending'\n\t\t).length;\n\t\tconst blockedSubtasks = subtasksForProgress.filter(\n\t\t\t(st) => st.status === 'blocked'\n\t\t).length;\n\t\tconst deferredSubtasks = subtasksForProgress.filter(\n\t\t\t(st) => st.status === 'deferred'\n\t\t).length;\n\t\tconst cancelledSubtasks = subtasksForProgress.filter(\n\t\t\t(st) => st.status === 'cancelled'\n\t\t).length;\n\n\t\tconst statusBreakdown = {\n\t\t\t// Calculate breakdown based on the complete list\n\t\t\t'in-progress': (inProgressSubtasks / totalSubtasks) * 100,\n\t\t\tpending: (pendingSubtasks / totalSubtasks) * 100,\n\t\t\tblocked: (blockedSubtasks / totalSubtasks) * 100,\n\t\t\tdeferred: 
(deferredSubtasks / totalSubtasks) * 100,\n\t\t\tcancelled: (cancelledSubtasks / totalSubtasks) * 100\n\t\t};\n\t\tconst completionPercentage = (completedSubtasks / totalSubtasks) * 100;\n\n\t\tconst availableWidth = process.stdout.columns || 80;\n\t\tconst boxPadding = 2;\n\t\tconst boxBorders = 2;\n\t\tconst percentTextLength = 5;\n\t\tconst progressBarLength = Math.max(\n\t\t\t20,\n\t\t\tMath.min(\n\t\t\t\t60,\n\t\t\t\tavailableWidth - boxPadding - boxBorders - percentTextLength - 35\n\t\t\t)\n\t\t);\n\n\t\tconst statusCounts =\n\t\t\t`${chalk.green('✓ Done:')} ${completedSubtasks} ${chalk.hex('#FFA500')('► In Progress:')} ${inProgressSubtasks} ${chalk.yellow('○ Pending:')} ${pendingSubtasks}\\n` +\n\t\t\t`${chalk.red('! Blocked:')} ${blockedSubtasks} ${chalk.gray('⏱ Deferred:')} ${deferredSubtasks} ${chalk.gray('✗ Cancelled:')} ${cancelledSubtasks}`;\n\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Subtask Progress:') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t`${chalk.cyan('Completed:')} ${completedSubtasks}/${totalSubtasks} (${completionPercentage.toFixed(1)}%)\\n` +\n\t\t\t\t\t`${statusCounts}\\n` +\n\t\t\t\t\t`${chalk.cyan('Progress:')} ${createProgressBar(completionPercentage, progressBarLength, statusBreakdown)}`,\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\t\twidth: Math.min(availableWidth - 10, 100),\n\t\t\t\t\ttextAlignment: 'left'\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n\n\t// --- Suggested Actions ---\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white.bold('Suggested Actions:') +\n\t\t\t\t'\\n' +\n\t\t\t\t`${chalk.cyan('1.')} Mark as in-progress: ${chalk.yellow(`task-master set-status --id=${task.id} --status=in-progress`)}\\n` +\n\t\t\t\t`${chalk.cyan('2.')} Mark as done when completed: ${chalk.yellow(`task-master set-status --id=${task.id} --status=done`)}\\n` +\n\t\t\t\t// Determine action 3 based on whether 
subtasks *exist* (use the source list for progress)\n\t\t\t\t(subtasksForProgress && subtasksForProgress.length > 0\n\t\t\t\t\t? `${chalk.cyan('3.')} Update subtask status: ${chalk.yellow(`task-master set-status --id=${task.id}.1 --status=done`)}` // Example uses .1\n\t\t\t\t\t: `${chalk.cyan('3.')} Break down into subtasks: ${chalk.yellow(`task-master expand --id=${task.id}`)}`),\n\t\t\t{\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tborderColor: 'green',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1 }\n\t\t\t}\n\t\t)\n\t);\n\n\t// Show FYI notice if migration occurred\n\tdisplayTaggedTasksFYI(data);\n}\n\n/**\n * Display the complexity analysis report in a nice format\n * @param {string} reportPath - Path to the complexity report file\n */\nasync function displayComplexityReport(reportPath) {\n\t// Check if the report exists\n\tif (!fs.existsSync(reportPath)) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.yellow(`No complexity report found at ${reportPath}\\n\\n`) +\n\t\t\t\t\t'Would you like to generate one now?',\n\t\t\t\t{\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\n\t\tconst rl = readline.createInterface({\n\t\t\tinput: process.stdin,\n\t\t\toutput: process.stdout\n\t\t});\n\n\t\tconst answer = await new Promise((resolve) => {\n\t\t\trl.question(chalk.cyan('Generate complexity report? (y/n): '), resolve);\n\t\t});\n\t\trl.close();\n\n\t\tif (answer.toLowerCase() === 'y' || answer.toLowerCase() === 'yes') {\n\t\t\t// Call the analyze-complexity command\n\t\t\tconsole.log(chalk.blue('Generating complexity report...'));\n\t\t\tconst tasksPath = TASKMASTER_TASKS_FILE;\n\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\tconsole.error(\n\t\t\t\t\t'❌ No tasks.json file found. 
Please run \"task-master init\" or create a tasks.json file.'
				);
				return null;
			}

			await analyzeTaskComplexity({
				output: reportPath,
				research: false, // Default to no research for speed
				file: tasksPath
			});
			// Read the newly generated report (recurse now that the file exists)
			return displayComplexityReport(reportPath);
		} else {
			console.log(chalk.yellow('Report generation cancelled.'));
			return;
		}
	}

	// Read the report
	let report;
	try {
		report = JSON.parse(fs.readFileSync(reportPath, 'utf8'));
	} catch (error) {
		log('error', `Error reading complexity report: ${error.message}`);
		return;
	}

	// Display report header
	console.log(
		boxen(chalk.white.bold('Task Complexity Analysis Report'), {
			padding: 1,
			borderColor: 'blue',
			borderStyle: 'round',
			margin: { top: 1, bottom: 1 }
		})
	);

	// Display metadata
	const metaTable = new Table({
		style: {
			head: [],
			border: [],
			'padding-top': 0,
			'padding-bottom': 0,
			compact: true
		},
		chars: {
			mid: '',
			'left-mid': '',
			'mid-mid': '',
			'right-mid': ''
		},
		colWidths: [20, 50]
	});

	metaTable.push(
		[
			chalk.cyan.bold('Generated:'),
			new Date(report.meta.generatedAt).toLocaleString()
		],
		[chalk.cyan.bold('Tasks Analyzed:'), report.meta.tasksAnalyzed],
		[chalk.cyan.bold('Threshold Score:'), report.meta.thresholdScore],
		[chalk.cyan.bold('Project:'), report.meta.projectName],
		[
			chalk.cyan.bold('Research-backed:'),
			report.meta.usedResearch ? 'Yes' : 'No'
		]
	);

	console.log(metaTable.toString());

	// Sort tasks by complexity score (highest first)
	const sortedTasks = [...report.complexityAnalysis].sort(
		(a, b) => b.complexityScore - a.complexityScore
	);

	// Determine which tasks need expansion based on threshold
	const tasksNeedingExpansion = sortedTasks.filter(
		(task) => task.complexityScore >= report.meta.thresholdScore
	);
	const simpleTasks = sortedTasks.filter(
		(task) => task.complexityScore < report.meta.thresholdScore
	);

	// Create progress bar to show complexity distribution
	const complexityDistribution = [0, 0, 0]; // Low (0-4), Medium (5-7), High (8-10)
	sortedTasks.forEach((task) => {
		if (task.complexityScore < 5) complexityDistribution[0]++;
		else if (task.complexityScore < 8) complexityDistribution[1]++;
		else complexityDistribution[2]++;
	});

	// NOTE(review): these divide by sortedTasks.length — an empty
	// complexityAnalysis array would render NaN%; confirm the report
	// generator always emits at least one entry.
	const percentLow = Math.round(
		(complexityDistribution[0] / sortedTasks.length) * 100
	);
	const percentMedium = Math.round(
		(complexityDistribution[1] / sortedTasks.length) * 100
	);
	const percentHigh = Math.round(
		(complexityDistribution[2] / sortedTasks.length) * 100
	);

	console.log(
		boxen(
			chalk.white.bold('Complexity Distribution\n\n') +
				`${chalk.green.bold('Low (1-4):')} ${complexityDistribution[0]} tasks (${percentLow}%)\n` +
				`${chalk.yellow.bold('Medium (5-7):')} ${complexityDistribution[1]} tasks (${percentMedium}%)\n` +
				`${chalk.red.bold('High (8-10):')} ${complexityDistribution[2]} tasks (${percentHigh}%)`,
			{
				padding: 1,
				borderColor: 'cyan',
				borderStyle: 'round',
				margin: { top: 1, bottom: 1 }
			}
		)
	);

	// Get terminal width
	const terminalWidth = process.stdout.columns || 100; // Default to 100 if can't detect

	// Calculate dynamic column widths
	const idWidth = 12;
	const titleWidth = Math.floor(terminalWidth * 0.25); // 25% of width
	const scoreWidth = 8;
	const subtasksWidth = 8;
	// Command column gets the remaining space (minus some buffer for borders)
	const commandWidth =
		terminalWidth - idWidth - titleWidth - scoreWidth - subtasksWidth - 10;

	// Create table with new column widths and word wrapping
	const complexTable = new Table({
		head: [
			chalk.yellow.bold('ID'),
			chalk.yellow.bold('Title'),
			chalk.yellow.bold('Score'),
			chalk.yellow.bold('Subtasks'),
			chalk.yellow.bold('Expansion Command')
		],
		colWidths: [idWidth, titleWidth, scoreWidth, subtasksWidth, commandWidth],
		style: { head: [], border: [] },
		wordWrap: true,
		wrapOnWordBoundary: true
	});

	// When adding rows, don't truncate the expansion command
	tasksNeedingExpansion.forEach((task) => {
		const expansionCommand = `task-master expand --id=${task.taskId} --num=${task.recommendedSubtasks}${task.expansionPrompt ? ` --prompt=\"${task.expansionPrompt}\"` : ''}`;

		complexTable.push([
			task.taskId,
			truncate(task.taskTitle, titleWidth - 3), // Still truncate title for readability
			getComplexityWithColor(task.complexityScore),
			task.recommendedSubtasks,
			chalk.cyan(expansionCommand) // Don't truncate - allow wrapping
		]);
	});

	console.log(complexTable.toString());

	// Create table for simple tasks
	if (simpleTasks.length > 0) {
		console.log(
			boxen(chalk.green.bold(`Simple Tasks (${simpleTasks.length})`), {
				padding: { left: 2, right: 2, top: 0, bottom: 0 },
				margin: { top: 1, bottom: 0 },
				borderColor: 'green',
				borderStyle: 'round'
			})
		);

		const simpleTable = new Table({
			head: [
				chalk.green.bold('ID'),
				chalk.green.bold('Title'),
				chalk.green.bold('Score'),
				chalk.green.bold('Reasoning')
			],
			colWidths: [5, 40, 8, 50],
			style: { head: [], border: [] }
		});

		simpleTasks.forEach((task) => {
			simpleTable.push([
				task.taskId,
				truncate(task.taskTitle, 37),
				getComplexityWithColor(task.complexityScore),
				truncate(task.reasoning, 47)
			]);
		});

		console.log(simpleTable.toString());
	}

	// Show action suggestions
	console.log(
		boxen(
			chalk.white.bold('Suggested Actions:') +
				'\n\n' +
				`${chalk.cyan('1.')} Expand all complex tasks: ${chalk.yellow(`task-master expand --all`)}\n` +
				`${chalk.cyan('2.')} Expand a specific task: ${chalk.yellow(`task-master expand --id=<id>`)}\n` +
				`${chalk.cyan('3.')} Regenerate with research: ${chalk.yellow(`task-master analyze-complexity --research`)}`,
			{
				padding: 1,
				borderColor: 'cyan',
				borderStyle: 'round',
				margin: { top: 1 }
			}
		)
	);
}

/**
 * Generate a prompt for complexity analysis
 * @param {Object} tasksData - Tasks data object containing tasks array
 * @returns {string} Generated prompt
 */
function generateComplexityAnalysisPrompt(tasksData) {
	const defaultSubtasks = getDefaultSubtasks(null); // Use the getter
	// The recommendedSubtasks range in the prompt below is anchored on the
	// configured default, clamped so the lower bound is >= 3 and the upper
	// bound is <= 8.
	return `Analyze the complexity of the following tasks and provide recommendations for subtask breakdown:

${tasksData.tasks
	.map(
		(task) => `
Task ID: ${task.id}
Title: ${task.title}
Description: ${task.description}
Details: ${task.details}
Dependencies: ${JSON.stringify(task.dependencies || [])}
Priority: ${task.priority || 'medium'}
`
	)
	.join('\n---\n')}

Analyze each task and return a JSON array with the following structure for each task:
[
 {
 \"taskId\": number,
 \"taskTitle\": string,
 \"complexityScore\": number (1-10),
 \"recommendedSubtasks\": number (${Math.max(3, defaultSubtasks - 1)}-${Math.min(8, defaultSubtasks + 2)}),
 \"expansionPrompt\": string (a specific prompt for generating good subtasks),
 \"reasoning\": string (brief explanation of your assessment)
 },
 ...
]

IMPORTANT: Make sure to include an analysis for EVERY task listed above, with the correct taskId matching each task's ID.
`;
}

/**
 * Confirm overwriting existing tasks.json file
 * @param {string} tasksPath - Path to the tasks.json file
 * @returns {Promise<boolean>} - Promise resolving to true if user confirms, false otherwise
 */
async function confirmTaskOverwrite(tasksPath) {
	console.log(
		boxen(
			chalk.yellow(
				"It looks like you've already generated tasks for this project.\n"
			) +
				chalk.yellow(
					'Executing this command will overwrite any existing tasks.'
				),
			{
				padding: 1,
				borderColor: 'yellow',
				borderStyle: 'round',
				margin: { top: 1 }
			}
		)
	);

	const rl = readline.createInterface({
		input: process.stdin,
		output: process.stdout
	});

	const answer = await new Promise((resolve) => {
		rl.question(
			chalk.cyan('Are you sure you wish to continue? (y/N): '),
			resolve
		);
	});
	rl.close();

	// Only an explicit 'y'/'yes' confirms; any other input (including a
	// bare Enter) is treated as a decline, matching the (y/N) default.
	return answer.toLowerCase() === 'y' || answer.toLowerCase() === 'yes';
}

/**
 * Displays the API key status for different providers.
 * @param {Array<{provider: string, cli: boolean, mcp: boolean}>} statusReport - The report generated by getApiKeyStatusReport.
 */
function displayApiKeyStatus(statusReport) {
	if (!statusReport || statusReport.length === 0) {
		console.log(chalk.yellow('No API key status information available.'));
		return;
	}

	const table = new Table({
		head: [
			chalk.cyan('Provider'),
			chalk.cyan('CLI Key (.env)'),
			chalk.cyan('MCP Key (mcp.json)')
		],
		colWidths: [15, 20, 25],
		chars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' }
	});

	statusReport.forEach(({ provider, cli, mcp }) => {
		const cliStatus = cli ? 
chalk.green('✅ Found') : chalk.red('❌ Missing');
		const mcpStatus = mcp ? chalk.green('✅ Found') : chalk.red('❌ Missing');
		// Capitalize provider name for display
		const providerName = provider.charAt(0).toUpperCase() + provider.slice(1);
		table.push([providerName, cliStatus, mcpStatus]);
	});

	console.log(chalk.bold('\n🔑 API Key Status:'));
	console.log(table.toString());
	console.log(
		chalk.gray(
			` Note: Some providers (e.g., Azure, Ollama) may require additional endpoint configuration in ${TASKMASTER_CONFIG_FILE}.`
		)
	);
}

// --- Formatting Helpers (Potentially move some to utils.js if reusable) ---

const formatSweScoreWithTertileStars = (score, allModels) => {
	// ... (Implementation from previous version or refine) ...
	if (score === null || score === undefined || score <= 0) return 'N/A';
	const formattedPercentage = `${(score * 100).toFixed(1)}%`;

	const validScores = allModels
		.map((m) => m.sweScore)
		.filter((s) => s !== null && s !== undefined && s > 0);
	const sortedScores = [...validScores].sort((a, b) => b - a);
	const n = sortedScores.length;
	let stars = chalk.gray('☆☆☆');

	// Tertile cutoffs over all valid scores (sorted descending).
	// NOTE(review): Math.floor(n / 3) - 1 clamps to index 0 when n < 3, so
	// very small model lists skew toward three stars — confirm intended.
	if (n > 0) {
		const topThirdIndex = Math.max(0, Math.floor(n / 3) - 1);
		const midThirdIndex = Math.max(0, Math.floor((2 * n) / 3) - 1);
		if (score >= sortedScores[topThirdIndex]) stars = chalk.yellow('★★★');
		else if (score >= sortedScores[midThirdIndex])
			stars = chalk.yellow('★★') + chalk.gray('☆');
		else stars = chalk.yellow('★') + chalk.gray('☆☆');
	}
	return `${formattedPercentage} ${stars}`;
};

const formatCost = (costObj) => {
	// ... (Implementation from previous version or refine) ...
	if (!costObj) return 'N/A';
	if (costObj.input === 0 && costObj.output === 0) {
		return chalk.green('Free');
	}
	// NOTE(review): fractional costs use toFixed(2); sub-cent values
	// (e.g. 0.001) display as $0.00 — confirm that precision is acceptable.
	const formatSingleCost = (costValue) => {
		if (costValue === null || costValue === undefined) return 'N/A';
		const isInteger = Number.isInteger(costValue);
		return `$${costValue.toFixed(isInteger ? 0 : 2)}`;
	};
	return `${formatSingleCost(costObj.input)} in, ${formatSingleCost(costObj.output)} out`;
};

// --- Display Functions ---

/**
 * Displays the currently configured active models.
 * @param {ConfigData} configData - The active configuration data.
 * @param {AvailableModel[]} allAvailableModels - Needed for SWE score tertiles.
 */
function displayModelConfiguration(configData, allAvailableModels = []) {
	console.log(chalk.cyan.bold('\nActive Model Configuration:'));
	const active = configData.activeModels;
	const activeTable = new Table({
		head: [
			'Role',
			'Provider',
			'Model ID',
			'SWE Score',
			'Cost ($/1M tkns)'
			// 'API Key Status' // Removed, handled by separate displayApiKeyStatus
		].map((h) => chalk.cyan.bold(h)),
		colWidths: [10, 14, 30, 18, 20 /*, 28 */], // Adjusted widths
		style: { head: ['cyan', 'bold'] }
	});

	activeTable.push([
		chalk.white('Main'),
		active.main.provider,
		active.main.modelId,
		formatSweScoreWithTertileStars(active.main.sweScore, allAvailableModels),
		formatCost(active.main.cost)
		// getCombinedStatus(active.main.keyStatus) // Removed
	]);
	activeTable.push([
		chalk.white('Research'),
		active.research.provider,
		active.research.modelId,
		formatSweScoreWithTertileStars(
			active.research.sweScore,
			allAvailableModels
		),
		formatCost(active.research.cost)
		// getCombinedStatus(active.research.keyStatus) // Removed
	]);
	if (active.fallback && active.fallback.provider && active.fallback.modelId) {
		activeTable.push([
			chalk.white('Fallback'),
			active.fallback.provider,
			active.fallback.modelId,
			formatSweScoreWithTertileStars(
				active.fallback.sweScore,
				allAvailableModels
			),
			formatCost(active.fallback.cost)
			// getCombinedStatus(active.fallback.keyStatus) // Removed
		]);
	} else {
		activeTable.push([
			chalk.white('Fallback'),
			chalk.gray('-'),
			chalk.gray('(Not Set)'),
			chalk.gray('-'),
			chalk.gray('-')
			// chalk.gray('-') // Removed
		]);
	}
	console.log(activeTable.toString());
}

/**
 * Displays the list of available models not currently configured.
 * @param {AvailableModel[]} availableModels - List of available models.
 */
function displayAvailableModels(availableModels) {
	if (!availableModels || availableModels.length === 0) {
		console.log(
			chalk.gray('\n(No other models available or all are configured)')
		);
		return;
	}

	console.log(chalk.cyan.bold('\nOther Available Models:'));
	const availableTable = new Table({
		head: ['Provider', 'Model ID', 'SWE Score', 'Cost ($/1M tkns)'].map((h) =>
			chalk.cyan.bold(h)
		),
		colWidths: [15, 40, 18, 25],
		style: { head: ['cyan', 'bold'] }
	});

	availableModels.forEach((model) => {
		availableTable.push([
			model.provider,
			model.modelId,
			formatSweScoreWithTertileStars(model.sweScore, availableModels), // Pass itself for comparison
			formatCost(model.cost)
		]);
	});
	console.log(availableTable.toString());

	// --- Suggested Actions Section (moved here from models command) ---
	console.log(
		boxen(
			chalk.white.bold('Next Steps:') +
				'\n' +
				chalk.cyan(
					`1. Set main model: ${chalk.yellow('task-master models --set-main <model_id>')}`
				) +
				'\n' +
				chalk.cyan(
					`2. Set research model: ${chalk.yellow('task-master models --set-research <model_id>')}`
				) +
				'\n' +
				chalk.cyan(
					`3. Set fallback model: ${chalk.yellow('task-master models --set-fallback <model_id>')}`
				) +
				'\n' +
				chalk.cyan(
					`4. Run interactive setup: ${chalk.yellow('task-master models --setup')}`
				) +
				'\n' +
				chalk.cyan(
					`5. Use custom ollama/openrouter models: ${chalk.yellow('task-master models --openrouter|ollama --set-main|research|fallback <model_id>')}`
				),
			{
				padding: 1,
				borderColor: 'yellow',
				borderStyle: 'round',
				margin: { top: 1 }
			}
		)
	);
}

/**
 * Displays AI usage telemetry summary in the CLI.
 * @param {object} telemetryData - The telemetry data object.
 * @param {string} outputType - 'cli' or 'mcp' (though typically only called for 'cli').
 */
function displayAiUsageSummary(telemetryData, outputType = 'cli') {
	if (
		(outputType !== 'cli' && outputType !== 'text') ||
		!telemetryData ||
		isSilentMode()
	) {
		return; // Only display for CLI and if data exists and not in silent mode
	}

	const {
		modelUsed,
		providerName,
		inputTokens,
		outputTokens,
		totalTokens,
		totalCost,
		commandName
	} = telemetryData;

	// NOTE(review): assumes every destructured telemetry field is present;
	// totalCost in particular is formatted numerically below — verify that
	// all providers populate it.
	let summary = chalk.bold.blue('AI Usage Summary:') + '\n';
	summary += chalk.gray(` Command: ${commandName}\n`);
	summary += chalk.gray(` Provider: ${providerName}\n`);
	summary += chalk.gray(` Model: ${modelUsed}\n`);
	summary += chalk.gray(
		` Tokens: ${totalTokens} (Input: ${inputTokens}, Output: ${outputTokens})\n`
	);
	summary += chalk.gray(` Est. 
Cost: $${totalCost.toFixed(6)}`);\n\n\tconsole.log(\n\t\tboxen(summary, {\n\t\t\tpadding: 1,\n\t\t\tmargin: { top: 1 },\n\t\t\tborderColor: 'blue',\n\t\t\tborderStyle: 'round',\n\t\t\ttitle: '💡 Telemetry',\n\t\t\ttitleAlignment: 'center'\n\t\t})\n\t);\n}\n\n/**\n * Display multiple tasks in a compact summary format with interactive drill-down\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {Array<string>} taskIds - Array of task IDs to display\n * @param {string} complexityReportPath - Path to complexity report\n * @param {string} statusFilter - Optional status filter for subtasks\n * @param {Object} context - Context object containing projectRoot and tag\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n */\nasync function displayMultipleTasksSummary(\n\ttasksPath,\n\ttaskIds,\n\tcomplexityReportPath = null,\n\tstatusFilter = null,\n\tcontext = {}\n) {\n\tdisplayBanner();\n\n\t// Extract projectRoot and tag from context\n\tconst projectRoot = context.projectRoot || null;\n\tconst tag = context.tag || null;\n\n\t// Read the tasks file with proper projectRoot for tag resolution\n\tconst data = readJSON(tasksPath, projectRoot, tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found.');\n\t\tprocess.exit(1);\n\t}\n\n\t// Read complexity report once\n\tconst complexityReport = readComplexityReport(complexityReportPath);\n\n\t// Find all requested tasks\n\tconst foundTasks = [];\n\tconst notFoundIds = [];\n\n\ttaskIds.forEach((id) => {\n\t\tconst { task } = findTaskById(\n\t\t\tdata.tasks,\n\t\t\tid,\n\t\t\tcomplexityReport,\n\t\t\tstatusFilter\n\t\t);\n\t\tif (task) {\n\t\t\tfoundTasks.push(task);\n\t\t} else {\n\t\t\tnotFoundIds.push(id);\n\t\t}\n\t});\n\n\t// Show not found tasks\n\tif (notFoundIds.length > 0) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.yellow(`Tasks not found: ${notFoundIds.join(', ')}`), {\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 
},\n\t\t\t\tborderColor: 'yellow',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t})\n\t\t);\n\t}\n\n\tif (foundTasks.length === 0) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.red('No valid tasks found to display'), {\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tborderColor: 'red',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1 }\n\t\t\t})\n\t\t);\n\t\treturn;\n\t}\n\n\t// Display header\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.white.bold(\n\t\t\t\t`Task Summary (${foundTasks.length} task${foundTasks.length === 1 ? '' : 's'})`\n\t\t\t),\n\t\t\t{\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tborderColor: 'blue',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t}\n\t\t)\n\t);\n\n\t// Calculate terminal width for responsive layout\n\tconst terminalWidth = process.stdout.columns || 100;\n\tconst availableWidth = terminalWidth - 10;\n\n\t// Create compact summary table\n\tconst summaryTable = new Table({\n\t\thead: [\n\t\t\tchalk.cyan.bold('ID'),\n\t\t\tchalk.cyan.bold('Title'),\n\t\t\tchalk.cyan.bold('Status'),\n\t\t\tchalk.cyan.bold('Priority'),\n\t\t\tchalk.cyan.bold('Subtasks'),\n\t\t\tchalk.cyan.bold('Progress')\n\t\t],\n\t\tcolWidths: [\n\t\t\tMath.floor(availableWidth * 0.08), // ID: 8%\n\t\t\tMath.floor(availableWidth * 0.35), // Title: 35%\n\t\t\tMath.floor(availableWidth * 0.12), // Status: 12%\n\t\t\tMath.floor(availableWidth * 0.1), // Priority: 10%\n\t\t\tMath.floor(availableWidth * 0.15), // Subtasks: 15%\n\t\t\tMath.floor(availableWidth * 0.2) // Progress: 20%\n\t\t],\n\t\tstyle: {\n\t\t\thead: [],\n\t\t\tborder: [],\n\t\t\t'padding-top': 0,\n\t\t\t'padding-bottom': 0,\n\t\t\tcompact: true\n\t\t},\n\t\tchars: { mid: '', 'left-mid': '', 'mid-mid': '', 'right-mid': '' },\n\t\twordWrap: true\n\t});\n\n\t// Add each task to the summary table\n\tfoundTasks.forEach((task) => {\n\t\t// Handle subtask case\n\t\tif (task.isSubtask || task.parentTask) 
{\n\t\t\tconst parentId = task.parentTask ? task.parentTask.id : 'Unknown';\n\t\t\tsummaryTable.push([\n\t\t\t\t`${parentId}.${task.id}`,\n\t\t\t\ttruncate(task.title, Math.floor(availableWidth * 0.35) - 3),\n\t\t\t\tgetStatusWithColor(task.status || 'pending', true),\n\t\t\t\tchalk.gray('(subtask)'),\n\t\t\t\tchalk.gray('N/A'),\n\t\t\t\tchalk.gray('N/A')\n\t\t\t]);\n\t\t\treturn;\n\t\t}\n\n\t\t// Handle regular task\n\t\tconst priorityColors = {\n\t\t\thigh: chalk.red.bold,\n\t\t\tmedium: chalk.yellow,\n\t\t\tlow: chalk.gray\n\t\t};\n\t\tconst priorityColor =\n\t\t\tpriorityColors[task.priority || 'medium'] || chalk.white;\n\n\t\t// Calculate subtask summary\n\t\tlet subtaskSummary = chalk.gray('None');\n\t\tlet progressBar = chalk.gray('N/A');\n\n\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\tconst total = task.subtasks.length;\n\t\t\tconst completed = task.subtasks.filter(\n\t\t\t\t(st) => st.status === 'done' || st.status === 'completed'\n\t\t\t).length;\n\t\t\tconst inProgress = task.subtasks.filter(\n\t\t\t\t(st) => st.status === 'in-progress'\n\t\t\t).length;\n\t\t\tconst pending = task.subtasks.filter(\n\t\t\t\t(st) => st.status === 'pending'\n\t\t\t).length;\n\n\t\t\t// Compact subtask count with status indicators\n\t\t\tsubtaskSummary = `${chalk.green(completed)}/${total}`;\n\t\t\tif (inProgress > 0)\n\t\t\t\tsubtaskSummary += ` ${chalk.hex('#FFA500')(`+${inProgress}`)}`;\n\t\t\tif (pending > 0) subtaskSummary += ` ${chalk.yellow(`(${pending})`)}`;\n\n\t\t\t// Mini progress bar (shorter than usual)\n\t\t\tconst completionPercentage = (completed / total) * 100;\n\t\t\tconst barLength = 8; // Compact bar\n\t\t\tconst statusBreakdown = {\n\t\t\t\t'in-progress': (inProgress / total) * 100,\n\t\t\t\tpending: (pending / total) * 100\n\t\t\t};\n\t\t\tprogressBar = 
createProgressBar(\n\t\t\t\tcompletionPercentage,\n\t\t\t\tbarLength,\n\t\t\t\tstatusBreakdown\n\t\t\t);\n\t\t}\n\n\t\tsummaryTable.push([\n\t\t\ttask.id.toString(),\n\t\t\ttruncate(task.title, Math.floor(availableWidth * 0.35) - 3),\n\t\t\tgetStatusWithColor(task.status || 'pending', true),\n\t\t\tpriorityColor(task.priority || 'medium'),\n\t\t\tsubtaskSummary,\n\t\t\tprogressBar\n\t\t]);\n\t});\n\n\tconsole.log(summaryTable.toString());\n\n\t// Interactive drill-down prompt\n\tif (foundTasks.length > 1) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Interactive Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\tchalk.cyan('• Press Enter to view available actions for all tasks') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t'• Type a task ID (e.g., \"3\" or \"3.2\") to view that specific task'\n\t\t\t\t\t) +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\tchalk.cyan('• Type \"q\" to quit'),\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\n\t\tconst rl = readline.createInterface({\n\t\t\tinput: process.stdin,\n\t\t\toutput: process.stdout\n\t\t});\n\n\t\tconst choice = await new Promise((resolve) => {\n\t\t\trl.question(chalk.cyan('Your choice: '), resolve);\n\t\t});\n\t\trl.close();\n\n\t\tif (choice.toLowerCase() === 'q') {\n\t\t\treturn;\n\t\t} else if (choice.trim() === '') {\n\t\t\t// Show action menu for selected tasks\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.white.bold('Available Actions for Selected Tasks:') +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan('1.') +\n\t\t\t\t\t\t' Mark all as in-progress' +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan('2.') +\n\t\t\t\t\t\t' Mark all as done' +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan('3.') +\n\t\t\t\t\t\t' Show next available task' +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan('4.') +\n\t\t\t\t\t\t' Expand all tasks (generate subtasks)' 
+\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan('5.') +\n\t\t\t\t\t\t' View dependency relationships' +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan('6.') +\n\t\t\t\t\t\t' Generate task files' +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.gray('Or type a task ID to view details'),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\n\t\t\tconst rl2 = readline.createInterface({\n\t\t\t\tinput: process.stdin,\n\t\t\t\toutput: process.stdout\n\t\t\t});\n\n\t\t\tconst actionChoice = await new Promise((resolve) => {\n\t\t\t\trl2.question(chalk.cyan('Choose action (1-6): '), resolve);\n\t\t\t});\n\t\t\trl2.close();\n\n\t\t\tconst taskIdList = foundTasks.map((t) => t.id).join(',');\n\n\t\t\tswitch (actionChoice.trim()) {\n\t\t\t\tcase '1':\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t`\\n→ Command: task-master set-status --id=${taskIdList} --status=in-progress`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t'✓ Copy and run this command to mark all tasks as in-progress'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase '2':\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t`\\n→ Command: task-master set-status --id=${taskIdList} --status=done`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green('✓ Copy and run this command to mark all tasks as done')\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase '3':\n\t\t\t\t\tconsole.log(chalk.blue(`\\n→ Command: task-master next`));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t'✓ Copy and run this command to see the next available task'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase '4':\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t`\\n→ Command: task-master expand --id=${taskIdList} 
--research`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t'✓ Copy and run this command to expand all selected tasks into subtasks'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase '5': {\n\t\t\t\t\t// Show dependency visualization\n\t\t\t\t\tconsole.log(chalk.white.bold('\\nDependency Relationships:'));\n\t\t\t\t\tlet hasDependencies = false;\n\t\t\t\t\tfoundTasks.forEach((task) => {\n\t\t\t\t\t\tif (task.dependencies && task.dependencies.length > 0) {\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t\t\t`Task ${task.id} depends on: ${task.dependencies.join(', ')}`\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\thasDependencies = true;\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t\tif (!hasDependencies) {\n\t\t\t\t\t\tconsole.log(chalk.gray('No dependencies found for selected tasks'));\n\t\t\t\t\t}\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase '6':\n\t\t\t\t\tconsole.log(chalk.blue(`\\n→ Command: task-master generate`));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green('✓ Copy and run this command to generate task files')\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tdefault:\n\t\t\t\t\tif (actionChoice.trim().length > 0) {\n\t\t\t\t\t\tconsole.log(chalk.yellow(`Invalid choice: ${actionChoice.trim()}`));\n\t\t\t\t\t\tconsole.log(chalk.gray('Please choose 1-6 or type a task ID'));\n\t\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// Show specific task\n\t\t\tawait displayTaskById(\n\t\t\t\ttasksPath,\n\t\t\t\tchoice.trim(),\n\t\t\t\tcomplexityReportPath,\n\t\t\t\tstatusFilter,\n\t\t\t\tcontext\n\t\t\t);\n\t\t}\n\t} else {\n\t\t// Single task - show suggested actions\n\t\tconst task = foundTasks[0];\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Suggested Actions:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t`${chalk.cyan('1.')} View full details: ${chalk.yellow(`task-master show ${task.id}`)}\\n` +\n\t\t\t\t\t`${chalk.cyan('2.')} Mark as in-progress: ${chalk.yellow(`task-master set-status 
--id=${task.id} --status=in-progress`)}\\n` +\n\t\t\t\t\t`${chalk.cyan('3.')} Mark as done: ${chalk.yellow(`task-master set-status --id=${task.id} --status=done`)}`,\n\t\t\t\t{\n\t\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n}\n\n/**\n * Display context analysis results with beautiful formatting\n * @param {Object} analysisData - Analysis data from ContextGatherer\n * @param {string} semanticQuery - The original query used for semantic search\n * @param {number} contextSize - Size of gathered context in characters\n */\nfunction displayContextAnalysis(analysisData, semanticQuery, contextSize) {\n\tif (isSilentMode() || !analysisData) return;\n\n\tconst { highRelevance, mediumRelevance, recentTasks, allRelevantTasks } =\n\t\tanalysisData;\n\n\t// Create the context analysis display\n\tlet analysisContent = chalk.white.bold('Context Analysis') + '\\n\\n';\n\n\t// Query info\n\tanalysisContent +=\n\t\tchalk.gray('Query: ') + chalk.white(`\"${semanticQuery}\"`) + '\\n';\n\tanalysisContent +=\n\t\tchalk.gray('Context size: ') +\n\t\tchalk.cyan(`${contextSize.toLocaleString()} characters`) +\n\t\t'\\n';\n\tanalysisContent +=\n\t\tchalk.gray('Tasks found: ') +\n\t\tchalk.yellow(`${allRelevantTasks.length} relevant tasks`) +\n\t\t'\\n\\n';\n\n\t// High relevance matches\n\tif (highRelevance.length > 0) {\n\t\tanalysisContent += chalk.green.bold('🎯 High Relevance Matches:') + '\\n';\n\t\thighRelevance.slice(0, 3).forEach((task) => {\n\t\t\tanalysisContent +=\n\t\t\t\tchalk.green(` • Task ${task.id}: ${truncate(task.title, 50)}`) + '\\n';\n\t\t});\n\t\tif (highRelevance.length > 3) {\n\t\t\tanalysisContent +=\n\t\t\t\tchalk.green(\n\t\t\t\t\t` • ... 
and ${highRelevance.length - 3} more high relevance tasks`\n\t\t\t\t) + '\\n';\n\t\t}\n\t\tanalysisContent += '\\n';\n\t}\n\n\t// Medium relevance matches\n\tif (mediumRelevance.length > 0) {\n\t\tanalysisContent += chalk.yellow.bold('📋 Medium Relevance Matches:') + '\\n';\n\t\tmediumRelevance.slice(0, 3).forEach((task) => {\n\t\t\tanalysisContent +=\n\t\t\t\tchalk.yellow(` • Task ${task.id}: ${truncate(task.title, 50)}`) + '\\n';\n\t\t});\n\t\tif (mediumRelevance.length > 3) {\n\t\t\tanalysisContent +=\n\t\t\t\tchalk.yellow(\n\t\t\t\t\t` • ... and ${mediumRelevance.length - 3} more medium relevance tasks`\n\t\t\t\t) + '\\n';\n\t\t}\n\t\tanalysisContent += '\\n';\n\t}\n\n\t// Recent tasks (if they contributed)\n\tconst recentTasksNotInRelevance = recentTasks.filter(\n\t\t(task) =>\n\t\t\t!highRelevance.some((hr) => hr.id === task.id) &&\n\t\t\t!mediumRelevance.some((mr) => mr.id === task.id)\n\t);\n\n\tif (recentTasksNotInRelevance.length > 0) {\n\t\tanalysisContent += chalk.cyan.bold('🕒 Recent Tasks (for context):') + '\\n';\n\t\trecentTasksNotInRelevance.slice(0, 2).forEach((task) => {\n\t\t\tanalysisContent +=\n\t\t\t\tchalk.cyan(` • Task ${task.id}: ${truncate(task.title, 50)}`) + '\\n';\n\t\t});\n\t\tif (recentTasksNotInRelevance.length > 2) {\n\t\t\tanalysisContent +=\n\t\t\t\tchalk.cyan(\n\t\t\t\t\t` • ... 
and ${recentTasksNotInRelevance.length - 2} more recent tasks`\n\t\t\t\t) + '\\n';\n\t\t}\n\t}\n\n\tconsole.log(\n\t\tboxen(analysisContent, {\n\t\t\tpadding: { top: 1, bottom: 1, left: 2, right: 2 },\n\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\tborderStyle: 'round',\n\t\t\tborderColor: 'blue',\n\t\t\ttitle: chalk.blue('🔍 Context Gathering'),\n\t\t\ttitleAlignment: 'center'\n\t\t})\n\t);\n}\n\n// Export UI functions\nexport {\n\tdisplayBanner,\n\tdisplayTaggedTasksFYI,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tcreateProgressBar,\n\tgetStatusWithColor,\n\tformatDependenciesWithStatus,\n\tdisplayHelp,\n\tgetComplexityWithColor,\n\tdisplayNextTask,\n\tdisplayTaskById,\n\tdisplayComplexityReport,\n\tgenerateComplexityAnalysisPrompt,\n\tconfirmTaskOverwrite,\n\tdisplayApiKeyStatus,\n\tdisplayModelConfiguration,\n\tdisplayAvailableModels,\n\tdisplayAiUsageSummary,\n\tdisplayMultipleTasksSummary,\n\tsucceedLoadingIndicator,\n\tfailLoadingIndicator,\n\twarnLoadingIndicator,\n\tinfoLoadingIndicator,\n\tdisplayContextAnalysis,\n\tdisplayCurrentTagIndicator\n};\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/remove-subtask.js", "/**\n * Direct function wrapper for removeSubtask\n */\n\nimport { removeSubtask } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Remove a subtask from its parent task\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.id - Subtask ID in format \"parentId.subtaskId\" (required)\n * @param {boolean} [args.convert] - Whether to convert the subtask to a standalone task\n * @param {boolean} [args.skipGenerate] - Skip regenerating task files\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger 
object\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function removeSubtaskDirect(args, log) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, id, convert, skipGenerate, projectRoot, tag } = args;\n\ttry {\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\tlog.info(`Removing subtask with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('removeSubtaskDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode(); // Disable before returning\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (!id) {\n\t\t\tdisableSilentMode(); // Disable before returning\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage:\n\t\t\t\t\t\t'Subtask ID is required and must be in format \"parentId.subtaskId\"'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Validate subtask ID format\n\t\tif (!id.includes('.')) {\n\t\t\tdisableSilentMode(); // Disable before returning\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: `Invalid subtask ID format: ${id}. 
Expected format: \"parentId.subtaskId\"`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Convert convertToTask to a boolean\n\t\tconst convertToTask = convert === true;\n\n\t\t// Determine if we should generate files\n\t\tconst generateFiles = !skipGenerate;\n\n\t\tlog.info(\n\t\t\t`Removing subtask ${id} (convertToTask: ${convertToTask}, generateFiles: ${generateFiles})`\n\t\t);\n\n\t\t// Use the provided tasksPath\n\t\tconst result = await removeSubtask(\n\t\t\ttasksPath,\n\t\t\tid,\n\t\t\tconvertToTask,\n\t\t\tgenerateFiles,\n\t\t\t{\n\t\t\t\tprojectRoot,\n\t\t\t\ttag\n\t\t\t}\n\t\t);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\tif (convertToTask && result) {\n\t\t\t// Return info about the converted task\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Subtask ${id} successfully converted to task #${result.id}`,\n\t\t\t\t\ttask: result\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// Return simple success message for deletion\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: `Subtask ${id} successfully removed`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\t// Ensure silent mode is disabled even if an outer error occurs\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in removeSubtaskDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/task-manager/list-tasks.js", "import chalk from 'chalk';\nimport boxen from 'boxen';\nimport Table from 'cli-table3';\n\nimport {\n\tlog,\n\treadJSON,\n\ttruncate,\n\treadComplexityReport,\n\taddComplexityToTask\n} from '../utils.js';\nimport findNextTask from './find-next-task.js';\n\nimport {\n\tdisplayBanner,\n\tgetStatusWithColor,\n\tformatDependenciesWithStatus,\n\tgetComplexityWithColor,\n\tcreateProgressBar\n} from 
'../ui.js';\n\n/**\n * List all tasks\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} statusFilter - Filter by status (single status or comma-separated list, e.g., 'pending' or 'blocked,deferred')\n * @param {string} reportPath - Path to the complexity report\n * @param {boolean} withSubtasks - Whether to show subtasks\n * @param {string} outputFormat - Output format (text or json)\n * @param {Object} context - Context object (required)\n * @param {string} context.projectRoot - Project root path\n * @param {string} context.tag - Tag for the task\n * @returns {Object} - Task list result for json format\n */\nfunction listTasks(\n\ttasksPath,\n\tstatusFilter,\n\treportPath = null,\n\twithSubtasks = false,\n\toutputFormat = 'text',\n\tcontext = {}\n) {\n\tconst { projectRoot, tag } = context;\n\ttry {\n\t\t// Extract projectRoot from context if provided\n\t\tconst data = readJSON(tasksPath, projectRoot, tag); // Pass projectRoot to readJSON\n\t\tif (!data || !data.tasks) {\n\t\t\tthrow new Error(`No valid tasks found in ${tasksPath}`);\n\t\t}\n\n\t\t// Add complexity scores to tasks if report exists\n\t\t// `reportPath` is already tag-aware (resolved at the CLI boundary).\n\t\tconst complexityReport = readComplexityReport(reportPath);\n\t\t// Apply complexity scores to tasks\n\t\tif (complexityReport && complexityReport.complexityAnalysis) {\n\t\t\tdata.tasks.forEach((task) => addComplexityToTask(task, complexityReport));\n\t\t}\n\n\t\t// Filter tasks by status if specified - now supports comma-separated statuses\n\t\tlet filteredTasks;\n\t\tif (statusFilter && statusFilter.toLowerCase() !== 'all') {\n\t\t\t// Handle comma-separated statuses\n\t\t\tconst allowedStatuses = statusFilter\n\t\t\t\t.split(',')\n\t\t\t\t.map((s) => s.trim().toLowerCase())\n\t\t\t\t.filter((s) => s.length > 0); // Remove empty strings\n\n\t\t\tfilteredTasks = data.tasks.filter(\n\t\t\t\t(task) =>\n\t\t\t\t\ttask.status && 
allowedStatuses.includes(task.status.toLowerCase())\n\t\t\t);\n\t\t} else {\n\t\t\t// Default to all tasks if no filter or filter is 'all'\n\t\t\tfilteredTasks = data.tasks;\n\t\t}\n\n\t\t// Calculate completion statistics\n\t\tconst totalTasks = data.tasks.length;\n\t\tconst completedTasks = data.tasks.filter(\n\t\t\t(task) => task.status === 'done' || task.status === 'completed'\n\t\t).length;\n\t\tconst completionPercentage =\n\t\t\ttotalTasks > 0 ? (completedTasks / totalTasks) * 100 : 0;\n\n\t\t// Count statuses for tasks\n\t\tconst doneCount = completedTasks;\n\t\tconst inProgressCount = data.tasks.filter(\n\t\t\t(task) => task.status === 'in-progress'\n\t\t).length;\n\t\tconst pendingCount = data.tasks.filter(\n\t\t\t(task) => task.status === 'pending'\n\t\t).length;\n\t\tconst blockedCount = data.tasks.filter(\n\t\t\t(task) => task.status === 'blocked'\n\t\t).length;\n\t\tconst deferredCount = data.tasks.filter(\n\t\t\t(task) => task.status === 'deferred'\n\t\t).length;\n\t\tconst cancelledCount = data.tasks.filter(\n\t\t\t(task) => task.status === 'cancelled'\n\t\t).length;\n\t\tconst reviewCount = data.tasks.filter(\n\t\t\t(task) => task.status === 'review'\n\t\t).length;\n\n\t\t// Count subtasks and their statuses\n\t\tlet totalSubtasks = 0;\n\t\tlet completedSubtasks = 0;\n\t\tlet inProgressSubtasks = 0;\n\t\tlet pendingSubtasks = 0;\n\t\tlet blockedSubtasks = 0;\n\t\tlet deferredSubtasks = 0;\n\t\tlet cancelledSubtasks = 0;\n\t\tlet reviewSubtasks = 0;\n\n\t\tdata.tasks.forEach((task) => {\n\t\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\t\ttotalSubtasks += task.subtasks.length;\n\t\t\t\tcompletedSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 'done' || st.status === 'completed'\n\t\t\t\t).length;\n\t\t\t\tinProgressSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 'in-progress'\n\t\t\t\t).length;\n\t\t\t\tpendingSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 
'pending'\n\t\t\t\t).length;\n\t\t\t\tblockedSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 'blocked'\n\t\t\t\t).length;\n\t\t\t\tdeferredSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 'deferred'\n\t\t\t\t).length;\n\t\t\t\tcancelledSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 'cancelled'\n\t\t\t\t).length;\n\t\t\t\treviewSubtasks += task.subtasks.filter(\n\t\t\t\t\t(st) => st.status === 'review'\n\t\t\t\t).length;\n\t\t\t}\n\t\t});\n\n\t\tconst subtaskCompletionPercentage =\n\t\t\ttotalSubtasks > 0 ? (completedSubtasks / totalSubtasks) * 100 : 0;\n\n\t\t// Calculate dependency statistics (moved up to be available for all output formats)\n\t\tconst completedTaskIds = new Set(\n\t\t\tdata.tasks\n\t\t\t\t.filter((t) => t.status === 'done' || t.status === 'completed')\n\t\t\t\t.map((t) => t.id)\n\t\t);\n\n\t\tconst tasksWithNoDeps = data.tasks.filter(\n\t\t\t(t) =>\n\t\t\t\tt.status !== 'done' &&\n\t\t\t\tt.status !== 'completed' &&\n\t\t\t\t(!t.dependencies || t.dependencies.length === 0)\n\t\t).length;\n\n\t\tconst tasksWithAllDepsSatisfied = data.tasks.filter(\n\t\t\t(t) =>\n\t\t\t\tt.status !== 'done' &&\n\t\t\t\tt.status !== 'completed' &&\n\t\t\t\tt.dependencies &&\n\t\t\t\tt.dependencies.length > 0 &&\n\t\t\t\tt.dependencies.every((depId) => completedTaskIds.has(depId))\n\t\t).length;\n\n\t\tconst tasksWithUnsatisfiedDeps = data.tasks.filter(\n\t\t\t(t) =>\n\t\t\t\tt.status !== 'done' &&\n\t\t\t\tt.status !== 'completed' &&\n\t\t\t\tt.dependencies &&\n\t\t\t\tt.dependencies.length > 0 &&\n\t\t\t\t!t.dependencies.every((depId) => completedTaskIds.has(depId))\n\t\t).length;\n\n\t\t// Calculate total tasks ready to work on (no deps + satisfied deps)\n\t\tconst tasksReadyToWork = tasksWithNoDeps + tasksWithAllDepsSatisfied;\n\n\t\t// Calculate most depended-on tasks\n\t\tconst dependencyCount = {};\n\t\tdata.tasks.forEach((task) => {\n\t\t\tif (task.dependencies && task.dependencies.length > 0) 
{\n\t\t\t\ttask.dependencies.forEach((depId) => {\n\t\t\t\t\tdependencyCount[depId] = (dependencyCount[depId] || 0) + 1;\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Find the most depended-on task\n\t\tlet mostDependedOnTaskId = null;\n\t\tlet maxDependents = 0;\n\n\t\tfor (const [taskId, count] of Object.entries(dependencyCount)) {\n\t\t\tif (count > maxDependents) {\n\t\t\t\tmaxDependents = count;\n\t\t\t\tmostDependedOnTaskId = parseInt(taskId);\n\t\t\t}\n\t\t}\n\n\t\t// Get the most depended-on task\n\t\tconst mostDependedOnTask =\n\t\t\tmostDependedOnTaskId !== null\n\t\t\t\t? data.tasks.find((t) => t.id === mostDependedOnTaskId)\n\t\t\t\t: null;\n\n\t\t// Calculate average dependencies per task\n\t\tconst totalDependencies = data.tasks.reduce(\n\t\t\t(sum, task) => sum + (task.dependencies ? task.dependencies.length : 0),\n\t\t\t0\n\t\t);\n\t\tconst avgDependenciesPerTask = totalDependencies / data.tasks.length;\n\n\t\t// Find next task to work on, passing the complexity report\n\t\tconst nextItem = findNextTask(data.tasks, complexityReport);\n\n\t\t// For JSON output, return structured data\n\t\tif (outputFormat === 'json') {\n\t\t\t// *** Modification: Remove 'details' field for JSON output ***\n\t\t\tconst tasksWithoutDetails = filteredTasks.map((task) => {\n\t\t\t\t// <-- USES filteredTasks!\n\t\t\t\t// Omit 'details' from the parent task\n\t\t\t\tconst { details, ...taskRest } = task;\n\n\t\t\t\t// If subtasks exist, omit 'details' from them too\n\t\t\t\tif (taskRest.subtasks && Array.isArray(taskRest.subtasks)) {\n\t\t\t\t\ttaskRest.subtasks = taskRest.subtasks.map((subtask) => {\n\t\t\t\t\t\tconst { details: subtaskDetails, ...subtaskRest } = subtask;\n\t\t\t\t\t\treturn subtaskRest;\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\treturn taskRest;\n\t\t\t});\n\t\t\t// *** End of Modification ***\n\n\t\t\treturn {\n\t\t\t\ttasks: tasksWithoutDetails, // <--- THIS IS THE ARRAY BEING RETURNED\n\t\t\t\tfilter: statusFilter || 'all', // Return the actual filter 
used\n\t\t\t\tstats: {\n\t\t\t\t\ttotal: totalTasks,\n\t\t\t\t\tcompleted: doneCount,\n\t\t\t\t\tinProgress: inProgressCount,\n\t\t\t\t\tpending: pendingCount,\n\t\t\t\t\tblocked: blockedCount,\n\t\t\t\t\tdeferred: deferredCount,\n\t\t\t\t\tcancelled: cancelledCount,\n\t\t\t\t\treview: reviewCount,\n\t\t\t\t\tcompletionPercentage,\n\t\t\t\t\tsubtasks: {\n\t\t\t\t\t\ttotal: totalSubtasks,\n\t\t\t\t\t\tcompleted: completedSubtasks,\n\t\t\t\t\t\tinProgress: inProgressSubtasks,\n\t\t\t\t\t\tpending: pendingSubtasks,\n\t\t\t\t\t\tblocked: blockedSubtasks,\n\t\t\t\t\t\tdeferred: deferredSubtasks,\n\t\t\t\t\t\tcancelled: cancelledSubtasks,\n\t\t\t\t\t\tcompletionPercentage: subtaskCompletionPercentage\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// For markdown-readme output, return formatted markdown\n\t\tif (outputFormat === 'markdown-readme') {\n\t\t\treturn generateMarkdownOutput(data, filteredTasks, {\n\t\t\t\ttotalTasks,\n\t\t\t\tcompletedTasks,\n\t\t\t\tcompletionPercentage,\n\t\t\t\tdoneCount,\n\t\t\t\tinProgressCount,\n\t\t\t\tpendingCount,\n\t\t\t\tblockedCount,\n\t\t\t\tdeferredCount,\n\t\t\t\tcancelledCount,\n\t\t\t\ttotalSubtasks,\n\t\t\t\tcompletedSubtasks,\n\t\t\t\tsubtaskCompletionPercentage,\n\t\t\t\tinProgressSubtasks,\n\t\t\t\tpendingSubtasks,\n\t\t\t\tblockedSubtasks,\n\t\t\t\tdeferredSubtasks,\n\t\t\t\tcancelledSubtasks,\n\t\t\t\treviewSubtasks,\n\t\t\t\ttasksWithNoDeps,\n\t\t\t\ttasksReadyToWork,\n\t\t\t\ttasksWithUnsatisfiedDeps,\n\t\t\t\tmostDependedOnTask,\n\t\t\t\tmostDependedOnTaskId,\n\t\t\t\tmaxDependents,\n\t\t\t\tavgDependenciesPerTask,\n\t\t\t\tcomplexityReport,\n\t\t\t\twithSubtasks,\n\t\t\t\tnextItem\n\t\t\t});\n\t\t}\n\n\t\t// ... existing code for text output ...\n\n\t\t// Calculate status breakdowns as percentages of total\n\t\tconst taskStatusBreakdown = {\n\t\t\t'in-progress': totalTasks > 0 ? (inProgressCount / totalTasks) * 100 : 0,\n\t\t\tpending: totalTasks > 0 ? 
(pendingCount / totalTasks) * 100 : 0,\n\t\t\tblocked: totalTasks > 0 ? (blockedCount / totalTasks) * 100 : 0,\n\t\t\tdeferred: totalTasks > 0 ? (deferredCount / totalTasks) * 100 : 0,\n\t\t\tcancelled: totalTasks > 0 ? (cancelledCount / totalTasks) * 100 : 0,\n\t\t\treview: totalTasks > 0 ? (reviewCount / totalTasks) * 100 : 0\n\t\t};\n\n\t\tconst subtaskStatusBreakdown = {\n\t\t\t'in-progress':\n\t\t\t\ttotalSubtasks > 0 ? (inProgressSubtasks / totalSubtasks) * 100 : 0,\n\t\t\tpending: totalSubtasks > 0 ? (pendingSubtasks / totalSubtasks) * 100 : 0,\n\t\t\tblocked: totalSubtasks > 0 ? (blockedSubtasks / totalSubtasks) * 100 : 0,\n\t\t\tdeferred:\n\t\t\t\ttotalSubtasks > 0 ? (deferredSubtasks / totalSubtasks) * 100 : 0,\n\t\t\tcancelled:\n\t\t\t\ttotalSubtasks > 0 ? (cancelledSubtasks / totalSubtasks) * 100 : 0,\n\t\t\treview: totalSubtasks > 0 ? (reviewSubtasks / totalSubtasks) * 100 : 0\n\t\t};\n\n\t\t// Create progress bars with status breakdowns\n\t\tconst taskProgressBar = createProgressBar(\n\t\t\tcompletionPercentage,\n\t\t\t30,\n\t\t\ttaskStatusBreakdown\n\t\t);\n\t\tconst subtaskProgressBar = createProgressBar(\n\t\t\tsubtaskCompletionPercentage,\n\t\t\t30,\n\t\t\tsubtaskStatusBreakdown\n\t\t);\n\n\t\t// Get terminal width - more reliable method\n\t\tlet terminalWidth;\n\t\ttry {\n\t\t\t// Try to get the actual terminal columns\n\t\t\tterminalWidth = process.stdout.columns;\n\t\t} catch (e) {\n\t\t\t// Fallback if columns cannot be determined\n\t\t\tlog('debug', 'Could not determine terminal width, using default');\n\t\t}\n\t\t// Ensure we have a reasonable default if detection fails\n\t\tterminalWidth = terminalWidth || 80;\n\n\t\t// Ensure terminal width is at least a minimum value to prevent layout issues\n\t\tterminalWidth = Math.max(terminalWidth, 80);\n\n\t\t// Create dashboard content\n\t\tconst projectDashboardContent =\n\t\t\tchalk.white.bold('Project Dashboard') +\n\t\t\t'\\n' +\n\t\t\t`Tasks Progress: ${chalk.greenBright(taskProgressBar)} 
${completionPercentage.toFixed(0)}%\\n` +\n\t\t\t`Done: ${chalk.green(doneCount)} In Progress: ${chalk.blue(inProgressCount)} Pending: ${chalk.yellow(pendingCount)} Blocked: ${chalk.red(blockedCount)} Deferred: ${chalk.gray(deferredCount)} Cancelled: ${chalk.gray(cancelledCount)}\\n\\n` +\n\t\t\t`Subtasks Progress: ${chalk.cyan(subtaskProgressBar)} ${subtaskCompletionPercentage.toFixed(0)}%\\n` +\n\t\t\t`Completed: ${chalk.green(completedSubtasks)}/${totalSubtasks} In Progress: ${chalk.blue(inProgressSubtasks)} Pending: ${chalk.yellow(pendingSubtasks)} Blocked: ${chalk.red(blockedSubtasks)} Deferred: ${chalk.gray(deferredSubtasks)} Cancelled: ${chalk.gray(cancelledSubtasks)}\\n\\n` +\n\t\t\tchalk.cyan.bold('Priority Breakdown:') +\n\t\t\t'\\n' +\n\t\t\t`${chalk.red('•')} ${chalk.white('High priority:')} ${data.tasks.filter((t) => t.priority === 'high').length}\\n` +\n\t\t\t`${chalk.yellow('•')} ${chalk.white('Medium priority:')} ${data.tasks.filter((t) => t.priority === 'medium').length}\\n` +\n\t\t\t`${chalk.green('•')} ${chalk.white('Low priority:')} ${data.tasks.filter((t) => t.priority === 'low').length}`;\n\n\t\tconst dependencyDashboardContent =\n\t\t\tchalk.white.bold('Dependency Status & Next Task') +\n\t\t\t'\\n' +\n\t\t\tchalk.cyan.bold('Dependency Metrics:') +\n\t\t\t'\\n' +\n\t\t\t`${chalk.green('•')} ${chalk.white('Tasks with no dependencies:')} ${tasksWithNoDeps}\\n` +\n\t\t\t`${chalk.green('•')} ${chalk.white('Tasks ready to work on:')} ${tasksReadyToWork}\\n` +\n\t\t\t`${chalk.yellow('•')} ${chalk.white('Tasks blocked by dependencies:')} ${tasksWithUnsatisfiedDeps}\\n` +\n\t\t\t`${chalk.magenta('•')} ${chalk.white('Most depended-on task:')} ${mostDependedOnTask ? 
chalk.cyan(`#${mostDependedOnTaskId} (${maxDependents} dependents)`) : chalk.gray('None')}\\n` +\n\t\t\t`${chalk.blue('•')} ${chalk.white('Avg dependencies per task:')} ${avgDependenciesPerTask.toFixed(1)}\\n\\n` +\n\t\t\tchalk.cyan.bold('Next Task to Work On:') +\n\t\t\t'\\n' +\n\t\t\t`ID: ${chalk.cyan(nextItem ? nextItem.id : 'N/A')} - ${nextItem ? chalk.white.bold(truncate(nextItem.title, 40)) : chalk.yellow('No task available')}\n` +\n\t\t\t`Priority: ${nextItem ? chalk.white(nextItem.priority || 'medium') : ''} Dependencies: ${nextItem ? formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : ''}\n` +\n\t\t\t`Complexity: ${nextItem && nextItem.complexityScore ? getComplexityWithColor(nextItem.complexityScore) : chalk.gray('N/A')}`;\n\n\t\t// Calculate width for side-by-side display\n\t\t// Box borders, padding take approximately 4 chars on each side\n\t\tconst minDashboardWidth = 50; // Minimum width for dashboard\n\t\tconst minDependencyWidth = 50; // Minimum width for dependency dashboard\n\t\tconst totalMinWidth = minDashboardWidth + minDependencyWidth + 4; // Extra 4 chars for spacing\n\n\t\t// If terminal is wide enough, show boxes side by side with responsive widths\n\t\tif (terminalWidth >= totalMinWidth) {\n\t\t\t// Calculate widths proportionally for each box - use exact 50% width each\n\t\t\tconst availableWidth = terminalWidth;\n\t\t\tconst halfWidth = Math.floor(availableWidth / 2);\n\n\t\t\t// Account for border characters (2 chars on each side)\n\t\t\tconst boxContentWidth = halfWidth - 4;\n\n\t\t\t// Create boxen options with precise widths\n\t\t\tconst dashboardBox = boxen(projectDashboardContent, {\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'blue',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\twidth: boxContentWidth,\n\t\t\t\tdimBorder: false\n\t\t\t});\n\n\t\t\tconst dependencyBox = boxen(dependencyDashboardContent, {\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'magenta',\n\t\t\t\tborderStyle: 
'round',\n\t\t\t\twidth: boxContentWidth,\n\t\t\t\tdimBorder: false\n\t\t\t});\n\n\t\t\t// Create a better side-by-side layout with exact spacing\n\t\t\tconst dashboardLines = dashboardBox.split('\\n');\n\t\t\tconst dependencyLines = dependencyBox.split('\\n');\n\n\t\t\t// Make sure both boxes have the same height\n\t\t\tconst maxHeight = Math.max(dashboardLines.length, dependencyLines.length);\n\n\t\t\t// For each line of output, pad the dashboard line to exactly halfWidth chars\n\t\t\t// This ensures the dependency box starts at exactly the right position\n\t\t\tconst combinedLines = [];\n\t\t\tfor (let i = 0; i < maxHeight; i++) {\n\t\t\t\t// Get the dashboard line (or empty string if we've run out of lines)\n\t\t\t\tconst dashLine = i < dashboardLines.length ? dashboardLines[i] : '';\n\t\t\t\t// Get the dependency line (or empty string if we've run out of lines)\n\t\t\t\tconst depLine = i < dependencyLines.length ? dependencyLines[i] : '';\n\n\t\t\t\t// Remove any trailing spaces from dashLine before padding to exact width\n\t\t\t\tconst trimmedDashLine = dashLine.trimEnd();\n\t\t\t\t// Pad the dashboard line to exactly halfWidth chars with no extra spaces\n\t\t\t\tconst paddedDashLine = trimmedDashLine.padEnd(halfWidth, ' ');\n\n\t\t\t\t// Join the lines with no space in between\n\t\t\t\tcombinedLines.push(paddedDashLine + depLine);\n\t\t\t}\n\n\t\t\t// Join all lines and output\n\t\t\tconsole.log(combinedLines.join('\\n'));\n\t\t} else {\n\t\t\t// Terminal too narrow, show boxes stacked vertically\n\t\t\tconst dashboardBox = boxen(projectDashboardContent, {\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'blue',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 0, bottom: 1 }\n\t\t\t});\n\n\t\t\tconst dependencyBox = boxen(dependencyDashboardContent, {\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'magenta',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 0, bottom: 1 }\n\t\t\t});\n\n\t\t\t// Display stacked 
vertically\n\t\t\tconsole.log(dashboardBox);\n\t\t\tconsole.log(dependencyBox);\n\t\t}\n\n\t\tif (filteredTasks.length === 0) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tstatusFilter\n\t\t\t\t\t\t? chalk.yellow(`No tasks with status '${statusFilter}' found`)\n\t\t\t\t\t\t: chalk.yellow('No tasks found'),\n\t\t\t\t\t{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }\n\t\t\t\t)\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\t// COMPLETELY REVISED TABLE APPROACH\n\t\t// Define percentage-based column widths and calculate actual widths\n\t\t// Adjust percentages based on content type and user requirements\n\n\t\t// Adjust ID width if showing subtasks (subtask IDs are longer: e.g., \"1.2\")\n\t\tconst idWidthPct = withSubtasks ? 10 : 7;\n\n\t\t// Calculate max status length to accommodate \"in-progress\"\n\t\tconst statusWidthPct = 15;\n\n\t\t// Increase priority column width as requested\n\t\tconst priorityWidthPct = 12;\n\n\t\t// Make dependencies column smaller as requested (-20%)\n\t\tconst depsWidthPct = 20;\n\n\t\tconst complexityWidthPct = 10;\n\n\t\t// Calculate title/description width as remaining space (+20% from dependencies reduction)\n\t\tconst titleWidthPct =\n\t\t\t100 -\n\t\t\tidWidthPct -\n\t\t\tstatusWidthPct -\n\t\t\tpriorityWidthPct -\n\t\t\tdepsWidthPct -\n\t\t\tcomplexityWidthPct;\n\n\t\t// Allow 10 characters for borders and padding\n\t\tconst availableWidth = terminalWidth - 10;\n\n\t\t// Calculate actual column widths based on percentages\n\t\tconst idWidth = Math.floor(availableWidth * (idWidthPct / 100));\n\t\tconst statusWidth = Math.floor(availableWidth * (statusWidthPct / 100));\n\t\tconst priorityWidth = Math.floor(availableWidth * (priorityWidthPct / 100));\n\t\tconst depsWidth = Math.floor(availableWidth * (depsWidthPct / 100));\n\t\tconst complexityWidth = Math.floor(\n\t\t\tavailableWidth * (complexityWidthPct / 100)\n\t\t);\n\t\tconst titleWidth = Math.floor(availableWidth * (titleWidthPct / 100));\n\n\t\t// Create a table with 
correct borders and spacing\n\t\tconst table = new Table({\n\t\t\thead: [\n\t\t\t\tchalk.cyan.bold('ID'),\n\t\t\t\tchalk.cyan.bold('Title'),\n\t\t\t\tchalk.cyan.bold('Status'),\n\t\t\t\tchalk.cyan.bold('Priority'),\n\t\t\t\tchalk.cyan.bold('Dependencies'),\n\t\t\t\tchalk.cyan.bold('Complexity')\n\t\t\t],\n\t\t\tcolWidths: [\n\t\t\t\tidWidth,\n\t\t\t\ttitleWidth,\n\t\t\t\tstatusWidth,\n\t\t\t\tpriorityWidth,\n\t\t\t\tdepsWidth,\n\t\t\t\tcomplexityWidth // Added complexity column width\n\t\t\t],\n\t\t\tstyle: {\n\t\t\t\thead: [], // No special styling for header\n\t\t\t\tborder: [], // No special styling for border\n\t\t\t\tcompact: false // Use default spacing\n\t\t\t},\n\t\t\twordWrap: true,\n\t\t\twrapOnWordBoundary: true\n\t\t});\n\n\t\t// Process tasks for the table\n\t\tfilteredTasks.forEach((task) => {\n\t\t\t// Format dependencies with status indicators (colored)\n\t\t\tlet depText = 'None';\n\t\t\tif (task.dependencies && task.dependencies.length > 0) {\n\t\t\t\t// Use the proper formatDependenciesWithStatus function for colored status\n\t\t\t\tdepText = formatDependenciesWithStatus(\n\t\t\t\t\ttask.dependencies,\n\t\t\t\t\tdata.tasks,\n\t\t\t\t\ttrue,\n\t\t\t\t\tcomplexityReport\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\tdepText = chalk.gray('None');\n\t\t\t}\n\n\t\t\t// Clean up any ANSI codes or confusing characters\n\t\t\tconst cleanTitle = task.title.replace(/\\n/g, ' ');\n\n\t\t\t// Get priority color\n\t\t\tconst priorityColor =\n\t\t\t\t{\n\t\t\t\t\thigh: chalk.red,\n\t\t\t\t\tmedium: chalk.yellow,\n\t\t\t\t\tlow: chalk.gray\n\t\t\t\t}[task.priority || 'medium'] || chalk.white;\n\n\t\t\t// Format status\n\t\t\tconst status = getStatusWithColor(task.status, true);\n\n\t\t\t// Add the row without truncating dependencies\n\t\t\ttable.push([\n\t\t\t\ttask.id.toString(),\n\t\t\t\ttruncate(cleanTitle, titleWidth - 3),\n\t\t\t\tstatus,\n\t\t\t\tpriorityColor(truncate(task.priority || 'medium', priorityWidth - 
2)),\n\t\t\t\tdepText,\n\t\t\t\ttask.complexityScore\n\t\t\t\t\t? getComplexityWithColor(task.complexityScore)\n\t\t\t\t\t: chalk.gray('N/A')\n\t\t\t]);\n\n\t\t\t// Add subtasks if requested\n\t\t\tif (withSubtasks && task.subtasks && task.subtasks.length > 0) {\n\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\t// Format subtask dependencies with status indicators\n\t\t\t\t\tlet subtaskDepText = 'None';\n\t\t\t\t\tif (subtask.dependencies && subtask.dependencies.length > 0) {\n\t\t\t\t\t\t// Handle both subtask-to-subtask and subtask-to-task dependencies\n\t\t\t\t\t\tconst formattedDeps = subtask.dependencies\n\t\t\t\t\t\t\t.map((depId) => {\n\t\t\t\t\t\t\t\t// Check if it's a dependency on another subtask\n\t\t\t\t\t\t\t\tif (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\t\t\t\tconst foundSubtask = task.subtasks.find(\n\t\t\t\t\t\t\t\t\t\t(st) => st.id === depId\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\tif (foundSubtask) {\n\t\t\t\t\t\t\t\t\t\tconst isDone =\n\t\t\t\t\t\t\t\t\t\t\tfoundSubtask.status === 'done' ||\n\t\t\t\t\t\t\t\t\t\t\tfoundSubtask.status === 'completed';\n\t\t\t\t\t\t\t\t\t\tconst isInProgress = foundSubtask.status === 'in-progress';\n\n\t\t\t\t\t\t\t\t\t\t// Use consistent color formatting instead of emojis\n\t\t\t\t\t\t\t\t\t\tif (isDone) {\n\t\t\t\t\t\t\t\t\t\t\treturn chalk.green.bold(`${task.id}.${depId}`);\n\t\t\t\t\t\t\t\t\t\t} else if (isInProgress) {\n\t\t\t\t\t\t\t\t\t\t\treturn chalk.hex('#FFA500').bold(`${task.id}.${depId}`);\n\t\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\t\treturn chalk.red.bold(`${task.id}.${depId}`);\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t// Default to regular task dependency\n\t\t\t\t\t\t\t\tconst depTask = data.tasks.find((t) => t.id === depId);\n\t\t\t\t\t\t\t\tif (depTask) {\n\t\t\t\t\t\t\t\t\t// Add complexity to depTask before checking status\n\t\t\t\t\t\t\t\t\taddComplexityToTask(depTask, complexityReport);\n\t\t\t\t\t\t\t\t\tconst isDone 
=\n\t\t\t\t\t\t\t\t\t\tdepTask.status === 'done' || depTask.status === 'completed';\n\t\t\t\t\t\t\t\t\tconst isInProgress = depTask.status === 'in-progress';\n\t\t\t\t\t\t\t\t\t// Use the same color scheme as in formatDependenciesWithStatus\n\t\t\t\t\t\t\t\t\tif (isDone) {\n\t\t\t\t\t\t\t\t\t\treturn chalk.green.bold(`${depId}`);\n\t\t\t\t\t\t\t\t\t} else if (isInProgress) {\n\t\t\t\t\t\t\t\t\t\treturn chalk.hex('#FFA500').bold(`${depId}`);\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\treturn chalk.red.bold(`${depId}`);\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn chalk.cyan(depId.toString());\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t.join(', ');\n\n\t\t\t\t\t\tsubtaskDepText = formattedDeps || chalk.gray('None');\n\t\t\t\t\t}\n\n\t\t\t\t\t// Add the subtask row without truncating dependencies\n\t\t\t\t\ttable.push([\n\t\t\t\t\t\t`${task.id}.${subtask.id}`,\n\t\t\t\t\t\tchalk.dim(`└─ ${truncate(subtask.title, titleWidth - 5)}`),\n\t\t\t\t\t\tgetStatusWithColor(subtask.status, true),\n\t\t\t\t\t\tchalk.dim('-'),\n\t\t\t\t\t\tsubtaskDepText,\n\t\t\t\t\t\tsubtask.complexityScore\n\t\t\t\t\t\t\t? 
chalk.gray(`${subtask.complexityScore}`)\n\t\t\t\t\t\t\t: chalk.gray('N/A')\n\t\t\t\t\t]);\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Ensure we output the table even if it had to wrap\n\t\ttry {\n\t\t\tconsole.log(table.toString());\n\t\t} catch (err) {\n\t\t\tlog('error', `Error rendering table: ${err.message}`);\n\n\t\t\t// Fall back to simpler output\n\t\t\tconsole.log(\n\t\t\t\tchalk.yellow(\n\t\t\t\t\t'\\nFalling back to simple task list due to terminal width constraints:'\n\t\t\t\t)\n\t\t\t);\n\t\t\tfilteredTasks.forEach((task) => {\n\t\t\t\tconsole.log(\n\t\t\t\t\t`${chalk.cyan(task.id)}: ${chalk.white(task.title)} - ${getStatusWithColor(task.status)}`\n\t\t\t\t);\n\t\t\t});\n\t\t}\n\n\t\t// Show filter info if applied\n\t\tif (statusFilter) {\n\t\t\tconsole.log(chalk.yellow(`\\nFiltered by status: ${statusFilter}`));\n\t\t\tconsole.log(\n\t\t\t\tchalk.yellow(`Showing ${filteredTasks.length} of ${totalTasks} tasks`)\n\t\t\t);\n\t\t}\n\n\t\t// Define priority colors\n\t\tconst priorityColors = {\n\t\t\thigh: chalk.red.bold,\n\t\t\tmedium: chalk.yellow,\n\t\t\tlow: chalk.gray\n\t\t};\n\n\t\t// Show next task box in a prominent color\n\t\tif (nextItem) {\n\t\t\t// Prepare subtasks section if they exist (Only tasks have .subtasks property)\n\t\t\tlet subtasksSection = '';\n\t\t\t// Check if the nextItem is a top-level task before looking for subtasks\n\t\t\tconst parentTaskForSubtasks = data.tasks.find(\n\t\t\t\t(t) => String(t.id) === String(nextItem.id)\n\t\t\t); // Find the original task object\n\t\t\tif (\n\t\t\t\tparentTaskForSubtasks &&\n\t\t\t\tparentTaskForSubtasks.subtasks &&\n\t\t\t\tparentTaskForSubtasks.subtasks.length > 0\n\t\t\t) {\n\t\t\t\tsubtasksSection = `\\n\\n${chalk.white.bold('Subtasks:')}\\n`;\n\t\t\t\tsubtasksSection += parentTaskForSubtasks.subtasks\n\t\t\t\t\t.map((subtask) => {\n\t\t\t\t\t\t// Add complexity to subtask before display\n\t\t\t\t\t\taddComplexityToTask(subtask, complexityReport);\n\t\t\t\t\t\t// Using a more simplified 
format for subtask status display\n\t\t\t\t\t\tconst status = subtask.status || 'pending';\n\t\t\t\t\t\tconst statusColors = {\n\t\t\t\t\t\t\tdone: chalk.green,\n\t\t\t\t\t\t\tcompleted: chalk.green,\n\t\t\t\t\t\t\tpending: chalk.yellow,\n\t\t\t\t\t\t\t'in-progress': chalk.blue,\n\t\t\t\t\t\t\tdeferred: chalk.gray,\n\t\t\t\t\t\t\tblocked: chalk.red,\n\t\t\t\t\t\t\tcancelled: chalk.gray\n\t\t\t\t\t\t};\n\t\t\t\t\t\tconst statusColor =\n\t\t\t\t\t\t\tstatusColors[status.toLowerCase()] || chalk.white;\n\t\t\t\t\t\t// Ensure subtask ID is displayed correctly using parent ID from the original task object\n\t\t\t\t\t\treturn `${chalk.cyan(`${parentTaskForSubtasks.id}.${subtask.id}`)} [${statusColor(status)}] ${subtask.title}`;\n\t\t\t\t\t})\n\t\t\t\t\t.join('\\n');\n\t\t\t}\n\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.hex('#FF8800').bold(\n\t\t\t\t\t\t// Use nextItem.id and nextItem.title\n\t\t\t\t\t\t`🔥 Next Task to Work On: #${nextItem.id} - ${nextItem.title}`\n\t\t\t\t\t) +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t// Use nextItem.priority, nextItem.status, nextItem.dependencies\n\t\t\t\t\t\t`${chalk.white('Priority:')} ${priorityColors[nextItem.priority || 'medium'](nextItem.priority || 'medium')} ${chalk.white('Status:')} ${getStatusWithColor(nextItem.status, true)}\\n` +\n\t\t\t\t\t\t`${chalk.white('Dependencies:')} ${nextItem.dependencies && nextItem.dependencies.length > 0 ? 
formatDependenciesWithStatus(nextItem.dependencies, data.tasks, true, complexityReport) : chalk.gray('None')}\\n\\n` +\n\t\t\t\t\t\t// Use nextTask.description (Note: findNextTask doesn't return description, need to fetch original task/subtask for this)\n\t\t\t\t\t\t// *** Fetching original item for description and details ***\n\t\t\t\t\t\t`${chalk.white('Description:')} ${getWorkItemDescription(nextItem, data.tasks)}` +\n\t\t\t\t\t\tsubtasksSection + // <-- Subtasks are handled above now\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t// Use nextItem.id\n\t\t\t\t\t\t`${chalk.cyan('Start working:')} ${chalk.yellow(`task-master set-status --id=${nextItem.id} --status=in-progress`)}\\n` +\n\t\t\t\t\t\t// Use nextItem.id\n\t\t\t\t\t\t`${chalk.cyan('View details:')} ${chalk.yellow(`task-master show ${nextItem.id}`)}`,\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: { left: 2, right: 2, top: 1, bottom: 1 },\n\t\t\t\t\t\tborderColor: '#FF8800',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 },\n\t\t\t\t\t\ttitle: '⚡ RECOMMENDED NEXT TASK ⚡',\n\t\t\t\t\t\ttitleAlignment: 'center',\n\t\t\t\t\t\twidth: terminalWidth - 4,\n\t\t\t\t\t\tfullscreen: false\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t} else {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.hex('#FF8800').bold('No eligible next task found') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t'All pending tasks have dependencies that are not yet completed, or all tasks are done.',\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: '#FF8800',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 },\n\t\t\t\t\t\ttitle: '⚡ NEXT TASK ⚡',\n\t\t\t\t\t\ttitleAlignment: 'center',\n\t\t\t\t\t\twidth: terminalWidth - 4 // Use full terminal width minus a small margin\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\t// Show next steps\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Suggested Next Steps:') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t`${chalk.cyan('1.')} Run 
${chalk.yellow('task-master next')} to see what to work on next\\n` +\n\t\t\t\t\t`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks\\n` +\n\t\t\t\t\t`${chalk.cyan('3.')} Run ${chalk.yellow('task-master set-status --id=<id> --status=done')} to mark a task as complete`,\n\t\t\t\t{\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'gray',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t} catch (error) {\n\t\tlog('error', `Error listing tasks: ${error.message}`);\n\n\t\tif (outputFormat === 'json') {\n\t\t\t// Return structured error for JSON output\n\t\t\tthrow {\n\t\t\t\tcode: 'TASK_LIST_ERROR',\n\t\t\t\tmessage: error.message,\n\t\t\t\tdetails: error.stack\n\t\t\t};\n\t\t}\n\n\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\tprocess.exit(1);\n\t}\n}\n\n// *** Helper function to get description for task or subtask ***\nfunction getWorkItemDescription(item, allTasks) {\n\tif (!item) return 'N/A';\n\tif (item.parentId) {\n\t\t// It's a subtask\n\t\tconst parent = allTasks.find((t) => t.id === item.parentId);\n\t\tconst subtask = parent?.subtasks?.find(\n\t\t\t(st) => `${parent.id}.${st.id}` === item.id\n\t\t);\n\t\treturn subtask?.description || 'No description available.';\n\t} else {\n\t\t// It's a top-level task\n\t\tconst task = allTasks.find((t) => String(t.id) === String(item.id));\n\t\treturn task?.description || 'No description available.';\n\t}\n}\n\n/**\n * Generate markdown-formatted output for README files\n * @param {Object} data - Full tasks data\n * @param {Array} filteredTasks - Filtered tasks array\n * @param {Object} stats - Statistics object\n * @returns {string} - Formatted markdown string\n */\nfunction generateMarkdownOutput(data, filteredTasks, stats) {\n\tconst 
{\n\t\ttotalTasks,\n\t\tcompletedTasks,\n\t\tcompletionPercentage,\n\t\tdoneCount,\n\t\tinProgressCount,\n\t\tpendingCount,\n\t\tblockedCount,\n\t\tdeferredCount,\n\t\tcancelledCount,\n\t\ttotalSubtasks,\n\t\tcompletedSubtasks,\n\t\tsubtaskCompletionPercentage,\n\t\tinProgressSubtasks,\n\t\tpendingSubtasks,\n\t\tblockedSubtasks,\n\t\tdeferredSubtasks,\n\t\tcancelledSubtasks,\n\t\ttasksWithNoDeps,\n\t\ttasksReadyToWork,\n\t\ttasksWithUnsatisfiedDeps,\n\t\tmostDependedOnTask,\n\t\tmostDependedOnTaskId,\n\t\tmaxDependents,\n\t\tavgDependenciesPerTask,\n\t\tcomplexityReport,\n\t\twithSubtasks,\n\t\tnextItem\n\t} = stats;\n\n\tlet markdown = '';\n\n\t// Create progress bars for markdown (using Unicode block characters)\n\tconst createMarkdownProgressBar = (percentage, width = 20) => {\n\t\tconst filled = Math.round((percentage / 100) * width);\n\t\tconst empty = width - filled;\n\t\treturn '█'.repeat(filled) + '░'.repeat(empty);\n\t};\n\n\tconst taskProgressBar = createMarkdownProgressBar(completionPercentage, 20);\n\tconst subtaskProgressBar = createMarkdownProgressBar(\n\t\tsubtaskCompletionPercentage,\n\t\t20\n\t);\n\n\t// Dashboard section\n\t// markdown += '```\\n';\n\tmarkdown += '| Project Dashboard | |\\n';\n\tmarkdown += '| :- |:-|\\n';\n\tmarkdown += `| Task Progress | ${taskProgressBar} ${Math.round(completionPercentage)}% |\\n`;\n\tmarkdown += `| Done | ${doneCount} |\\n`;\n\tmarkdown += `| In Progress | ${inProgressCount} |\\n`;\n\tmarkdown += `| Pending | ${pendingCount} |\\n`;\n\tmarkdown += `| Deferred | ${deferredCount} |\\n`;\n\tmarkdown += `| Cancelled | ${cancelledCount} |\\n`;\n\tmarkdown += `|-|-|\\n`;\n\tmarkdown += `| Subtask Progress | ${subtaskProgressBar} ${Math.round(subtaskCompletionPercentage)}% |\\n`;\n\tmarkdown += `| Completed | ${completedSubtasks} |\\n`;\n\tmarkdown += `| In Progress | ${inProgressSubtasks} |\\n`;\n\tmarkdown += `| Pending | ${pendingSubtasks} |\\n`;\n\n\tmarkdown += '\\n\\n';\n\n\t// Tasks table\n\tmarkdown +=\n\t\t'| 
ID | Title | Status | Priority | Dependencies | Complexity |\\n';\n\tmarkdown +=\n\t\t'| :- | :- | :- | :- | :- | :- |\\n';\n\n\t// Helper function to format status with symbols\n\tconst getStatusSymbol = (status) => {\n\t\tswitch (status) {\n\t\t\tcase 'done':\n\t\t\tcase 'completed':\n\t\t\t\treturn '✓ done';\n\t\t\tcase 'in-progress':\n\t\t\t\treturn '► in-progress';\n\t\t\tcase 'pending':\n\t\t\t\treturn '○ pending';\n\t\t\tcase 'blocked':\n\t\t\t\treturn '⭕ blocked';\n\t\t\tcase 'deferred':\n\t\t\t\treturn 'x deferred';\n\t\t\tcase 'cancelled':\n\t\t\t\treturn 'x cancelled';\n\t\t\tcase 'review':\n\t\t\t\treturn '? review';\n\t\t\tdefault:\n\t\t\t\treturn status || 'pending';\n\t\t}\n\t};\n\n\t// Helper function to format dependencies without color codes\n\tconst formatDependenciesForMarkdown = (deps, allTasks) => {\n\t\tif (!deps || deps.length === 0) return 'None';\n\t\treturn deps\n\t\t\t.map((depId) => {\n\t\t\t\tconst depTask = allTasks.find((t) => t.id === depId);\n\t\t\t\treturn depTask ? depId.toString() : depId.toString();\n\t\t\t})\n\t\t\t.join(', ');\n\t};\n\n\t// Process all tasks\n\tfilteredTasks.forEach((task) => {\n\t\tconst taskTitle = task.title; // No truncation for README\n\t\tconst statusSymbol = getStatusSymbol(task.status);\n\t\tconst priority = task.priority || 'medium';\n\t\tconst deps = formatDependenciesForMarkdown(task.dependencies, data.tasks);\n\t\tconst complexity = task.complexityScore\n\t\t\t? 
`● ${task.complexityScore}`\n\t\t\t: 'N/A';\n\n\t\tmarkdown += `| ${task.id} | ${taskTitle} | ${statusSymbol} | ${priority} | ${deps} | ${complexity} |\\n`;\n\n\t\t// Add subtasks if requested\n\t\tif (withSubtasks && task.subtasks && task.subtasks.length > 0) {\n\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\tconst subtaskTitle = `${subtask.title}`; // No truncation\n\t\t\t\tconst subtaskStatus = getStatusSymbol(subtask.status);\n\t\t\t\tconst subtaskDeps = formatDependenciesForMarkdown(\n\t\t\t\t\tsubtask.dependencies,\n\t\t\t\t\tdata.tasks\n\t\t\t\t);\n\t\t\t\tconst subtaskComplexity = subtask.complexityScore\n\t\t\t\t\t? subtask.complexityScore.toString()\n\t\t\t\t\t: 'N/A';\n\n\t\t\t\tmarkdown += `| ${task.id}.${subtask.id} | ${subtaskTitle} | ${subtaskStatus} | - | ${subtaskDeps} | ${subtaskComplexity} |\\n`;\n\t\t\t});\n\t\t}\n\t});\n\n\treturn markdown;\n}\n\nexport default listTasks;\n"], ["/claude-task-master/scripts/modules/task-manager/parse-prd.js", "import fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport { z } from 'zod';\n\nimport {\n\tlog,\n\twriteJSON,\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode,\n\treadJSON,\n\tfindTaskById,\n\tensureTagMetadata,\n\tgetCurrentTag\n} from '../utils.js';\n\nimport { generateObjectService } from '../ai-services-unified.js';\nimport { getDebugFlag } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport { displayAiUsageSummary } from '../ui.js';\n\n// Define the Zod schema for a SINGLE task object\nconst prdSingleTaskSchema = z.object({\n\tid: z.number().int().positive(),\n\ttitle: z.string().min(1),\n\tdescription: z.string().min(1),\n\tdetails: z.string().nullable(),\n\ttestStrategy: z.string().nullable(),\n\tpriority: z.enum(['high', 'medium', 'low']).nullable(),\n\tdependencies: z.array(z.number().int().positive()).nullable(),\n\tstatus: z.string().nullable()\n});\n\n// Define the Zod schema for the 
ENTIRE expected AI response object\nconst prdResponseSchema = z.object({\n\ttasks: z.array(prdSingleTaskSchema),\n\tmetadata: z.object({\n\t\tprojectName: z.string(),\n\t\ttotalTasks: z.number(),\n\t\tsourceFile: z.string(),\n\t\tgeneratedAt: z.string()\n\t})\n});\n\n/**\n * Parse a PRD file and generate tasks\n * @param {string} prdPath - Path to the PRD file\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {number} numTasks - Number of tasks to generate\n * @param {Object} options - Additional options\n * @param {boolean} [options.force=false] - Whether to overwrite existing tasks.json.\n * @param {boolean} [options.append=false] - Append to existing tasks file.\n * @param {boolean} [options.research=false] - Use research model for enhanced PRD analysis.\n * @param {Object} [options.reportProgress] - Function to report progress (optional, likely unused).\n * @param {Object} [options.mcpLog] - MCP logger object (optional).\n * @param {Object} [options.session] - Session object from MCP server (optional).\n * @param {string} [options.projectRoot] - Project root path (for MCP/env fallback).\n * @param {string} [options.tag] - Target tag for task generation.\n * @param {string} [outputFormat='text'] - Output format ('text' or 'json').\n */\nasync function parsePRD(prdPath, tasksPath, numTasks, options = {}) {\n\tconst {\n\t\treportProgress,\n\t\tmcpLog,\n\t\tsession,\n\t\tprojectRoot,\n\t\tforce = false,\n\t\tappend = false,\n\t\tresearch = false,\n\t\ttag\n\t} = options;\n\tconst isMCP = !!mcpLog;\n\tconst outputFormat = isMCP ? 'json' : 'text';\n\n\t// Use the provided tag, or the current active tag, or default to 'master'\n\tconst targetTag = tag;\n\n\tconst logFn = mcpLog\n\t\t? 
mcpLog\n\t\t: {\n\t\t\t\t// Wrapper for CLI\n\t\t\t\tinfo: (...args) => log('info', ...args),\n\t\t\t\twarn: (...args) => log('warn', ...args),\n\t\t\t\terror: (...args) => log('error', ...args),\n\t\t\t\tdebug: (...args) => log('debug', ...args),\n\t\t\t\tsuccess: (...args) => log('success', ...args)\n\t\t\t};\n\n\t// Create custom reporter using logFn\n\tconst report = (message, level = 'info') => {\n\t\t// Check logFn directly\n\t\tif (logFn && typeof logFn[level] === 'function') {\n\t\t\tlogFn[level](message);\n\t\t} else if (!isSilentMode() && outputFormat === 'text') {\n\t\t\t// Fallback to original log only if necessary and in CLI text mode\n\t\t\tlog(level, message);\n\t\t}\n\t};\n\n\treport(\n\t\t`Parsing PRD file: ${prdPath}, Force: ${force}, Append: ${append}, Research: ${research}`\n\t);\n\n\tlet existingTasks = [];\n\tlet nextId = 1;\n\tlet aiServiceResponse = null;\n\n\ttry {\n\t\t// Check if there are existing tasks in the target tag\n\t\tlet hasExistingTasksInTag = false;\n\t\tif (fs.existsSync(tasksPath)) {\n\t\t\ttry {\n\t\t\t\t// Read the entire file to check if the tag exists\n\t\t\t\tconst existingFileContent = fs.readFileSync(tasksPath, 'utf8');\n\t\t\t\tconst allData = JSON.parse(existingFileContent);\n\n\t\t\t\t// Check if the target tag exists and has tasks\n\t\t\t\tif (\n\t\t\t\t\tallData[targetTag] &&\n\t\t\t\t\tArray.isArray(allData[targetTag].tasks) &&\n\t\t\t\t\tallData[targetTag].tasks.length > 0\n\t\t\t\t) {\n\t\t\t\t\thasExistingTasksInTag = true;\n\t\t\t\t\texistingTasks = allData[targetTag].tasks;\n\t\t\t\t\tnextId = Math.max(...existingTasks.map((t) => t.id || 0)) + 1;\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\t// If we can't read the file or parse it, assume no existing tasks in this tag\n\t\t\t\thasExistingTasksInTag = false;\n\t\t\t}\n\t\t}\n\n\t\t// Handle file existence and overwrite/append logic based on target tag\n\t\tif (hasExistingTasksInTag) {\n\t\t\tif (append) {\n\t\t\t\treport(\n\t\t\t\t\t`Append mode enabled. 
Found ${existingTasks.length} existing tasks in tag '${targetTag}'. Next ID will be ${nextId}.`,\n\t\t\t\t\t'info'\n\t\t\t\t);\n\t\t\t} else if (!force) {\n\t\t\t\t// Not appending and not forcing overwrite, and there are existing tasks in the target tag\n\t\t\t\tconst overwriteError = new Error(\n\t\t\t\t\t`Tag '${targetTag}' already contains ${existingTasks.length} tasks. Use --force to overwrite or --append to add to existing tasks.`\n\t\t\t\t);\n\t\t\t\treport(overwriteError.message, 'error');\n\t\t\t\tif (outputFormat === 'text') {\n\t\t\t\t\tconsole.error(chalk.red(overwriteError.message));\n\t\t\t\t}\n\t\t\t\tthrow overwriteError;\n\t\t\t} else {\n\t\t\t\t// Force overwrite is true\n\t\t\t\treport(\n\t\t\t\t\t`Force flag enabled. Overwriting existing tasks in tag '${targetTag}'.`,\n\t\t\t\t\t'info'\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\t// No existing tasks in target tag, proceed without confirmation\n\t\t\treport(\n\t\t\t\t`Tag '${targetTag}' is empty or doesn't exist. Creating/updating tag with new tasks.`,\n\t\t\t\t'info'\n\t\t\t);\n\t\t}\n\n\t\treport(`Reading PRD content from ${prdPath}`, 'info');\n\t\tconst prdContent = fs.readFileSync(prdPath, 'utf8');\n\t\tif (!prdContent) {\n\t\t\tthrow new Error(`Input file ${prdPath} is empty or could not be read.`);\n\t\t}\n\n\t\t// Load prompts using PromptManager\n\t\tconst promptManager = getPromptManager();\n\n\t\t// Get defaultTaskPriority from config\n\t\tconst { getDefaultPriority } = await import('../config-manager.js');\n\t\tconst defaultTaskPriority = getDefaultPriority(projectRoot) || 'medium';\n\n\t\tconst { systemPrompt, userPrompt } = await promptManager.loadPrompt(\n\t\t\t'parse-prd',\n\t\t\t{\n\t\t\t\tresearch,\n\t\t\t\tnumTasks,\n\t\t\t\tnextId,\n\t\t\t\tprdContent,\n\t\t\t\tprdPath,\n\t\t\t\tdefaultTaskPriority\n\t\t\t}\n\t\t);\n\n\t\t// Call the unified AI service\n\t\treport(\n\t\t\t`Calling AI service to generate tasks from PRD${research ? 
' with research-backed analysis' : ''}...`,\n\t\t\t'info'\n\t\t);\n\n\t\t// Call generateObjectService with the CORRECT schema and additional telemetry params\n\t\taiServiceResponse = await generateObjectService({\n\t\t\trole: research ? 'research' : 'main', // Use research role if flag is set\n\t\t\tsession: session,\n\t\t\tprojectRoot: projectRoot,\n\t\t\tschema: prdResponseSchema,\n\t\t\tobjectName: 'tasks_data',\n\t\t\tsystemPrompt: systemPrompt,\n\t\t\tprompt: userPrompt,\n\t\t\tcommandName: 'parse-prd',\n\t\t\toutputType: isMCP ? 'mcp' : 'cli'\n\t\t});\n\n\t\t// Create the directory if it doesn't exist\n\t\tconst tasksDir = path.dirname(tasksPath);\n\t\tif (!fs.existsSync(tasksDir)) {\n\t\t\tfs.mkdirSync(tasksDir, { recursive: true });\n\t\t}\n\t\tlogFn.success(\n\t\t\t`Successfully parsed PRD via AI service${research ? ' with research-backed analysis' : ''}.`\n\t\t);\n\n\t\t// Validate and Process Tasks\n\t\t// const generatedData = aiServiceResponse?.mainResult?.object;\n\n\t\t// Robustly get the actual AI-generated object\n\t\tlet generatedData = null;\n\t\tif (aiServiceResponse?.mainResult) {\n\t\t\tif (\n\t\t\t\ttypeof aiServiceResponse.mainResult === 'object' &&\n\t\t\t\taiServiceResponse.mainResult !== null &&\n\t\t\t\t'tasks' in aiServiceResponse.mainResult\n\t\t\t) {\n\t\t\t\t// If mainResult itself is the object with a 'tasks' property\n\t\t\t\tgeneratedData = aiServiceResponse.mainResult;\n\t\t\t} else if (\n\t\t\t\ttypeof aiServiceResponse.mainResult.object === 'object' &&\n\t\t\t\taiServiceResponse.mainResult.object !== null &&\n\t\t\t\t'tasks' in aiServiceResponse.mainResult.object\n\t\t\t) {\n\t\t\t\t// If mainResult.object is the object with a 'tasks' property\n\t\t\t\tgeneratedData = aiServiceResponse.mainResult.object;\n\t\t\t}\n\t\t}\n\n\t\tif (!generatedData || !Array.isArray(generatedData.tasks)) {\n\t\t\tlogFn.error(\n\t\t\t\t`Internal Error: generateObjectService returned unexpected data structure: 
${JSON.stringify(generatedData)}`\n\t\t\t);\n\t\t\tthrow new Error(\n\t\t\t\t'AI service returned unexpected data structure after validation.'\n\t\t\t);\n\t\t}\n\n\t\tlet currentId = nextId;\n\t\tconst taskMap = new Map();\n\t\tconst processedNewTasks = generatedData.tasks.map((task) => {\n\t\t\tconst newId = currentId++;\n\t\t\ttaskMap.set(task.id, newId);\n\t\t\treturn {\n\t\t\t\t...task,\n\t\t\t\tid: newId,\n\t\t\t\tstatus: 'pending',\n\t\t\t\tpriority: task.priority || 'medium',\n\t\t\t\tdependencies: Array.isArray(task.dependencies) ? task.dependencies : [],\n\t\t\t\tsubtasks: []\n\t\t\t};\n\t\t});\n\n\t\t// Remap dependencies for the NEWLY processed tasks\n\t\tprocessedNewTasks.forEach((task) => {\n\t\t\ttask.dependencies = task.dependencies\n\t\t\t\t.map((depId) => taskMap.get(depId)) // Map old AI ID to new sequential ID\n\t\t\t\t.filter(\n\t\t\t\t\t(newDepId) =>\n\t\t\t\t\t\tnewDepId != null && // Must exist\n\t\t\t\t\t\tnewDepId < task.id && // Must be a lower ID (could be existing or newly generated)\n\t\t\t\t\t\t(findTaskById(existingTasks, newDepId) || // Check if it exists in old tasks OR\n\t\t\t\t\t\t\tprocessedNewTasks.some((t) => t.id === newDepId)) // check if it exists in new tasks\n\t\t\t\t);\n\t\t});\n\n\t\tconst finalTasks = append\n\t\t\t? 
[...existingTasks, ...processedNewTasks]\n\t\t\t: processedNewTasks;\n\n\t\t// Read the existing file to preserve other tags\n\t\tlet outputData = {};\n\t\tif (fs.existsSync(tasksPath)) {\n\t\t\ttry {\n\t\t\t\tconst existingFileContent = fs.readFileSync(tasksPath, 'utf8');\n\t\t\t\toutputData = JSON.parse(existingFileContent);\n\t\t\t} catch (error) {\n\t\t\t\t// If we can't read the existing file, start with empty object\n\t\t\t\toutputData = {};\n\t\t\t}\n\t\t}\n\n\t\t// Update only the target tag, preserving other tags\n\t\toutputData[targetTag] = {\n\t\t\ttasks: finalTasks,\n\t\t\tmetadata: {\n\t\t\t\tcreated:\n\t\t\t\t\toutputData[targetTag]?.metadata?.created || new Date().toISOString(),\n\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\tdescription: `Tasks for ${targetTag} context`\n\t\t\t}\n\t\t};\n\n\t\t// Ensure the target tag has proper metadata\n\t\tensureTagMetadata(outputData[targetTag], {\n\t\t\tdescription: `Tasks for ${targetTag} context`\n\t\t});\n\n\t\t// Write the complete data structure back to the file\n\t\tfs.writeFileSync(tasksPath, JSON.stringify(outputData, null, 2));\n\t\treport(\n\t\t\t`Successfully ${append ? 'appended' : 'generated'} ${processedNewTasks.length} tasks in ${tasksPath}${research ? ' with research-backed analysis' : ''}`,\n\t\t\t'success'\n\t\t);\n\n\t\t// Generate markdown task files after writing tasks.json\n\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath), { mcpLog });\n\n\t\t// Handle CLI output (e.g., success message)\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t`Successfully generated ${processedNewTasks.length} new tasks${research ? ' with research-backed analysis' : ''}. 
Total tasks in ${tasksPath}: ${finalTasks.length}`\n\t\t\t\t\t),\n\t\t\t\t\t{ padding: 1, borderColor: 'green', borderStyle: 'round' }\n\t\t\t\t)\n\t\t\t);\n\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.white.bold('Next Steps:') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t`${chalk.cyan('1.')} Run ${chalk.yellow('task-master list')} to view all tasks\\n` +\n\t\t\t\t\t\t`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down a task into subtasks`,\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\n\t\t\tif (aiServiceResponse && aiServiceResponse.telemetryData) {\n\t\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t\t}\n\t\t}\n\n\t\t// Return telemetry data\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\ttasksPath,\n\t\t\ttelemetryData: aiServiceResponse?.telemetryData,\n\t\t\ttagInfo: aiServiceResponse?.tagInfo\n\t\t};\n\t} catch (error) {\n\t\treport(`Error parsing PRD: ${error.message}`, 'error');\n\n\t\t// Only show error UI for text output (CLI)\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\n\t\t\tif (getDebugFlag(projectRoot)) {\n\t\t\t\t// Use projectRoot for debug flag check\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\t\t}\n\n\t\tthrow error; // Always re-throw for proper error handling\n\t}\n}\n\nexport default parsePRD;\n"], ["/claude-task-master/scripts/modules/task-manager/generate-task-files.js", "import path from 'path';\nimport fs from 'fs';\nimport chalk from 'chalk';\n\nimport { log, readJSON } from '../utils.js';\nimport { formatDependenciesWithStatus } from '../ui.js';\nimport { validateAndFixDependencies } from '../dependency-manager.js';\nimport { getDebugFlag } from '../config-manager.js';\n\n/**\n * Generate individual task files from tasks.json\n * @param {string} tasksPath - Path to the tasks.json file\n * @param 
{string} outputDir - Output directory for task files\n * @param {Object} options - Additional options (mcpLog for MCP mode, projectRoot, tag)\n * @param {string} [options.projectRoot] - Project root path\n * @param {string} [options.tag] - Tag for the task\n * @param {Object} [options.mcpLog] - MCP logger object\n * @returns {Object|undefined} Result object in MCP mode, undefined in CLI mode\n */\nfunction generateTaskFiles(tasksPath, outputDir, options = {}) {\n\ttry {\n\t\tconst isMcpMode = !!options?.mcpLog;\n\t\tconst { projectRoot, tag } = options;\n\n\t\t// 1. Read the raw data structure, ensuring we have all tags.\n\t\t// We call readJSON without a specific tag to get the resolved default view,\n\t\t// which correctly contains the full structure in `_rawTaggedData`.\n\t\tconst resolvedData = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!resolvedData) {\n\t\t\tthrow new Error(`Could not read or parse tasks file: ${tasksPath}`);\n\t\t}\n\t\t// Prioritize the _rawTaggedData if it exists, otherwise use the data as is.\n\t\tconst rawData = resolvedData._rawTaggedData || resolvedData;\n\n\t\t// 2. Determine the target tag we need to generate files for.\n\t\tconst tagData = rawData[tag];\n\n\t\tif (!tagData || !tagData.tasks) {\n\t\t\tthrow new Error(`Tag '${tag}' not found or has no tasks in the data.`);\n\t\t}\n\t\tconst tasksForGeneration = tagData.tasks;\n\n\t\t// Create the output directory if it doesn't exist\n\t\tif (!fs.existsSync(outputDir)) {\n\t\t\tfs.mkdirSync(outputDir, { recursive: true });\n\t\t}\n\n\t\tlog(\n\t\t\t'info',\n\t\t\t`Preparing to regenerate ${tasksForGeneration.length} task files for tag '${tag}'`\n\t\t);\n\n\t\t// 3. 
Validate dependencies using the FULL, raw data structure to prevent data loss.\n\t\tvalidateAndFixDependencies(\n\t\t\trawData, // Pass the entire object with all tags\n\t\t\ttasksPath,\n\t\t\tprojectRoot,\n\t\t\ttag // Provide the current tag context for the operation\n\t\t);\n\n\t\tconst allTasksInTag = tagData.tasks;\n\t\tconst validTaskIds = allTasksInTag.map((task) => task.id);\n\n\t\t// Cleanup orphaned task files\n\t\tlog('info', 'Checking for orphaned task files to clean up...');\n\t\ttry {\n\t\t\tconst files = fs.readdirSync(outputDir);\n\t\t\t// Tag-aware file patterns: master -> task_001.txt, other tags -> task_001_tagname.txt\n\t\t\tconst masterFilePattern = /^task_(\\d+)\\.txt$/;\n\t\t\tconst taggedFilePattern = new RegExp(`^task_(\\\\d+)_${tag}\\\\.txt$`);\n\n\t\t\tconst orphanedFiles = files.filter((file) => {\n\t\t\t\tlet match = null;\n\t\t\t\tlet fileTaskId = null;\n\n\t\t\t\t// Check if file belongs to current tag\n\t\t\t\tif (tag === 'master') {\n\t\t\t\t\tmatch = file.match(masterFilePattern);\n\t\t\t\t\tif (match) {\n\t\t\t\t\t\tfileTaskId = parseInt(match[1], 10);\n\t\t\t\t\t\t// Only clean up master files when processing master tag\n\t\t\t\t\t\treturn !validTaskIds.includes(fileTaskId);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tmatch = file.match(taggedFilePattern);\n\t\t\t\t\tif (match) {\n\t\t\t\t\t\tfileTaskId = parseInt(match[1], 10);\n\t\t\t\t\t\t// Only clean up files for the current tag\n\t\t\t\t\t\treturn !validTaskIds.includes(fileTaskId);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn false;\n\t\t\t});\n\n\t\t\tif (orphanedFiles.length > 0) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Found ${orphanedFiles.length} orphaned task files to remove for tag '${tag}'`\n\t\t\t\t);\n\t\t\t\torphanedFiles.forEach((file) => {\n\t\t\t\t\tconst filePath = path.join(outputDir, file);\n\t\t\t\t\tfs.unlinkSync(filePath);\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tlog('info', 'No orphaned task files found.');\n\t\t\t}\n\t\t} catch (err) {\n\t\t\tlog('warn', 
`Error cleaning up orphaned task files: ${err.message}`);\n\t\t}\n\n\t\t// Generate task files for the target tag\n\t\tlog('info', `Generating individual task files for tag '${tag}'...`);\n\t\ttasksForGeneration.forEach((task) => {\n\t\t\t// Tag-aware file naming: master -> task_001.txt, other tags -> task_001_tagname.txt\n\t\t\tconst taskFileName =\n\t\t\t\ttag === 'master'\n\t\t\t\t\t? `task_${task.id.toString().padStart(3, '0')}.txt`\n\t\t\t\t\t: `task_${task.id.toString().padStart(3, '0')}_${tag}.txt`;\n\n\t\t\tconst taskPath = path.join(outputDir, taskFileName);\n\n\t\t\tlet content = `# Task ID: ${task.id}\\n`;\n\t\t\tcontent += `# Title: ${task.title}\\n`;\n\t\t\tcontent += `# Status: ${task.status || 'pending'}\\n`;\n\n\t\t\tif (task.dependencies && task.dependencies.length > 0) {\n\t\t\t\tcontent += `# Dependencies: ${formatDependenciesWithStatus(task.dependencies, allTasksInTag, false)}\\n`;\n\t\t\t} else {\n\t\t\t\tcontent += '# Dependencies: None\\n';\n\t\t\t}\n\n\t\t\tcontent += `# Priority: ${task.priority || 'medium'}\\n`;\n\t\t\tcontent += `# Description: ${task.description || ''}\\n`;\n\t\t\tcontent += '# Details:\\n';\n\t\t\tcontent += (task.details || '')\n\t\t\t\t.split('\\n')\n\t\t\t\t.map((line) => line)\n\t\t\t\t.join('\\n');\n\t\t\tcontent += '\\n\\n';\n\t\t\tcontent += '# Test Strategy:\\n';\n\t\t\tcontent += (task.testStrategy || '')\n\t\t\t\t.split('\\n')\n\t\t\t\t.map((line) => line)\n\t\t\t\t.join('\\n');\n\t\t\tcontent += '\\n';\n\n\t\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\t\tcontent += '\\n# Subtasks:\\n';\n\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\tcontent += `## ${subtask.id}. ${subtask.title} [${subtask.status || 'pending'}]\\n`;\n\t\t\t\t\tif (subtask.dependencies && subtask.dependencies.length > 0) {\n\t\t\t\t\t\tconst subtaskDeps = subtask.dependencies\n\t\t\t\t\t\t\t.map((depId) =>\n\t\t\t\t\t\t\t\ttypeof depId === 'number'\n\t\t\t\t\t\t\t\t\t? 
`${task.id}.${depId}`\n\t\t\t\t\t\t\t\t\t: depId.toString()\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t.join(', ');\n\t\t\t\t\t\tcontent += `### Dependencies: ${subtaskDeps}\\n`;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcontent += '### Dependencies: None\\n';\n\t\t\t\t\t}\n\t\t\t\t\tcontent += `### Description: ${subtask.description || ''}\\n`;\n\t\t\t\t\tcontent += '### Details:\\n';\n\t\t\t\t\tcontent += (subtask.details || '')\n\t\t\t\t\t\t.split('\\n')\n\t\t\t\t\t\t.map((line) => line)\n\t\t\t\t\t\t.join('\\n');\n\t\t\t\t\tcontent += '\\n\\n';\n\t\t\t\t});\n\t\t\t}\n\n\t\t\tfs.writeFileSync(taskPath, content);\n\t\t});\n\n\t\tlog(\n\t\t\t'success',\n\t\t\t`All ${tasksForGeneration.length} tasks for tag '${tag}' have been generated into '${outputDir}'.`\n\t\t);\n\n\t\tif (isMcpMode) {\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tcount: tasksForGeneration.length,\n\t\t\t\tdirectory: outputDir\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\tlog('error', `Error generating task files: ${error.message}`);\n\t\tif (!options?.mcpLog) {\n\t\t\tconsole.error(chalk.red(`Error generating task files: ${error.message}`));\n\t\t\tif (getDebugFlag()) {\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\t\t\tprocess.exit(1);\n\t\t} else {\n\t\t\tthrow error;\n\t\t}\n\t}\n}\n\nexport default generateTaskFiles;\n"], ["/claude-task-master/mcp-server/src/tools/utils.js", "/**\n * tools/utils.js\n * Utility functions for Task Master CLI integration\n */\n\nimport { spawnSync } from 'child_process';\nimport path from 'path';\nimport fs from 'fs';\nimport { contextManager } from '../core/context-manager.js'; // Import the singleton\nimport { fileURLToPath } from 'url';\nimport { getCurrentTag } from '../../../scripts/modules/utils.js';\n\n// Import path utilities to ensure consistent path resolution\nimport {\n\tlastFoundProjectRoot,\n\tPROJECT_MARKERS\n} from '../core/utils/path-utils.js';\n\nconst __filename = fileURLToPath(import.meta.url);\n\n// Cache for version info to avoid repeated file reads\nlet 
cachedVersionInfo = null;\n\n/**\n * Get version information from package.json\n * @returns {Object} Version information\n */\nfunction getVersionInfo() {\n\t// Return cached version if available\n\tif (cachedVersionInfo) {\n\t\treturn cachedVersionInfo;\n\t}\n\n\ttry {\n\t\t// Navigate to the project root from the tools directory\n\t\tconst packageJsonPath = path.join(\n\t\t\tpath.dirname(__filename),\n\t\t\t'../../../package.json'\n\t\t);\n\t\tif (fs.existsSync(packageJsonPath)) {\n\t\t\tconst packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf-8'));\n\t\t\tcachedVersionInfo = {\n\t\t\t\tversion: packageJson.version,\n\t\t\t\tname: packageJson.name\n\t\t\t};\n\t\t\treturn cachedVersionInfo;\n\t\t}\n\t\tcachedVersionInfo = {\n\t\t\tversion: 'unknown',\n\t\t\tname: 'task-master-ai'\n\t\t};\n\t\treturn cachedVersionInfo;\n\t} catch (error) {\n\t\t// Fallback version info if package.json can't be read\n\t\tcachedVersionInfo = {\n\t\t\tversion: 'unknown',\n\t\t\tname: 'task-master-ai'\n\t\t};\n\t\treturn cachedVersionInfo;\n\t}\n}\n\n/**\n * Get current tag information for MCP responses\n * @param {string} projectRoot - The project root directory\n * @param {Object} log - Logger object\n * @returns {Object} Tag information object\n */\nfunction getTagInfo(projectRoot, log) {\n\ttry {\n\t\tif (!projectRoot) {\n\t\t\tlog.warn('No project root provided for tag information');\n\t\t\treturn { currentTag: 'master', availableTags: ['master'] };\n\t\t}\n\n\t\tconst currentTag = getCurrentTag(projectRoot);\n\n\t\t// Read available tags from tasks.json\n\t\tlet availableTags = ['master']; // Default fallback\n\t\ttry {\n\t\t\tconst tasksJsonPath = path.join(\n\t\t\t\tprojectRoot,\n\t\t\t\t'.taskmaster',\n\t\t\t\t'tasks',\n\t\t\t\t'tasks.json'\n\t\t\t);\n\t\t\tif (fs.existsSync(tasksJsonPath)) {\n\t\t\t\tconst tasksData = JSON.parse(fs.readFileSync(tasksJsonPath, 'utf-8'));\n\n\t\t\t\t// If it's the new tagged format, extract tag keys\n\t\t\t\tif 
(\n\t\t\t\t\ttasksData &&\n\t\t\t\t\ttypeof tasksData === 'object' &&\n\t\t\t\t\t!Array.isArray(tasksData.tasks)\n\t\t\t\t) {\n\t\t\t\t\tconst tagKeys = Object.keys(tasksData).filter(\n\t\t\t\t\t\t(key) =>\n\t\t\t\t\t\t\ttasksData[key] &&\n\t\t\t\t\t\t\ttypeof tasksData[key] === 'object' &&\n\t\t\t\t\t\t\tArray.isArray(tasksData[key].tasks)\n\t\t\t\t\t);\n\t\t\t\t\tif (tagKeys.length > 0) {\n\t\t\t\t\t\tavailableTags = tagKeys;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (tagError) {\n\t\t\tlog.debug(`Could not read available tags: ${tagError.message}`);\n\t\t}\n\n\t\treturn {\n\t\t\tcurrentTag: currentTag || 'master',\n\t\t\tavailableTags: availableTags\n\t\t};\n\t} catch (error) {\n\t\tlog.warn(`Error getting tag information: ${error.message}`);\n\t\treturn { currentTag: 'master', availableTags: ['master'] };\n\t}\n}\n\n/**\n * Get normalized project root path\n * @param {string|undefined} projectRootRaw - Raw project root from arguments\n * @param {Object} log - Logger object\n * @returns {string} - Normalized absolute path to project root\n */\nfunction getProjectRoot(projectRootRaw, log) {\n\t// PRECEDENCE ORDER:\n\t// 1. Environment variable override (TASK_MASTER_PROJECT_ROOT)\n\t// 2. Explicitly provided projectRoot in args\n\t// 3. Previously found/cached project root\n\t// 4. Current directory if it has project markers\n\t// 5. Current directory with warning\n\n\t// 1. Check for environment variable override\n\tif (process.env.TASK_MASTER_PROJECT_ROOT) {\n\t\tconst envRoot = process.env.TASK_MASTER_PROJECT_ROOT;\n\t\tconst absolutePath = path.isAbsolute(envRoot)\n\t\t\t? envRoot\n\t\t\t: path.resolve(process.cwd(), envRoot);\n\t\tlog.info(\n\t\t\t`Using project root from TASK_MASTER_PROJECT_ROOT environment variable: ${absolutePath}`\n\t\t);\n\t\treturn absolutePath;\n\t}\n\n\t// 2. If project root is explicitly provided, use it\n\tif (projectRootRaw) {\n\t\tconst absolutePath = path.isAbsolute(projectRootRaw)\n\t\t\t? 
projectRootRaw\n\t\t\t: path.resolve(process.cwd(), projectRootRaw);\n\n\t\tlog.info(`Using explicitly provided project root: ${absolutePath}`);\n\t\treturn absolutePath;\n\t}\n\n\t// 3. If we have a last found project root from a tasks.json search, use that for consistency\n\tif (lastFoundProjectRoot) {\n\t\tlog.info(\n\t\t\t`Using last known project root where tasks.json was found: ${lastFoundProjectRoot}`\n\t\t);\n\t\treturn lastFoundProjectRoot;\n\t}\n\n\t// 4. Check if the current directory has any indicators of being a task-master project\n\tconst currentDir = process.cwd();\n\tif (\n\t\tPROJECT_MARKERS.some((marker) => {\n\t\t\tconst markerPath = path.join(currentDir, marker);\n\t\t\treturn fs.existsSync(markerPath);\n\t\t})\n\t) {\n\t\tlog.info(\n\t\t\t`Using current directory as project root (found project markers): ${currentDir}`\n\t\t);\n\t\treturn currentDir;\n\t}\n\n\t// 5. Default to current working directory but warn the user\n\tlog.warn(\n\t\t`No task-master project detected in current directory. 
Using ${currentDir} as project root.`\n\t);\n\tlog.warn(\n\t\t'Consider using --project-root to specify the correct project location or set TASK_MASTER_PROJECT_ROOT environment variable.'\n\t);\n\treturn currentDir;\n}\n\n/**\n * Extracts and normalizes the project root path from the MCP session object.\n * @param {Object} session - The MCP session object.\n * @param {Object} log - The MCP logger object.\n * @returns {string|null} - The normalized absolute project root path or null if not found/invalid.\n */\nfunction getProjectRootFromSession(session, log) {\n\ttry {\n\t\t// Add detailed logging of session structure\n\t\tlog.info(\n\t\t\t`Session object: ${JSON.stringify({\n\t\t\t\thasSession: !!session,\n\t\t\t\thasRoots: !!session?.roots,\n\t\t\t\trootsType: typeof session?.roots,\n\t\t\t\tisRootsArray: Array.isArray(session?.roots),\n\t\t\t\trootsLength: session?.roots?.length,\n\t\t\t\tfirstRoot: session?.roots?.[0],\n\t\t\t\thasRootsRoots: !!session?.roots?.roots,\n\t\t\t\trootsRootsType: typeof session?.roots?.roots,\n\t\t\t\tisRootsRootsArray: Array.isArray(session?.roots?.roots),\n\t\t\t\trootsRootsLength: session?.roots?.roots?.length,\n\t\t\t\tfirstRootsRoot: session?.roots?.roots?.[0]\n\t\t\t})}`\n\t\t);\n\n\t\tlet rawRootPath = null;\n\t\tlet decodedPath = null;\n\t\tlet finalPath = null;\n\n\t\t// Check primary location\n\t\tif (session?.roots?.[0]?.uri) {\n\t\t\trawRootPath = session.roots[0].uri;\n\t\t\tlog.info(`Found raw root URI in session.roots[0].uri: ${rawRootPath}`);\n\t\t}\n\t\t// Check alternate location\n\t\telse if (session?.roots?.roots?.[0]?.uri) {\n\t\t\trawRootPath = session.roots.roots[0].uri;\n\t\t\tlog.info(\n\t\t\t\t`Found raw root URI in session.roots.roots[0].uri: ${rawRootPath}`\n\t\t\t);\n\t\t}\n\n\t\tif (rawRootPath) {\n\t\t\t// Decode URI and strip file:// protocol\n\t\t\tdecodedPath = rawRootPath.startsWith('file://')\n\t\t\t\t? 
decodeURIComponent(rawRootPath.slice(7))\n\t\t\t\t: rawRootPath; // Assume non-file URI is already decoded? Or decode anyway? Let's decode.\n\t\t\tif (!rawRootPath.startsWith('file://')) {\n\t\t\t\tdecodedPath = decodeURIComponent(rawRootPath); // Decode even if no file://\n\t\t\t}\n\n\t\t\t// Handle potential Windows drive prefix after stripping protocol (e.g., /C:/...)\n\t\t\tif (\n\t\t\t\tdecodedPath.startsWith('/') &&\n\t\t\t\t/[A-Za-z]:/.test(decodedPath.substring(1, 3))\n\t\t\t) {\n\t\t\t\tdecodedPath = decodedPath.substring(1); // Remove leading slash if it's like /C:/...\n\t\t\t}\n\n\t\t\tlog.info(`Decoded path: ${decodedPath}`);\n\n\t\t\t// Normalize slashes and resolve\n\t\t\tconst normalizedSlashes = decodedPath.replace(/\\\\/g, '/');\n\t\t\tfinalPath = path.resolve(normalizedSlashes); // Resolve to absolute path for current OS\n\n\t\t\tlog.info(`Normalized and resolved session path: ${finalPath}`);\n\t\t\treturn finalPath;\n\t\t}\n\n\t\t// Fallback Logic (remains the same)\n\t\tlog.warn('No project root URI found in session. 
Attempting fallbacks...');\n\t\tconst cwd = process.cwd();\n\n\t\t// Fallback 1: Use server path deduction (Cursor IDE)\n\t\tconst serverPath = process.argv[1];\n\t\tif (serverPath && serverPath.includes('mcp-server')) {\n\t\t\tconst mcpServerIndex = serverPath.indexOf('mcp-server');\n\t\t\tif (mcpServerIndex !== -1) {\n\t\t\t\tconst projectRoot = path.dirname(\n\t\t\t\t\tserverPath.substring(0, mcpServerIndex)\n\t\t\t\t); // Go up one level\n\n\t\t\t\tif (\n\t\t\t\t\tfs.existsSync(path.join(projectRoot, '.cursor')) ||\n\t\t\t\t\tfs.existsSync(path.join(projectRoot, 'mcp-server')) ||\n\t\t\t\t\tfs.existsSync(path.join(projectRoot, 'package.json'))\n\t\t\t\t) {\n\t\t\t\t\tlog.info(\n\t\t\t\t\t\t`Using project root derived from server path: ${projectRoot}`\n\t\t\t\t\t);\n\t\t\t\t\treturn projectRoot; // Already absolute\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Fallback 2: Use CWD\n\t\tlog.info(`Using current working directory as ultimate fallback: ${cwd}`);\n\t\treturn cwd; // Already absolute\n\t} catch (e) {\n\t\tlog.error(`Error in getProjectRootFromSession: ${e.message}`);\n\t\t// Attempt final fallback to CWD on error\n\t\tconst cwd = process.cwd();\n\t\tlog.warn(\n\t\t\t`Returning CWD (${cwd}) due to error during session root processing.`\n\t\t);\n\t\treturn cwd;\n\t}\n}\n\n/**\n * Handle API result with standardized error handling and response formatting\n * @param {Object} result - Result object from API call with success, data, and error properties\n * @param {Object} log - Logger object\n * @param {string} errorPrefix - Prefix for error messages\n * @param {Function} processFunction - Optional function to process successful result data\n * @param {string} [projectRoot] - Optional project root for tag information\n * @returns {Object} - Standardized MCP response object\n */\nasync function handleApiResult(\n\tresult,\n\tlog,\n\terrorPrefix = 'API error',\n\tprocessFunction = processMCPResponseData,\n\tprojectRoot = null\n) {\n\t// Get version info for every 
response\n\tconst versionInfo = getVersionInfo();\n\n\t// Get tag info if project root is provided\n\tconst tagInfo = projectRoot ? getTagInfo(projectRoot, log) : null;\n\n\tif (!result.success) {\n\t\tconst errorMsg = result.error?.message || `Unknown ${errorPrefix}`;\n\t\tlog.error(`${errorPrefix}: ${errorMsg}`);\n\t\treturn createErrorResponse(errorMsg, versionInfo, tagInfo);\n\t}\n\n\t// Process the result data if needed\n\tconst processedData = processFunction\n\t\t? processFunction(result.data)\n\t\t: result.data;\n\n\tlog.info('Successfully completed operation');\n\n\t// Create the response payload including version info and tag info\n\tconst responsePayload = {\n\t\tdata: processedData,\n\t\tversion: versionInfo\n\t};\n\n\t// Add tag information if available\n\tif (tagInfo) {\n\t\tresponsePayload.tag = tagInfo;\n\t}\n\n\treturn createContentResponse(responsePayload);\n}\n\n/**\n * Executes a task-master CLI command synchronously.\n * @param {string} command - The command to execute (e.g., 'add-task')\n * @param {Object} log - Logger instance\n * @param {Array} args - Arguments for the command\n * @param {string|undefined} projectRootRaw - Optional raw project root path (will be normalized internally)\n * @param {Object|null} customEnv - Optional object containing environment variables to pass to the child process\n * @returns {Object} - The result of the command execution\n */\nfunction executeTaskMasterCommand(\n\tcommand,\n\tlog,\n\targs = [],\n\tprojectRootRaw = null,\n\tcustomEnv = null // Changed from session to customEnv\n) {\n\ttry {\n\t\t// Normalize project root internally using the getProjectRoot utility\n\t\tconst cwd = getProjectRoot(projectRootRaw, log);\n\n\t\tlog.info(\n\t\t\t`Executing task-master ${command} with args: ${JSON.stringify(\n\t\t\t\targs\n\t\t\t)} in directory: ${cwd}`\n\t\t);\n\n\t\t// Prepare full arguments array\n\t\tconst fullArgs = [command, ...args];\n\n\t\t// Common options for spawn\n\t\tconst spawnOptions = 
{\n\t\t\tencoding: 'utf8',\n\t\t\tcwd: cwd,\n\t\t\t// Merge process.env with customEnv, giving precedence to customEnv\n\t\t\tenv: { ...process.env, ...(customEnv || {}) }\n\t\t};\n\n\t\t// Log the environment being passed (optional, for debugging)\n\t\t// log.info(`Spawn options env: ${JSON.stringify(spawnOptions.env)}`);\n\n\t\t// Execute the command using the global task-master CLI or local script\n\t\t// Try the global CLI first\n\t\tlet result = spawnSync('task-master', fullArgs, spawnOptions);\n\n\t\t// If global CLI is not available, try fallback to the local script\n\t\tif (result.error && result.error.code === 'ENOENT') {\n\t\t\tlog.info('Global task-master not found, falling back to local script');\n\t\t\t// Pass the same spawnOptions (including env) to the fallback\n\t\t\tresult = spawnSync('node', ['scripts/dev.js', ...fullArgs], spawnOptions);\n\t\t}\n\n\t\tif (result.error) {\n\t\t\tthrow new Error(`Command execution error: ${result.error.message}`);\n\t\t}\n\n\t\tif (result.status !== 0) {\n\t\t\t// Improve error handling by combining stderr and stdout if stderr is empty\n\t\t\tconst errorOutput = result.stderr\n\t\t\t\t? result.stderr.trim()\n\t\t\t\t: result.stdout\n\t\t\t\t\t? result.stdout.trim()\n\t\t\t\t\t: 'Unknown error';\n\t\t\tthrow new Error(\n\t\t\t\t`Command failed with exit code ${result.status}: ${errorOutput}`\n\t\t\t);\n\t\t}\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tstdout: result.stdout,\n\t\t\tstderr: result.stderr\n\t\t};\n\t} catch (error) {\n\t\tlog.error(`Error executing task-master command: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: error.message\n\t\t};\n\t}\n}\n\n/**\n * Checks cache for a result using the provided key. 
If not found, executes the action function,\n * caches the result upon success, and returns the result.\n *\n * @param {Object} options - Configuration options.\n * @param {string} options.cacheKey - The unique key for caching this operation's result.\n * @param {Function} options.actionFn - The async function to execute if the cache misses.\n * Should return an object like { success: boolean, data?: any, error?: { code: string, message: string } }.\n * @param {Object} options.log - The logger instance.\n * @returns {Promise<Object>} - An object containing the result.\n * Format: { success: boolean, data?: any, error?: { code: string, message: string } }\n */\nasync function getCachedOrExecute({ cacheKey, actionFn, log }) {\n\t// Check cache first\n\tconst cachedResult = contextManager.getCachedData(cacheKey);\n\n\tif (cachedResult !== undefined) {\n\t\tlog.info(`Cache hit for key: ${cacheKey}`);\n\t\treturn cachedResult;\n\t}\n\n\tlog.info(`Cache miss for key: ${cacheKey}. Executing action function.`);\n\n\t// Execute the action function if cache missed\n\tconst result = await actionFn();\n\n\t// If the action was successful, cache the result\n\tif (result.success && result.data !== undefined) {\n\t\tlog.info(`Action successful. Caching result for key: ${cacheKey}`);\n\t\tcontextManager.setCachedData(cacheKey, result);\n\t} else if (!result.success) {\n\t\tlog.warn(\n\t\t\t`Action failed for cache key ${cacheKey}. Result not cached. Error: ${result.error?.message}`\n\t\t);\n\t} else {\n\t\tlog.warn(\n\t\t\t`Action for cache key ${cacheKey} succeeded but returned no data. 
Result not cached.`\n\t\t);\n\t}\n\n\treturn result;\n}\n\n/**\n * Recursively removes specified fields from task objects, whether single or in an array.\n * Handles common data structures returned by task commands.\n * @param {Object|Array} taskOrData - A single task object or a data object containing a 'tasks' array.\n * @param {string[]} fieldsToRemove - An array of field names to remove.\n * @returns {Object|Array} - The processed data with specified fields removed.\n */\nfunction processMCPResponseData(\n\ttaskOrData,\n\tfieldsToRemove = ['details', 'testStrategy']\n) {\n\tif (!taskOrData) {\n\t\treturn taskOrData;\n\t}\n\n\t// Helper function to process a single task object\n\tconst processSingleTask = (task) => {\n\t\tif (typeof task !== 'object' || task === null) {\n\t\t\treturn task;\n\t\t}\n\n\t\tconst processedTask = { ...task };\n\n\t\t// Remove specified fields from the task\n\t\tfieldsToRemove.forEach((field) => {\n\t\t\tdelete processedTask[field];\n\t\t});\n\n\t\t// Recursively process subtasks if they exist and are an array\n\t\tif (processedTask.subtasks && Array.isArray(processedTask.subtasks)) {\n\t\t\t// Use processArrayOfTasks to handle the subtasks array\n\t\t\tprocessedTask.subtasks = processArrayOfTasks(processedTask.subtasks);\n\t\t}\n\n\t\treturn processedTask;\n\t};\n\n\t// Helper function to process an array of tasks\n\tconst processArrayOfTasks = (tasks) => {\n\t\treturn tasks.map(processSingleTask);\n\t};\n\n\t// Check if the input is a data structure containing a 'tasks' array (like from listTasks)\n\tif (\n\t\ttypeof taskOrData === 'object' &&\n\t\ttaskOrData !== null &&\n\t\tArray.isArray(taskOrData.tasks)\n\t) {\n\t\treturn {\n\t\t\t...taskOrData, // Keep other potential fields like 'stats', 'filter'\n\t\t\ttasks: processArrayOfTasks(taskOrData.tasks)\n\t\t};\n\t}\n\t// Check if the input is likely a single task object (add more checks if needed)\n\telse if (\n\t\ttypeof taskOrData === 'object' &&\n\t\ttaskOrData !== null 
&&\n\t\t'id' in taskOrData &&\n\t\t'title' in taskOrData\n\t) {\n\t\treturn processSingleTask(taskOrData);\n\t}\n\t// Check if the input is an array of tasks directly (less common but possible)\n\telse if (Array.isArray(taskOrData)) {\n\t\treturn processArrayOfTasks(taskOrData);\n\t}\n\n\t// If it doesn't match known task structures, return it as is\n\treturn taskOrData;\n}\n\n/**\n * Creates standard content response for tools\n * @param {string|Object} content - Content to include in response\n * @returns {Object} - Content response object in FastMCP format\n */\nfunction createContentResponse(content) {\n\t// FastMCP requires text type, so we format objects as JSON strings\n\treturn {\n\t\tcontent: [\n\t\t\t{\n\t\t\t\ttype: 'text',\n\t\t\t\ttext:\n\t\t\t\t\ttypeof content === 'object'\n\t\t\t\t\t\t? // Format JSON nicely with indentation\n\t\t\t\t\t\t\tJSON.stringify(content, null, 2)\n\t\t\t\t\t\t: // Keep other content types as-is\n\t\t\t\t\t\t\tString(content)\n\t\t\t}\n\t\t]\n\t};\n}\n\n/**\n * Creates error response for tools\n * @param {string} errorMessage - Error message to include in response\n * @param {Object} [versionInfo] - Optional version information object\n * @param {Object} [tagInfo] - Optional tag information object\n * @returns {Object} - Error content response object in FastMCP format\n */\nfunction createErrorResponse(errorMessage, versionInfo, tagInfo) {\n\t// Provide fallback version info if not provided\n\tif (!versionInfo) {\n\t\tversionInfo = getVersionInfo();\n\t}\n\n\tlet responseText = `Error: ${errorMessage}\nVersion: ${versionInfo.version}\nName: ${versionInfo.name}`;\n\n\t// Add tag information if available\n\tif (tagInfo) {\n\t\tresponseText += `\nCurrent Tag: ${tagInfo.currentTag}`;\n\t}\n\n\treturn {\n\t\tcontent: [\n\t\t\t{\n\t\t\t\ttype: 'text',\n\t\t\t\ttext: responseText\n\t\t\t}\n\t\t],\n\t\tisError: true\n\t};\n}\n\n/**\n * Creates a logger wrapper object compatible with core function expectations.\n * Adapts the MCP 
logger to the { info, warn, error, debug, success } structure.\n * @param {Object} log - The MCP logger instance.\n * @returns {Object} - The logger wrapper object.\n */\nfunction createLogWrapper(log) {\n\treturn {\n\t\tinfo: (message, ...args) => log.info(message, ...args),\n\t\twarn: (message, ...args) => log.warn(message, ...args),\n\t\terror: (message, ...args) => log.error(message, ...args),\n\t\t// Handle optional debug method\n\t\tdebug: (message, ...args) =>\n\t\t\tlog.debug ? log.debug(message, ...args) : null,\n\t\t// Map success to info as a common fallback\n\t\tsuccess: (message, ...args) => log.info(message, ...args)\n\t};\n}\n\n/**\n * Resolves and normalizes a project root path from various formats.\n * Handles URI encoding, Windows paths, and file protocols.\n * @param {string | undefined | null} rawPath - The raw project root path.\n * @param {object} [log] - Optional logger object.\n * @returns {string | null} Normalized absolute path or null if input is invalid/empty.\n */\nfunction normalizeProjectRoot(rawPath, log) {\n\tif (!rawPath) return null;\n\ttry {\n\t\tlet pathString = Array.isArray(rawPath) ? rawPath[0] : String(rawPath);\n\t\tif (!pathString) return null;\n\n\t\t// 1. Decode URI Encoding\n\t\t// Use try-catch for decoding as malformed URIs can throw\n\t\ttry {\n\t\t\tpathString = decodeURIComponent(pathString);\n\t\t} catch (decodeError) {\n\t\t\tif (log)\n\t\t\t\tlog.warn(\n\t\t\t\t\t`Could not decode URI component for path \"${rawPath}\": ${decodeError.message}. Proceeding with raw string.`\n\t\t\t\t);\n\t\t\t// Proceed with the original string if decoding fails\n\t\t\tpathString = Array.isArray(rawPath) ? rawPath[0] : String(rawPath);\n\t\t}\n\n\t\t// 2. 
Strip file:// prefix (handle 2 or 3 slashes)\n\t\tif (pathString.startsWith('file:///')) {\n\t\t\tpathString = pathString.slice(7); // Slice 7 for file:///, may leave leading / on Windows\n\t\t} else if (pathString.startsWith('file://')) {\n\t\t\tpathString = pathString.slice(7); // Slice 7 for file://\n\t\t}\n\n\t\t// 3. Handle potential Windows leading slash after stripping prefix (e.g., /C:/...)\n\t\t// This checks if it starts with / followed by a drive letter C: D: etc.\n\t\tif (\n\t\t\tpathString.startsWith('/') &&\n\t\t\t/[A-Za-z]:/.test(pathString.substring(1, 3))\n\t\t) {\n\t\t\tpathString = pathString.substring(1); // Remove the leading slash\n\t\t}\n\n\t\t// 4. Normalize backslashes to forward slashes\n\t\tpathString = pathString.replace(/\\\\/g, '/');\n\n\t\t// 5. Resolve to absolute path using server's OS convention\n\t\tconst resolvedPath = path.resolve(pathString);\n\t\treturn resolvedPath;\n\t} catch (error) {\n\t\tif (log) {\n\t\t\tlog.error(\n\t\t\t\t`Error normalizing project root path \"${rawPath}\": ${error.message}`\n\t\t\t);\n\t\t}\n\t\treturn null; // Return null on error\n\t}\n}\n\n/**\n * Extracts the raw project root path from the session (without normalization).\n * Used as a fallback within the HOF.\n * @param {Object} session - The MCP session object.\n * @param {Object} log - The MCP logger object.\n * @returns {string|null} The raw path string or null.\n */\nfunction getRawProjectRootFromSession(session, log) {\n\ttry {\n\t\t// Check primary location\n\t\tif (session?.roots?.[0]?.uri) {\n\t\t\treturn session.roots[0].uri;\n\t\t}\n\t\t// Check alternate location\n\t\telse if (session?.roots?.roots?.[0]?.uri) {\n\t\t\treturn session.roots.roots[0].uri;\n\t\t}\n\t\treturn null; // Not found in expected session locations\n\t} catch (e) {\n\t\tlog.error(`Error accessing session roots: ${e.message}`);\n\t\treturn null;\n\t}\n}\n\n/**\n * Higher-order function to wrap MCP tool execute methods.\n * Ensures args.projectRoot is present and 
normalized before execution.\n * Uses TASK_MASTER_PROJECT_ROOT environment variable with proper precedence.\n * @param {Function} executeFn - The original async execute(args, context) function.\n * @returns {Function} The wrapped async execute function.\n */\nfunction withNormalizedProjectRoot(executeFn) {\n\treturn async (args, context) => {\n\t\tconst { log, session } = context;\n\t\tlet normalizedRoot = null;\n\t\tlet rootSource = 'unknown';\n\n\t\ttry {\n\t\t\t// PRECEDENCE ORDER:\n\t\t\t// 1. TASK_MASTER_PROJECT_ROOT environment variable (from process.env or session)\n\t\t\t// 2. args.projectRoot (explicitly provided)\n\t\t\t// 3. Session-based project root resolution\n\t\t\t// 4. Current directory fallback\n\n\t\t\t// 1. Check for TASK_MASTER_PROJECT_ROOT environment variable first\n\t\t\tif (process.env.TASK_MASTER_PROJECT_ROOT) {\n\t\t\t\tconst envRoot = process.env.TASK_MASTER_PROJECT_ROOT;\n\t\t\t\tnormalizedRoot = path.isAbsolute(envRoot)\n\t\t\t\t\t? envRoot\n\t\t\t\t\t: path.resolve(process.cwd(), envRoot);\n\t\t\t\trootSource = 'TASK_MASTER_PROJECT_ROOT environment variable';\n\t\t\t\tlog.info(`Using project root from ${rootSource}: ${normalizedRoot}`);\n\t\t\t}\n\t\t\t// Also check session environment variables for TASK_MASTER_PROJECT_ROOT\n\t\t\telse if (session?.env?.TASK_MASTER_PROJECT_ROOT) {\n\t\t\t\tconst envRoot = session.env.TASK_MASTER_PROJECT_ROOT;\n\t\t\t\tnormalizedRoot = path.isAbsolute(envRoot)\n\t\t\t\t\t? envRoot\n\t\t\t\t\t: path.resolve(process.cwd(), envRoot);\n\t\t\t\trootSource = 'TASK_MASTER_PROJECT_ROOT session environment variable';\n\t\t\t\tlog.info(`Using project root from ${rootSource}: ${normalizedRoot}`);\n\t\t\t}\n\t\t\t// 2. If no environment variable, try args.projectRoot\n\t\t\telse if (args.projectRoot) {\n\t\t\t\tnormalizedRoot = normalizeProjectRoot(args.projectRoot, log);\n\t\t\t\trootSource = 'args.projectRoot';\n\t\t\t\tlog.info(`Using project root from ${rootSource}: ${normalizedRoot}`);\n\t\t\t}\n\t\t\t// 3. 
If no args.projectRoot, try session-based resolution\n\t\t\telse {\n\t\t\t\tconst sessionRoot = getProjectRootFromSession(session, log);\n\t\t\t\tif (sessionRoot) {\n\t\t\t\t\tnormalizedRoot = sessionRoot; // getProjectRootFromSession already normalizes\n\t\t\t\t\trootSource = 'session';\n\t\t\t\t\tlog.info(`Using project root from ${rootSource}: ${normalizedRoot}`);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (!normalizedRoot) {\n\t\t\t\tlog.error(\n\t\t\t\t\t'Could not determine project root from environment, args, or session.'\n\t\t\t\t);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t'Could not determine project root. Please provide projectRoot argument or ensure TASK_MASTER_PROJECT_ROOT environment variable is set.'\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Inject the normalized root back into args\n\t\t\tconst updatedArgs = { ...args, projectRoot: normalizedRoot };\n\n\t\t\t// Execute the original function with normalized root in args\n\t\t\treturn await executeFn(updatedArgs, context);\n\t\t} catch (error) {\n\t\t\tlog.error(\n\t\t\t\t`Error within withNormalizedProjectRoot HOF (Normalized Root: ${normalizedRoot}): ${error.message}`\n\t\t\t);\n\t\t\t// Add stack trace if available and debug enabled\n\t\t\tif (error.stack && log.debug) {\n\t\t\t\tlog.debug(error.stack);\n\t\t\t}\n\t\t\t// Return a generic error or re-throw depending on desired behavior\n\t\t\treturn createErrorResponse(`Operation failed: ${error.message}`);\n\t\t}\n\t};\n}\n\n// Ensure all functions are exported\nexport {\n\tgetProjectRoot,\n\tgetProjectRootFromSession,\n\tgetTagInfo,\n\thandleApiResult,\n\texecuteTaskMasterCommand,\n\tgetCachedOrExecute,\n\tprocessMCPResponseData,\n\tcreateContentResponse,\n\tcreateErrorResponse,\n\tcreateLogWrapper,\n\tnormalizeProjectRoot,\n\tgetRawProjectRootFromSession,\n\twithNormalizedProjectRoot\n};\n"], ["/claude-task-master/scripts/modules/task-manager/update-subtask-by-id.js", "import fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport boxen 
from 'boxen';\nimport Table from 'cli-table3';\n\nimport {\n\tgetStatusWithColor,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayAiUsageSummary\n} from '../ui.js';\nimport {\n\tlog as consoleLog,\n\treadJSON,\n\twriteJSON,\n\ttruncate,\n\tisSilentMode,\n\tfindProjectRoot,\n\tflattenTasksWithSubtasks\n} from '../utils.js';\nimport { generateTextService } from '../ai-services-unified.js';\nimport { getDebugFlag } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport generateTaskFiles from './generate-task-files.js';\nimport { ContextGatherer } from '../utils/contextGatherer.js';\nimport { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';\n\n/**\n * Update a subtask by appending additional timestamped information using the unified AI service.\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} subtaskId - ID of the subtask to update in format \"parentId.subtaskId\"\n * @param {string} prompt - Prompt for generating additional information\n * @param {boolean} [useResearch=false] - Whether to use the research AI role.\n * @param {Object} context - Context object containing session and mcpLog.\n * @param {Object} [context.session] - Session object from MCP server.\n * @param {Object} [context.mcpLog] - MCP logger object.\n * @param {string} [context.projectRoot] - Project root path (needed for AI service key resolution).\n * @param {string} [context.tag] - Tag for the task\n * @param {string} [outputFormat='text'] - Output format ('text' or 'json'). Automatically 'json' if mcpLog is present.\n * @returns {Promise<Object|null>} - The updated subtask or null if update failed.\n */\nasync function updateSubtaskById(\n\ttasksPath,\n\tsubtaskId,\n\tprompt,\n\tuseResearch = false,\n\tcontext = {},\n\toutputFormat = context.mcpLog ? 
'json' : 'text'\n) {\n\tconst { session, mcpLog, projectRoot: providedProjectRoot, tag } = context;\n\tconst logFn = mcpLog || consoleLog;\n\tconst isMCP = !!mcpLog;\n\n\t// Report helper\n\tconst report = (level, ...args) => {\n\t\tif (isMCP) {\n\t\t\tif (typeof logFn[level] === 'function') logFn[level](...args);\n\t\t\telse logFn.info(...args);\n\t\t} else if (!isSilentMode()) {\n\t\t\tlogFn(level, ...args);\n\t\t}\n\t};\n\n\tlet loadingIndicator = null;\n\n\ttry {\n\t\treport('info', `Updating subtask ${subtaskId} with prompt: \"${prompt}\"`);\n\n\t\tif (\n\t\t\t!subtaskId ||\n\t\t\ttypeof subtaskId !== 'string' ||\n\t\t\t!subtaskId.includes('.')\n\t\t) {\n\t\t\tthrow new Error(\n\t\t\t\t`Invalid subtask ID format: ${subtaskId}. Subtask ID must be in format \"parentId.subtaskId\"`\n\t\t\t);\n\t\t}\n\n\t\tif (!prompt || typeof prompt !== 'string' || prompt.trim() === '') {\n\t\t\tthrow new Error(\n\t\t\t\t'Prompt cannot be empty. Please provide context for the subtask update.'\n\t\t\t);\n\t\t}\n\n\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\tthrow new Error(`Tasks file not found at path: ${tasksPath}`);\n\t\t}\n\n\t\tconst projectRoot = providedProjectRoot || findProjectRoot();\n\t\tif (!projectRoot) {\n\t\t\tthrow new Error('Could not determine project root directory');\n\t\t}\n\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!data || !data.tasks) {\n\t\t\tthrow new Error(\n\t\t\t\t`No valid tasks found in ${tasksPath}. The file may be corrupted or have an invalid format.`\n\t\t\t);\n\t\t}\n\n\t\tconst [parentIdStr, subtaskIdStr] = subtaskId.split('.');\n\t\tconst parentId = parseInt(parentIdStr, 10);\n\t\tconst subtaskIdNum = parseInt(subtaskIdStr, 10);\n\n\t\tif (\n\t\t\tNumber.isNaN(parentId) ||\n\t\t\tparentId <= 0 ||\n\t\t\tNumber.isNaN(subtaskIdNum) ||\n\t\t\tsubtaskIdNum <= 0\n\t\t) {\n\t\t\tthrow new Error(\n\t\t\t\t`Invalid subtask ID format: ${subtaskId}. 
Both parent ID and subtask ID must be positive integers.`\n\t\t\t);\n\t\t}\n\n\t\tconst parentTask = data.tasks.find((task) => task.id === parentId);\n\t\tif (!parentTask) {\n\t\t\tthrow new Error(\n\t\t\t\t`Parent task with ID ${parentId} not found. Please verify the task ID and try again.`\n\t\t\t);\n\t\t}\n\n\t\tif (!parentTask.subtasks || !Array.isArray(parentTask.subtasks)) {\n\t\t\tthrow new Error(`Parent task ${parentId} has no subtasks.`);\n\t\t}\n\n\t\tconst subtaskIndex = parentTask.subtasks.findIndex(\n\t\t\t(st) => st.id === subtaskIdNum\n\t\t);\n\t\tif (subtaskIndex === -1) {\n\t\t\tthrow new Error(\n\t\t\t\t`Subtask with ID ${subtaskId} not found. Please verify the subtask ID and try again.`\n\t\t\t);\n\t\t}\n\n\t\tconst subtask = parentTask.subtasks[subtaskIndex];\n\n\t\t// --- Context Gathering ---\n\t\tlet gatheredContext = '';\n\t\ttry {\n\t\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\t\t\tconst allTasksFlat = flattenTasksWithSubtasks(data.tasks);\n\t\t\tconst fuzzySearch = new FuzzyTaskSearch(allTasksFlat, 'update-subtask');\n\t\t\tconst searchQuery = `${parentTask.title} ${subtask.title} ${prompt}`;\n\t\t\tconst searchResults = fuzzySearch.findRelevantTasks(searchQuery, {\n\t\t\t\tmaxResults: 5,\n\t\t\t\tincludeSelf: true\n\t\t\t});\n\t\t\tconst relevantTaskIds = fuzzySearch.getTaskIds(searchResults);\n\n\t\t\tconst finalTaskIds = [\n\t\t\t\t...new Set([subtaskId.toString(), ...relevantTaskIds])\n\t\t\t];\n\n\t\t\tif (finalTaskIds.length > 0) {\n\t\t\t\tconst contextResult = await contextGatherer.gather({\n\t\t\t\t\ttasks: finalTaskIds,\n\t\t\t\t\tformat: 'research'\n\t\t\t\t});\n\t\t\t\tgatheredContext = contextResult.context || '';\n\t\t\t}\n\t\t} catch (contextError) {\n\t\t\treport('warn', `Could not gather context: ${contextError.message}`);\n\t\t}\n\t\t// --- End Context Gathering ---\n\n\t\tif (outputFormat === 'text') {\n\t\t\tconst table = new Table({\n\t\t\t\thead: 
[\n\t\t\t\t\tchalk.cyan.bold('ID'),\n\t\t\t\t\tchalk.cyan.bold('Title'),\n\t\t\t\t\tchalk.cyan.bold('Status')\n\t\t\t\t],\n\t\t\t\tcolWidths: [10, 55, 10]\n\t\t\t});\n\t\t\ttable.push([\n\t\t\t\tsubtaskId,\n\t\t\t\ttruncate(subtask.title, 52),\n\t\t\t\tgetStatusWithColor(subtask.status)\n\t\t\t]);\n\t\t\tconsole.log(\n\t\t\t\tboxen(chalk.white.bold(`Updating Subtask #${subtaskId}`), {\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t})\n\t\t\t);\n\t\t\tconsole.log(table.toString());\n\t\t\tloadingIndicator = startLoadingIndicator(\n\t\t\t\tuseResearch\n\t\t\t\t\t? 'Updating subtask with research...'\n\t\t\t\t\t: 'Updating subtask...'\n\t\t\t);\n\t\t}\n\n\t\tlet generatedContentString = '';\n\t\tlet newlyAddedSnippet = '';\n\t\tlet aiServiceResponse = null;\n\n\t\ttry {\n\t\t\tconst parentContext = {\n\t\t\t\tid: parentTask.id,\n\t\t\t\ttitle: parentTask.title\n\t\t\t};\n\t\t\tconst prevSubtask =\n\t\t\t\tsubtaskIndex > 0\n\t\t\t\t\t? {\n\t\t\t\t\t\t\tid: `${parentTask.id}.${parentTask.subtasks[subtaskIndex - 1].id}`,\n\t\t\t\t\t\t\ttitle: parentTask.subtasks[subtaskIndex - 1].title,\n\t\t\t\t\t\t\tstatus: parentTask.subtasks[subtaskIndex - 1].status\n\t\t\t\t\t\t}\n\t\t\t\t\t: undefined;\n\t\t\tconst nextSubtask =\n\t\t\t\tsubtaskIndex < parentTask.subtasks.length - 1\n\t\t\t\t\t? 
{\n\t\t\t\t\t\t\tid: `${parentTask.id}.${parentTask.subtasks[subtaskIndex + 1].id}`,\n\t\t\t\t\t\t\ttitle: parentTask.subtasks[subtaskIndex + 1].title,\n\t\t\t\t\t\t\tstatus: parentTask.subtasks[subtaskIndex + 1].status\n\t\t\t\t\t\t}\n\t\t\t\t\t: undefined;\n\n\t\t\t// Build prompts using PromptManager\n\t\t\tconst promptManager = getPromptManager();\n\n\t\t\tconst promptParams = {\n\t\t\t\tparentTask: parentContext,\n\t\t\t\tprevSubtask: prevSubtask,\n\t\t\t\tnextSubtask: nextSubtask,\n\t\t\t\tcurrentDetails: subtask.details || '(No existing details)',\n\t\t\t\tupdatePrompt: prompt,\n\t\t\t\tuseResearch: useResearch,\n\t\t\t\tgatheredContext: gatheredContext || ''\n\t\t\t};\n\n\t\t\tconst variantKey = useResearch ? 'research' : 'default';\n\t\t\tconst { systemPrompt, userPrompt } = await promptManager.loadPrompt(\n\t\t\t\t'update-subtask',\n\t\t\t\tpromptParams,\n\t\t\t\tvariantKey\n\t\t\t);\n\n\t\t\tconst role = useResearch ? 'research' : 'main';\n\t\t\treport('info', `Using AI text service with role: ${role}`);\n\n\t\t\taiServiceResponse = await generateTextService({\n\t\t\t\tprompt: userPrompt,\n\t\t\t\tsystemPrompt: systemPrompt,\n\t\t\t\trole,\n\t\t\t\tsession,\n\t\t\t\tprojectRoot,\n\t\t\t\tmaxRetries: 2,\n\t\t\t\tcommandName: 'update-subtask',\n\t\t\t\toutputType: isMCP ? 
'mcp' : 'cli'\n\t\t\t});\n\n\t\t\tif (\n\t\t\t\taiServiceResponse &&\n\t\t\t\taiServiceResponse.mainResult &&\n\t\t\t\ttypeof aiServiceResponse.mainResult === 'string'\n\t\t\t) {\n\t\t\t\tgeneratedContentString = aiServiceResponse.mainResult;\n\t\t\t} else {\n\t\t\t\tgeneratedContentString = '';\n\t\t\t\treport(\n\t\t\t\t\t'warn',\n\t\t\t\t\t'AI service response did not contain expected text string.'\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tif (outputFormat === 'text' && loadingIndicator) {\n\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t\tloadingIndicator = null;\n\t\t\t}\n\t\t} catch (aiError) {\n\t\t\treport('error', `AI service call failed: ${aiError.message}`);\n\t\t\tif (outputFormat === 'text' && loadingIndicator) {\n\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t\tloadingIndicator = null;\n\t\t\t}\n\t\t\tthrow aiError;\n\t\t}\n\n\t\tif (generatedContentString && generatedContentString.trim()) {\n\t\t\t// Check if the string is not empty\n\t\t\tconst timestamp = new Date().toISOString();\n\t\t\tconst formattedBlock = `<info added on ${timestamp}>\\n${generatedContentString.trim()}\\n</info added on ${timestamp}>`;\n\t\t\tnewlyAddedSnippet = formattedBlock; // <--- ADD THIS LINE: Store for display\n\n\t\t\tsubtask.details =\n\t\t\t\t(subtask.details ? subtask.details + '\\n' : '') + formattedBlock;\n\t\t} else {\n\t\t\treport(\n\t\t\t\t'warn',\n\t\t\t\t'AI response was empty or whitespace after trimming. 
Original details remain unchanged.'\n\t\t\t);\n\t\t\tnewlyAddedSnippet = 'No new details were added by the AI.';\n\t\t}\n\n\t\tconst updatedSubtask = parentTask.subtasks[subtaskIndex];\n\n\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\tconsole.log(\n\t\t\t\t'>>> DEBUG: Subtask details AFTER AI update:',\n\t\t\t\tupdatedSubtask.details\n\t\t\t);\n\t\t}\n\n\t\tif (updatedSubtask.description) {\n\t\t\tif (prompt.length < 100) {\n\t\t\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t'>>> DEBUG: Subtask description BEFORE append:',\n\t\t\t\t\t\tupdatedSubtask.description\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tupdatedSubtask.description += ` [Updated: ${new Date().toLocaleDateString()}]`;\n\t\t\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t'>>> DEBUG: Subtask description AFTER append:',\n\t\t\t\t\t\tupdatedSubtask.description\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\tconsole.log('>>> DEBUG: About to call writeJSON with updated data...');\n\t\t}\n\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\tconsole.log('>>> DEBUG: writeJSON call completed.');\n\t\t}\n\n\t\treport('success', `Successfully updated subtask ${subtaskId}`);\n\t\t// Updated function call to make sure if uncommented it will generate the task files for the updated subtask based on the tag\n\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath), {\n\t\t// \ttag: tag,\n\t\t// \tprojectRoot: projectRoot\n\t\t// });\n\n\t\tif (outputFormat === 'text') {\n\t\t\tif (loadingIndicator) {\n\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t\tloadingIndicator = null;\n\t\t\t}\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.green(`Successfully updated subtask #${subtaskId}`) +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white.bold('Title:') +\n\t\t\t\t\t\t' 
' +\n\t\t\t\t\t\tupdatedSubtask.title +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white.bold('Newly Added Snippet:') +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.white(newlyAddedSnippet),\n\t\t\t\t\t{ padding: 1, borderColor: 'green', borderStyle: 'round' }\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\tif (outputFormat === 'text' && aiServiceResponse.telemetryData) {\n\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t}\n\n\t\treturn {\n\t\t\tupdatedSubtask: updatedSubtask,\n\t\t\ttelemetryData: aiServiceResponse.telemetryData,\n\t\t\ttagInfo: aiServiceResponse.tagInfo\n\t\t};\n\t} catch (error) {\n\t\tif (outputFormat === 'text' && loadingIndicator) {\n\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\tloadingIndicator = null;\n\t\t}\n\t\treport('error', `Error updating subtask: ${error.message}`);\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\tif (error.message?.includes('ANTHROPIC_API_KEY')) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow('\\nTo fix this issue, set your Anthropic API key:')\n\t\t\t\t);\n\t\t\t\tconsole.log(' export ANTHROPIC_API_KEY=your_api_key_here');\n\t\t\t} else if (error.message?.includes('PERPLEXITY_API_KEY')) {\n\t\t\t\tconsole.log(chalk.yellow('\\nTo fix this issue:'));\n\t\t\t\tconsole.log(\n\t\t\t\t\t' 1. Set your Perplexity API key: export PERPLEXITY_API_KEY=your_api_key_here'\n\t\t\t\t);\n\t\t\t\tconsole.log(\n\t\t\t\t\t' 2. Or run without the research flag: task-master update-subtask --id=<id> --prompt=\"...\"'\n\t\t\t\t);\n\t\t\t} else if (error.message?.includes('overloaded')) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t'\\nAI model overloaded, and fallback failed or was unavailable:'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconsole.log(' 1. Try again in a few minutes.');\n\t\t\t\tconsole.log(' 2. 
Ensure PERPLEXITY_API_KEY is set for fallback.');\n\t\t\t} else if (error.message?.includes('not found')) {\n\t\t\t\tconsole.log(chalk.yellow('\\nTo fix this issue:'));\n\t\t\t\tconsole.log(\n\t\t\t\t\t' 1. Run task-master list --with-subtasks to see all available subtask IDs'\n\t\t\t\t);\n\t\t\t\tconsole.log(\n\t\t\t\t\t' 2. Use a valid subtask ID with the --id parameter in format \"parentId.subtaskId\"'\n\t\t\t\t);\n\t\t\t} else if (\n\t\t\t\terror.message?.includes('empty stream response') ||\n\t\t\t\terror.message?.includes('AI did not return a valid text string')\n\t\t\t) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t'\\nThe AI model returned an empty or invalid response. This might be due to the prompt or API issues. Try rephrasing or trying again later.'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t\tif (getDebugFlag(session)) {\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\t\t} else {\n\t\t\tthrow error;\n\t\t}\n\t\treturn null;\n\t}\n}\n\nexport default updateSubtaskById;\n"], ["/claude-task-master/scripts/modules/commands.js", "/**\n * commands.js\n * Command-line interface for the Task Master CLI\n */\n\nimport { program } from 'commander';\nimport path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport fs from 'fs';\nimport https from 'https';\nimport http from 'http';\nimport inquirer from 'inquirer';\nimport search from '@inquirer/search';\nimport ora from 'ora'; // Import ora\n\nimport {\n\tlog,\n\treadJSON,\n\twriteJSON,\n\tgetCurrentTag,\n\tdetectCamelCaseFlags,\n\ttoKebabCase\n} from './utils.js';\nimport {\n\tparsePRD,\n\tupdateTasks,\n\tgenerateTaskFiles,\n\tsetTaskStatus,\n\tlistTasks,\n\texpandTask,\n\texpandAllTasks,\n\tclearSubtasks,\n\taddTask,\n\taddSubtask,\n\tremoveSubtask,\n\tanalyzeTaskComplexity,\n\tupdateTaskById,\n\tupdateSubtaskById,\n\tremoveTask,\n\tfindTaskById,\n\ttaskExists,\n\tmoveTask,\n\tmigrateProject,\n\tsetResponseLanguage\n} from './task-manager.js';\n\nimport 
{\n\tcreateTag,\n\tdeleteTag,\n\ttags,\n\tuseTag,\n\trenameTag,\n\tcopyTag\n} from './task-manager/tag-management.js';\n\nimport {\n\taddDependency,\n\tremoveDependency,\n\tvalidateDependenciesCommand,\n\tfixDependenciesCommand\n} from './dependency-manager.js';\n\nimport {\n\tisApiKeySet,\n\tgetDebugFlag,\n\tgetConfig,\n\twriteConfig,\n\tConfigurationError,\n\tisConfigFilePresent,\n\tgetAvailableModels,\n\tgetBaseUrlForRole,\n\tgetDefaultNumTasks,\n\tgetDefaultSubtasks\n} from './config-manager.js';\n\nimport { CUSTOM_PROVIDERS } from '../../src/constants/providers.js';\n\nimport {\n\tCOMPLEXITY_REPORT_FILE,\n\tTASKMASTER_TASKS_FILE,\n\tTASKMASTER_DOCS_DIR\n} from '../../src/constants/paths.js';\n\nimport { initTaskMaster } from '../../src/task-master.js';\n\nimport {\n\tdisplayBanner,\n\tdisplayHelp,\n\tdisplayNextTask,\n\tdisplayTaskById,\n\tdisplayComplexityReport,\n\tgetStatusWithColor,\n\tconfirmTaskOverwrite,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayModelConfiguration,\n\tdisplayAvailableModels,\n\tdisplayApiKeyStatus,\n\tdisplayAiUsageSummary,\n\tdisplayMultipleTasksSummary,\n\tdisplayTaggedTasksFYI,\n\tdisplayCurrentTagIndicator\n} from './ui.js';\nimport {\n\tconfirmProfilesRemove,\n\tconfirmRemoveAllRemainingProfiles\n} from '../../src/ui/confirm.js';\nimport {\n\twouldRemovalLeaveNoProfiles,\n\tgetInstalledProfiles\n} from '../../src/utils/profiles.js';\n\nimport { initializeProject } from '../init.js';\nimport {\n\tgetModelConfiguration,\n\tgetAvailableModelsList,\n\tsetModel,\n\tgetApiKeyStatusReport\n} from './task-manager/models.js';\nimport {\n\tisValidTaskStatus,\n\tTASK_STATUS_OPTIONS\n} from '../../src/constants/task-status.js';\nimport {\n\tisValidRulesAction,\n\tRULES_ACTIONS,\n\tRULES_SETUP_ACTION\n} from '../../src/constants/rules-actions.js';\nimport { getTaskMasterVersion } from '../../src/utils/getVersion.js';\nimport { syncTasksToReadme } from './sync-readme.js';\nimport { RULE_PROFILES } from 
'../../src/constants/profiles.js';\nimport {\n\tconvertAllRulesToProfileRules,\n\tremoveProfileRules,\n\tisValidProfile,\n\tgetRulesProfile\n} from '../../src/utils/rule-transformer.js';\nimport {\n\trunInteractiveProfilesSetup,\n\tgenerateProfileSummary,\n\tcategorizeProfileResults,\n\tgenerateProfileRemovalSummary,\n\tcategorizeRemovalResults\n} from '../../src/utils/profiles.js';\n\n/**\n * Runs the interactive setup process for model configuration.\n * @param {string|null} projectRoot - The resolved project root directory.\n */\nasync function runInteractiveSetup(projectRoot) {\n\tif (!projectRoot) {\n\t\tconsole.error(\n\t\t\tchalk.red(\n\t\t\t\t'Error: Could not determine project root for interactive setup.'\n\t\t\t)\n\t\t);\n\t\tprocess.exit(1);\n\t}\n\n\tconst currentConfigResult = await getModelConfiguration({ projectRoot });\n\tconst currentModels = currentConfigResult.success\n\t\t? currentConfigResult.data.activeModels\n\t\t: { main: null, research: null, fallback: null };\n\t// Handle potential config load failure gracefully for the setup flow\n\tif (\n\t\t!currentConfigResult.success &&\n\t\tcurrentConfigResult.error?.code !== 'CONFIG_MISSING'\n\t) {\n\t\tconsole.warn(\n\t\t\tchalk.yellow(\n\t\t\t\t`Warning: Could not load current model configuration: ${currentConfigResult.error?.message || 'Unknown error'}. 
Proceeding with defaults.`\n\t\t\t)\n\t\t);\n\t}\n\n\t// Helper function to fetch OpenRouter models (duplicated for CLI context)\n\tfunction fetchOpenRouterModelsCLI() {\n\t\treturn new Promise((resolve) => {\n\t\t\tconst options = {\n\t\t\t\thostname: 'openrouter.ai',\n\t\t\t\tpath: '/api/v1/models',\n\t\t\t\tmethod: 'GET',\n\t\t\t\theaders: {\n\t\t\t\t\tAccept: 'application/json'\n\t\t\t\t}\n\t\t\t};\n\n\t\t\tconst req = https.request(options, (res) => {\n\t\t\t\tlet data = '';\n\t\t\t\tres.on('data', (chunk) => {\n\t\t\t\t\tdata += chunk;\n\t\t\t\t});\n\t\t\t\tres.on('end', () => {\n\t\t\t\t\tif (res.statusCode === 200) {\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tconst parsedData = JSON.parse(data);\n\t\t\t\t\t\t\tresolve(parsedData.data || []); // Return the array of models\n\t\t\t\t\t\t} catch (e) {\n\t\t\t\t\t\t\tconsole.error('Error parsing OpenRouter response:', e);\n\t\t\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\t`OpenRouter API request failed with status code: ${res.statusCode}`\n\t\t\t\t\t\t);\n\t\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t});\n\n\t\t\treq.on('error', (e) => {\n\t\t\t\tconsole.error('Error fetching OpenRouter models:', e);\n\t\t\t\tresolve(null); // Indicate failure\n\t\t\t});\n\t\t\treq.end();\n\t\t});\n\t}\n\n\t// Helper function to fetch Ollama models (duplicated for CLI context)\n\tfunction fetchOllamaModelsCLI(baseURL = 'http://localhost:11434/api') {\n\t\treturn new Promise((resolve) => {\n\t\t\ttry {\n\t\t\t\t// Parse the base URL to extract hostname, port, and base path\n\t\t\t\tconst url = new URL(baseURL);\n\t\t\t\tconst isHttps = url.protocol === 'https:';\n\t\t\t\tconst port = url.port || (isHttps ? 443 : 80);\n\t\t\t\tconst basePath = url.pathname.endsWith('/')\n\t\t\t\t\t? 
url.pathname.slice(0, -1)\n\t\t\t\t\t: url.pathname;\n\n\t\t\t\tconst options = {\n\t\t\t\t\thostname: url.hostname,\n\t\t\t\t\tport: parseInt(port, 10),\n\t\t\t\t\tpath: `${basePath}/tags`,\n\t\t\t\t\tmethod: 'GET',\n\t\t\t\t\theaders: {\n\t\t\t\t\t\tAccept: 'application/json'\n\t\t\t\t\t}\n\t\t\t\t};\n\n\t\t\t\tconst requestLib = isHttps ? https : http;\n\t\t\t\tconst req = requestLib.request(options, (res) => {\n\t\t\t\t\tlet data = '';\n\t\t\t\t\tres.on('data', (chunk) => {\n\t\t\t\t\t\tdata += chunk;\n\t\t\t\t\t});\n\t\t\t\t\tres.on('end', () => {\n\t\t\t\t\t\tif (res.statusCode === 200) {\n\t\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\t\tconst parsedData = JSON.parse(data);\n\t\t\t\t\t\t\t\tresolve(parsedData.models || []); // Return the array of models\n\t\t\t\t\t\t\t} catch (e) {\n\t\t\t\t\t\t\t\tconsole.error('Error parsing Ollama response:', e);\n\t\t\t\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\t\t`Ollama API request failed with status code: ${res.statusCode}`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t});\n\n\t\t\t\treq.on('error', (e) => {\n\t\t\t\t\tconsole.error('Error fetching Ollama models:', e);\n\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t});\n\t\t\t\treq.end();\n\t\t\t} catch (e) {\n\t\t\t\tconsole.error('Error parsing Ollama base URL:', e);\n\t\t\t\tresolve(null); // Indicate failure\n\t\t\t}\n\t\t});\n\t}\n\n\t// Helper to get choices and default index for a role\n\tconst getPromptData = (role, allowNone = false) => {\n\t\tconst currentModel = currentModels[role]; // Use the fetched data\n\t\tconst allModelsRaw = getAvailableModels(); // Get all available models\n\n\t\t// Manually group models by provider\n\t\tconst modelsByProvider = allModelsRaw.reduce((acc, model) => {\n\t\t\tif (!acc[model.provider]) {\n\t\t\t\tacc[model.provider] = 
[];\n\t\t\t}\n\t\t\tacc[model.provider].push(model);\n\t\t\treturn acc;\n\t\t}, {});\n\n\t\tconst cancelOption = { name: '⏹ Cancel Model Setup', value: '__CANCEL__' }; // Symbol updated\n\t\tconst noChangeOption = currentModel?.modelId\n\t\t\t? {\n\t\t\t\t\tname: `✔ No change to current ${role} model (${currentModel.modelId})`, // Symbol updated\n\t\t\t\t\tvalue: '__NO_CHANGE__'\n\t\t\t\t}\n\t\t\t: null;\n\n\t\t// Define custom provider options\n\t\tconst customProviderOptions = [\n\t\t\t{ name: '* Custom OpenRouter model', value: '__CUSTOM_OPENROUTER__' },\n\t\t\t{ name: '* Custom Ollama model', value: '__CUSTOM_OLLAMA__' },\n\t\t\t{ name: '* Custom Bedrock model', value: '__CUSTOM_BEDROCK__' },\n\t\t\t{ name: '* Custom Azure model', value: '__CUSTOM_AZURE__' },\n\t\t\t{ name: '* Custom Vertex model', value: '__CUSTOM_VERTEX__' }\n\t\t];\n\n\t\tlet choices = [];\n\t\tlet defaultIndex = 0; // Default to 'Cancel'\n\n\t\t// Filter and format models allowed for this role using the manually grouped data\n\t\tconst roleChoices = Object.entries(modelsByProvider)\n\t\t\t.map(([provider, models]) => {\n\t\t\t\tconst providerModels = models\n\t\t\t\t\t.filter((m) => m.allowed_roles.includes(role))\n\t\t\t\t\t.map((m) => ({\n\t\t\t\t\t\tname: `${provider} / ${m.id} ${\n\t\t\t\t\t\t\tm.cost_per_1m_tokens\n\t\t\t\t\t\t\t\t? 
chalk.gray(\n\t\t\t\t\t\t\t\t\t\t`($${m.cost_per_1m_tokens.input.toFixed(2)} input | $${m.cost_per_1m_tokens.output.toFixed(2)} output)`\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t: ''\n\t\t\t\t\t\t}`,\n\t\t\t\t\t\tvalue: { id: m.id, provider },\n\t\t\t\t\t\tshort: `${provider}/${m.id}`\n\t\t\t\t\t}));\n\t\t\t\tif (providerModels.length > 0) {\n\t\t\t\t\treturn [...providerModels];\n\t\t\t\t}\n\t\t\t\treturn null;\n\t\t\t})\n\t\t\t.filter(Boolean)\n\t\t\t.flat();\n\n\t\t// Find the index of the currently selected model for setting the default\n\t\tlet currentChoiceIndex = -1;\n\t\tif (currentModel?.modelId && currentModel?.provider) {\n\t\t\tcurrentChoiceIndex = roleChoices.findIndex(\n\t\t\t\t(choice) =>\n\t\t\t\t\ttypeof choice.value === 'object' &&\n\t\t\t\t\tchoice.value.id === currentModel.modelId &&\n\t\t\t\t\tchoice.value.provider === currentModel.provider\n\t\t\t);\n\t\t}\n\n\t\t// Construct final choices list with custom options moved to bottom\n\t\tconst systemOptions = [];\n\t\tif (noChangeOption) {\n\t\t\tsystemOptions.push(noChangeOption);\n\t\t}\n\t\tsystemOptions.push(cancelOption);\n\n\t\tconst systemLength = systemOptions.length;\n\n\t\tif (allowNone) {\n\t\t\tchoices = [\n\t\t\t\t...systemOptions,\n\t\t\t\tnew inquirer.Separator('\\n── Standard Models ──'),\n\t\t\t\t{ name: '⚪ None (disable)', value: null },\n\t\t\t\t...roleChoices,\n\t\t\t\tnew inquirer.Separator('\\n── Custom Providers ──'),\n\t\t\t\t...customProviderOptions\n\t\t\t];\n\t\t\t// Adjust default index: System + Sep1 + None (+2)\n\t\t\tconst noneOptionIndex = systemLength + 1;\n\t\t\tdefaultIndex =\n\t\t\t\tcurrentChoiceIndex !== -1\n\t\t\t\t\t? 
currentChoiceIndex + systemLength + 2 // Offset by system options and separators\n\t\t\t\t\t: noneOptionIndex; // Default to 'None' if no current model matched\n\t\t} else {\n\t\t\tchoices = [\n\t\t\t\t...systemOptions,\n\t\t\t\tnew inquirer.Separator('\\n── Standard Models ──'),\n\t\t\t\t...roleChoices,\n\t\t\t\tnew inquirer.Separator('\\n── Custom Providers ──'),\n\t\t\t\t...customProviderOptions\n\t\t\t];\n\t\t\t// Adjust default index: System + Sep (+1)\n\t\t\tdefaultIndex =\n\t\t\t\tcurrentChoiceIndex !== -1\n\t\t\t\t\t? currentChoiceIndex + systemLength + 1 // Offset by system options and separator\n\t\t\t\t\t: noChangeOption\n\t\t\t\t\t\t? 1\n\t\t\t\t\t\t: 0; // Default to 'No Change' if present, else 'Cancel'\n\t\t}\n\n\t\t// Ensure defaultIndex is valid within the final choices array length\n\t\tif (defaultIndex < 0 || defaultIndex >= choices.length) {\n\t\t\t// If default calculation failed or pointed outside bounds, reset intelligently\n\t\t\tdefaultIndex = 0; // Default to 'Cancel'\n\t\t\tconsole.warn(\n\t\t\t\t`Warning: Could not determine default model for role '${role}'. 
Defaulting to 'Cancel'.`\n\t\t\t); // Add warning\n\t\t}\n\n\t\treturn { choices, default: defaultIndex };\n\t};\n\n\t// --- Generate choices using the helper ---\n\tconst mainPromptData = getPromptData('main');\n\tconst researchPromptData = getPromptData('research');\n\tconst fallbackPromptData = getPromptData('fallback', true); // Allow 'None' for fallback\n\n\t// Display helpful intro message\n\tconsole.log(chalk.cyan('\\n🎯 Interactive Model Setup'));\n\tconsole.log(chalk.gray('━'.repeat(50)));\n\tconsole.log(chalk.yellow('💡 Navigation tips:'));\n\tconsole.log(chalk.gray(' • Type to search and filter options'));\n\tconsole.log(chalk.gray(' • Use ↑↓ arrow keys to navigate results'));\n\tconsole.log(\n\t\tchalk.gray(\n\t\t\t' • Standard models are listed first, custom providers at bottom'\n\t\t)\n\t);\n\tconsole.log(chalk.gray(' • Press Enter to select\\n'));\n\n\t// Helper function to create search source for models\n\tconst createSearchSource = (choices, defaultValue) => {\n\t\treturn (searchTerm = '') => {\n\t\t\tconst filteredChoices = choices.filter((choice) => {\n\t\t\t\tif (choice.type === 'separator') return true; // Always show separators\n\t\t\t\tconst searchText = choice.name || '';\n\t\t\t\treturn searchText.toLowerCase().includes(searchTerm.toLowerCase());\n\t\t\t});\n\t\t\treturn Promise.resolve(filteredChoices);\n\t\t};\n\t};\n\n\tconst answers = {};\n\n\t// Main model selection\n\tanswers.mainModel = await search({\n\t\tmessage: 'Select the main model for generation/updates:',\n\t\tsource: createSearchSource(mainPromptData.choices, mainPromptData.default),\n\t\tpageSize: 15\n\t});\n\n\tif (answers.mainModel !== '__CANCEL__') {\n\t\t// Research model selection\n\t\tanswers.researchModel = await search({\n\t\t\tmessage: 'Select the research model:',\n\t\t\tsource: createSearchSource(\n\t\t\t\tresearchPromptData.choices,\n\t\t\t\tresearchPromptData.default\n\t\t\t),\n\t\t\tpageSize: 15\n\t\t});\n\n\t\tif (answers.researchModel !== '__CANCEL__') 
{
			// Fallback model selection
			answers.fallbackModel = await search({
				message: 'Select the fallback model (optional):',
				source: createSearchSource(
					fallbackPromptData.choices,
					fallbackPromptData.default
				),
				pageSize: 15
			});
		}
	}

	// Closure state shared with handleSetModel below:
	// setupSuccess — false once any role fails/cancels; setupConfigModified — true once any write succeeds.
	let setupSuccess = true;
	let setupConfigModified = false;
	const coreOptionsSetup = { projectRoot }; // Pass root for setup actions

	// Helper to handle setting a model (including custom)
	/**
	 * Applies the user's selection for one model role.
	 *
	 * @param {string} role - Model role being configured ('main' | 'research' | 'fallback').
	 * @param {*} selectedValue - Prompt result: a sentinel string ('__CANCEL__',
	 *   '__NO_CHANGE__', '__CUSTOM_*__'), an object `{ id, provider }` for a
	 *   standard list pick, or null (fallback role only) to disable the fallback.
	 * @param {string|undefined} currentModelId - Currently configured model ID for
	 *   the role; used to skip no-op writes.
	 * @returns {Promise<boolean>} false only on '__CANCEL__' (abort the whole
	 *   setup flow); true otherwise, including failures — failures are recorded
	 *   by setting the closure variable `setupSuccess` to false. Successful
	 *   writes set `setupConfigModified` to true.
	 */
	async function handleSetModel(role, selectedValue, currentModelId) {
		if (selectedValue === '__CANCEL__') {
			console.log(
				chalk.yellow(`\nSetup canceled during ${role} model selection.`)
			);
			setupSuccess = false; // Also mark success as false on cancel
			return false; // Indicate cancellation
		}

		// Handle the new 'No Change' option
		if (selectedValue === '__NO_CHANGE__') {
			console.log(chalk.gray(`No change selected for ${role} model.`));
			return true; // Indicate success, continue setup
		}

		let modelIdToSet = null;
		let providerHint = null;
		// NOTE(review): isCustomSelection is assigned in the custom branches below
		// but never read inside this function — confirm whether it can be removed.
		let isCustomSelection = false;

		if (selectedValue === '__CUSTOM_OPENROUTER__') {
			isCustomSelection = true;
			const { customId } = await inquirer.prompt([
				{
					type: 'input',
					name: 'customId',
					message: `Enter the custom OpenRouter Model ID for the ${role} role:`
				}
			]);
			if (!customId) {
				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
				return true; // Continue setup, but don't set this role
			}
			modelIdToSet = customId;
			providerHint = CUSTOM_PROVIDERS.OPENROUTER;
			// Validate against live OpenRouter list
			const openRouterModels = await fetchOpenRouterModelsCLI();
			if (
				!openRouterModels ||
				!openRouterModels.some((m) => m.id === modelIdToSet)
			) {
				console.error(
					chalk.red(
						`Error: Model ID "${modelIdToSet}" not found in the live OpenRouter model list. Please check the ID.`
					)
				);
				setupSuccess = false;
				return true; // Continue setup, but mark as failed
			}
		} else if (selectedValue === '__CUSTOM_OLLAMA__') {
			isCustomSelection = true;
			const { customId } = await inquirer.prompt([
				{
					type: 'input',
					name: 'customId',
					message: `Enter the custom Ollama Model ID for the ${role} role:`
				}
			]);
			if (!customId) {
				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
				return true; // Continue setup, but don't set this role
			}
			modelIdToSet = customId;
			providerHint = CUSTOM_PROVIDERS.OLLAMA;
			// Get the Ollama base URL from config for this role
			const ollamaBaseURL = getBaseUrlForRole(role, projectRoot);
			// Validate against live Ollama list
			const ollamaModels = await fetchOllamaModelsCLI(ollamaBaseURL);
			if (ollamaModels === null) {
				// null signals a connection failure (distinct from an empty model list)
				console.error(
					chalk.red(
						`Error: Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`
					)
				);
				setupSuccess = false;
				return true; // Continue setup, but mark as failed
			} else if (!ollamaModels.some((m) => m.model === modelIdToSet)) {
				console.error(
					chalk.red(
						`Error: Model ID "${modelIdToSet}" not found in the Ollama instance. Please verify the model is pulled and available.`
					)
				);
				console.log(
					chalk.yellow(
						`You can check available models with: curl ${ollamaBaseURL}/tags`
					)
				);
				setupSuccess = false;
				return true; // Continue setup, but mark as failed
			}
		} else if (selectedValue === '__CUSTOM_BEDROCK__') {
			isCustomSelection = true;
			const { customId } = await inquirer.prompt([
				{
					type: 'input',
					name: 'customId',
					message: `Enter the custom Bedrock Model ID for the ${role} role (e.g., anthropic.claude-3-sonnet-20240229-v1:0):`
				}
			]);
			if (!customId) {
				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
				return true; // Continue setup, but don't set this role
			}
			modelIdToSet = customId;
			providerHint = CUSTOM_PROVIDERS.BEDROCK;

			// Check if AWS environment variables exist
			if (
				!process.env.AWS_ACCESS_KEY_ID ||
				!process.env.AWS_SECRET_ACCESS_KEY
			) {
				// NOTE(review): the warning text says credentials will fall back to
				// system configuration, yet this branch marks setup as failed and
				// returns WITHOUT setting the model — inconsistent with the message
				// (and with the Azure/Vertex branches, which log a hard error when
				// they bail). Confirm whether this should instead proceed.
				console.warn(
					chalk.yellow(
						'Warning: AWS_ACCESS_KEY_ID and/or AWS_SECRET_ACCESS_KEY environment variables are missing. Will fallback to system configuration. (ex: aws config files or ec2 instance profiles)'
					)
				);
				setupSuccess = false;
				return true; // Continue setup, but mark as failed
			}

			console.log(
				chalk.blue(
					`Custom Bedrock model "${modelIdToSet}" will be used. No validation performed.`
				)
			);
		} else if (selectedValue === '__CUSTOM_AZURE__') {
			isCustomSelection = true;
			const { customId } = await inquirer.prompt([
				{
					type: 'input',
					name: 'customId',
					message: `Enter the custom Azure OpenAI Model ID for the ${role} role (e.g., gpt-4o):`
				}
			]);
			if (!customId) {
				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
				return true; // Continue setup, but don't set this role
			}
			modelIdToSet = customId;
			providerHint = CUSTOM_PROVIDERS.AZURE;

			// Check if Azure environment variables exist
			if (
				!process.env.AZURE_OPENAI_API_KEY ||
				!process.env.AZURE_OPENAI_ENDPOINT
			) {
				console.error(
					chalk.red(
						'Error: AZURE_OPENAI_API_KEY and/or AZURE_OPENAI_ENDPOINT environment variables are missing. Please set them before using custom Azure models.'
					)
				);
				setupSuccess = false;
				return true; // Continue setup, but mark as failed
			}

			console.log(
				chalk.blue(
					`Custom Azure OpenAI model "${modelIdToSet}" will be used. No validation performed.`
				)
			);
		} else if (selectedValue === '__CUSTOM_VERTEX__') {
			isCustomSelection = true;
			const { customId } = await inquirer.prompt([
				{
					type: 'input',
					name: 'customId',
					message: `Enter the custom Vertex AI Model ID for the ${role} role (e.g., gemini-1.5-pro-002):`
				}
			]);
			if (!customId) {
				console.log(chalk.yellow('No custom ID entered. Skipping role.'));
				return true; // Continue setup, but don't set this role
			}
			modelIdToSet = customId;
			providerHint = CUSTOM_PROVIDERS.VERTEX;

			// Check if Google/Vertex environment variables exist
			// (either an API key OR application-default credentials is acceptable)
			if (
				!process.env.GOOGLE_API_KEY &&
				!process.env.GOOGLE_APPLICATION_CREDENTIALS
			) {
				console.error(
					chalk.red(
						'Error: Either GOOGLE_API_KEY or GOOGLE_APPLICATION_CREDENTIALS environment variable is required. Please set one before using custom Vertex models.'
					)
				);
				setupSuccess = false;
				return true; // Continue setup, but mark as failed
			}

			console.log(
				chalk.blue(
					`Custom Vertex AI model "${modelIdToSet}" will be used. No validation performed.`
				)
			);
		} else if (
			selectedValue &&
			typeof selectedValue === 'object' &&
			selectedValue.id
		) {
			// Standard model selected from list
			modelIdToSet = selectedValue.id;
			providerHint = selectedValue.provider; // Provider is known
		} else if (selectedValue === null && role === 'fallback') {
			// Handle disabling fallback
			modelIdToSet = null;
			providerHint = null;
		} else if (selectedValue) {
			// Any other truthy value is an unrecognized shape — programmer error.
			console.error(
				chalk.red(
					`Internal Error: Unexpected selection value for ${role}: ${JSON.stringify(selectedValue)}`
				)
			);
			setupSuccess = false;
			return true;
		}

		// Only proceed if there's a change to be made
		if (modelIdToSet !== currentModelId) {
			if (modelIdToSet) {
				// Set a specific model (standard or custom)
				const result = await setModel(role, modelIdToSet, {
					...coreOptionsSetup,
					providerHint // Pass the hint
				});
				if (result.success) {
					console.log(
						chalk.blue(
							`Set ${role} model: ${result.data.provider} / ${result.data.modelId}`
						)
					);
					if (result.data.warning) {
						// Display warning if returned by setModel
						console.log(chalk.yellow(result.data.warning));
					}
					setupConfigModified = true;
				} else {
					console.error(
						chalk.red(
							`Error setting ${role} model: ${result.error?.message || 'Unknown'}`
						)
					);
					setupSuccess = false;
				}
			} else if (role === 'fallback') {
				// Disable fallback model (modelIdToSet === null reached only via the
				// explicit fallback-disable branch above)
				const currentCfg = getConfig(projectRoot);
				if (currentCfg?.models?.fallback?.modelId) {
					// Check if it was actually set before clearing
					currentCfg.models.fallback = {
						...currentCfg.models.fallback,
						provider: undefined,
						modelId: undefined
					};
					if (writeConfig(currentCfg, projectRoot)) {
						console.log(chalk.blue('Fallback model disabled.'));
						setupConfigModified = true;
					} else {
						console.error(
							chalk.red('Failed to disable fallback model in config file.')
						);
						setupSuccess = false;
					}
				} else {
					console.log(chalk.blue('Fallback model was already disabled.'));
				}
			}
		}
		return true; // Indicate setup should continue
	}

	// Process answers using the handler
	if (
		!(await handleSetModel(
			'main',
			answers.mainModel,
			currentModels.main?.modelId // <--- Now 'currentModels' is defined
		))
	) {
		return false; // Explicitly return false if cancelled
	}
	if (
		!(await handleSetModel(
			'research',
			answers.researchModel,
			currentModels.research?.modelId // <--- Now 'currentModels' is defined
		))
	) {
		return false; // Explicitly return false if cancelled
	}
	if (
		!(await 
handleSetModel(\n\t\t\t'fallback',\n\t\t\tanswers.fallbackModel,\n\t\t\tcurrentModels.fallback?.modelId // <--- Now 'currentModels' is defined\n\t\t))\n\t) {\n\t\treturn false; // Explicitly return false if cancelled\n\t}\n\n\tif (setupSuccess && setupConfigModified) {\n\t\tconsole.log(chalk.green.bold('\\nModel setup complete!'));\n\t} else if (setupSuccess && !setupConfigModified) {\n\t\tconsole.log(chalk.yellow('\\nNo changes made to model configuration.'));\n\t} else if (!setupSuccess) {\n\t\tconsole.error(\n\t\t\tchalk.red(\n\t\t\t\t'\\nErrors occurred during model selection. Please review and try again.'\n\t\t\t)\n\t\t);\n\t}\n\treturn true; // Indicate setup flow completed (not cancelled)\n\t// Let the main command flow continue to display results\n}\n\n/**\n * Configure and register CLI commands\n * @param {Object} program - Commander program instance\n */\nfunction registerCommands(programInstance) {\n\t// Add global error handler for unknown options\n\tprogramInstance.on('option:unknown', function (unknownOption) {\n\t\tconst commandName = this._name || 'unknown';\n\t\tconsole.error(chalk.red(`Error: Unknown option '${unknownOption}'`));\n\t\tconsole.error(\n\t\t\tchalk.yellow(\n\t\t\t\t`Run 'task-master ${commandName} --help' to see available options`\n\t\t\t)\n\t\t);\n\t\tprocess.exit(1);\n\t});\n\n\t// parse-prd command\n\tprogramInstance\n\t\t.command('parse-prd')\n\t\t.description('Parse a PRD file and generate tasks')\n\t\t.argument('[file]', 'Path to the PRD file')\n\t\t.option(\n\t\t\t'-i, --input <file>',\n\t\t\t'Path to the PRD file (alternative to positional argument)'\n\t\t)\n\t\t.option('-o, --output <file>', 'Output file path')\n\t\t.option(\n\t\t\t'-n, --num-tasks <number>',\n\t\t\t'Number of tasks to generate',\n\t\t\tgetDefaultNumTasks()\n\t\t)\n\t\t.option('-f, --force', 'Skip confirmation when overwriting existing tasks')\n\t\t.option(\n\t\t\t'--append',\n\t\t\t'Append new tasks to existing tasks.json instead of 
overwriting'\n\t\t)\n\t\t.option(\n\t\t\t'-r, --research',\n\t\t\t'Use Perplexity AI for research-backed task generation, providing more comprehensive and accurate task breakdown'\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (file, options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tlet taskMaster;\n\t\t\ttry {\n\t\t\t\tconst initOptions = {\n\t\t\t\t\tprdPath: file || options.input || true,\n\t\t\t\t\ttag: options.tag\n\t\t\t\t};\n\t\t\t\t// Only include tasksPath if output is explicitly specified\n\t\t\t\tif (options.output) {\n\t\t\t\t\tinitOptions.tasksPath = options.output;\n\t\t\t\t}\n\t\t\t\ttaskMaster = initTaskMaster(initOptions);\n\t\t\t} catch (error) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\t`${chalk.white.bold('Parse PRD Help')}\\n\\n${chalk.cyan('Usage:')}\\n task-master parse-prd <prd-file.txt> [options]\\n\\n${chalk.cyan('Options:')}\\n -i, --input <file> Path to the PRD file (alternative to positional argument)\\n -o, --output <file> Output file path (default: .taskmaster/tasks/tasks.json)\\n -n, --num-tasks <number> Number of tasks to generate (default: 10)\\n -f, --force Skip confirmation when overwriting existing tasks\\n --append Append new tasks to existing tasks.json instead of overwriting\\n -r, --research Use Perplexity AI for research-backed task generation\\n\\n${chalk.cyan('Example:')}\\n task-master parse-prd requirements.txt --num-tasks 15\\n task-master parse-prd --input=requirements.txt\\n task-master parse-prd --force\\n task-master parse-prd requirements_v2.txt --append\\n task-master parse-prd requirements.txt --research\\n\\n${chalk.yellow('Note: This command will:')}\\n 1. Look for a PRD file at ${TASKMASTER_DOCS_DIR}/PRD.md by default\\n 2. Use the file specified by --input or positional argument if provided\\n 3. 
Generate tasks from the PRD and either:\\n - Overwrite any existing tasks.json file (default)\\n - Append to existing tasks.json if --append is used`,\n\t\t\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconsole.error(chalk.red(`\\nError: ${error.message}`));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tconst numTasks = parseInt(options.numTasks, 10);\n\t\t\tconst force = options.force || false;\n\t\t\tconst append = options.append || false;\n\t\t\tconst research = options.research || false;\n\t\t\tlet useForce = force;\n\t\t\tconst useAppend = append;\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\t// Helper function to check if there are existing tasks in the target tag and confirm overwrite\n\t\t\tasync function confirmOverwriteIfNeeded() {\n\t\t\t\t// Check if there are existing tasks in the target tag\n\t\t\t\tlet hasExistingTasksInTag = false;\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\t\t\t\tif (fs.existsSync(tasksPath)) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\t// Read the entire file to check if the tag exists\n\t\t\t\t\t\tconst existingFileContent = fs.readFileSync(tasksPath, 'utf8');\n\t\t\t\t\t\tconst allData = JSON.parse(existingFileContent);\n\n\t\t\t\t\t\t// Check if the target tag exists and has tasks\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\tallData[tag] &&\n\t\t\t\t\t\t\tArray.isArray(allData[tag].tasks) &&\n\t\t\t\t\t\t\tallData[tag].tasks.length > 0\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\thasExistingTasksInTag = true;\n\t\t\t\t\t\t}\n\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\t// If we can't read the file or parse it, assume no existing tasks in this tag\n\t\t\t\t\t\thasExistingTasksInTag = false;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Only show confirmation if there are existing tasks in the target tag\n\t\t\t\tif (hasExistingTasksInTag && !useForce && !useAppend) {\n\t\t\t\t\tconst 
overwrite = await confirmTaskOverwrite(tasksPath);\n\t\t\t\t\tif (!overwrite) {\n\t\t\t\t\t\tlog('info', 'Operation cancelled.');\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t\t// If user confirms 'y', we should set useForce = true for the parsePRD call\n\t\t\t\t\t// Only overwrite if not appending\n\t\t\t\t\tuseForce = true;\n\t\t\t\t}\n\t\t\t\treturn true;\n\t\t\t}\n\n\t\t\tlet spinner;\n\n\t\t\ttry {\n\t\t\t\tif (!(await confirmOverwriteIfNeeded())) return;\n\n\t\t\t\tconsole.log(chalk.blue(`Parsing PRD file: ${taskMaster.getPrdPath()}`));\n\t\t\t\tconsole.log(chalk.blue(`Generating ${numTasks} tasks...`));\n\t\t\t\tif (append) {\n\t\t\t\t\tconsole.log(chalk.blue('Appending to existing tasks...'));\n\t\t\t\t}\n\t\t\t\tif (research) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t'Using Perplexity AI for research-backed task generation'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tspinner = ora('Parsing PRD and generating tasks...\\n').start();\n\t\t\t\t// Handle case where getTasksPath() returns null\n\t\t\t\tconst outputPath =\n\t\t\t\t\ttaskMaster.getTasksPath() ||\n\t\t\t\t\tpath.join(taskMaster.getProjectRoot(), TASKMASTER_TASKS_FILE);\n\t\t\t\tawait parsePRD(taskMaster.getPrdPath(), outputPath, numTasks, {\n\t\t\t\t\tappend: useAppend,\n\t\t\t\t\tforce: useForce,\n\t\t\t\t\tresearch: research,\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\ttag: tag\n\t\t\t\t});\n\t\t\t\tspinner.succeed('Tasks generated successfully!');\n\t\t\t} catch (error) {\n\t\t\t\tif (spinner) {\n\t\t\t\t\tspinner.fail(`Error parsing PRD: ${error.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tconsole.error(chalk.red(`Error parsing PRD: ${error.message}`));\n\t\t\t\t}\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// update command\n\tprogramInstance\n\t\t.command('update')\n\t\t.description(\n\t\t\t'Update multiple tasks with ID >= \"from\" based on new information or implementation changes'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file 
<file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'--from <id>',\n\t\t\t'Task ID to start updating from (tasks with ID >= this value will be updated)',\n\t\t\t'1'\n\t\t)\n\t\t.option(\n\t\t\t'-p, --prompt <text>',\n\t\t\t'Prompt explaining the changes or new context (required)'\n\t\t)\n\t\t.option(\n\t\t\t'-r, --research',\n\t\t\t'Use Perplexity AI for research-backed task updates'\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst fromId = parseInt(options.from, 10); // Validation happens here\n\t\t\tconst prompt = options.prompt;\n\t\t\tconst useResearch = options.research || false;\n\n\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\t// Check if there's an 'id' option which is a common mistake (instead of 'from')\n\t\t\tif (\n\t\t\t\tprocess.argv.includes('--id') ||\n\t\t\t\tprocess.argv.some((arg) => arg.startsWith('--id='))\n\t\t\t) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red('Error: The update command uses --from=<id>, not --id=<id>')\n\t\t\t\t);\n\t\t\t\tconsole.log(chalk.yellow('\\nTo update multiple tasks:'));\n\t\t\t\tconsole.log(\n\t\t\t\t\t` task-master update --from=${fromId} --prompt=\"Your prompt here\"`\n\t\t\t\t);\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t'\\nTo update a single specific task, use the update-task command instead:'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconsole.log(\n\t\t\t\t\t` task-master update-task --id=<id> --prompt=\"Your prompt here\"`\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tif (!prompt) 
{\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: --prompt parameter is required. Please provide information about the changes.'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(\n\t\t\t\t\t`Updating tasks from ID >= ${fromId} with prompt: \"${prompt}\"`\n\t\t\t\t)\n\t\t\t);\n\t\t\tconsole.log(chalk.blue(`Tasks file: ${tasksPath}`));\n\n\t\t\tif (useResearch) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue('Using Perplexity AI for research-backed task updates')\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Call core updateTasks, passing context for CLI\n\t\t\tawait updateTasks(\n\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\tfromId,\n\t\t\t\tprompt,\n\t\t\t\tuseResearch,\n\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag } // Pass context with projectRoot and tag\n\t\t\t);\n\t\t});\n\n\t// update-task command\n\tprogramInstance\n\t\t.command('update-task')\n\t\t.description(\n\t\t\t'Update a single specific task by ID with new information (use --id parameter)'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('-i, --id <id>', 'Task ID to update (required)')\n\t\t.option(\n\t\t\t'-p, --prompt <text>',\n\t\t\t'Prompt explaining the changes or new context (required)'\n\t\t)\n\t\t.option(\n\t\t\t'-r, --research',\n\t\t\t'Use Perplexity AI for research-backed task updates'\n\t\t)\n\t\t.option(\n\t\t\t'--append',\n\t\t\t'Append timestamped information to task details instead of full update'\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true,\n\t\t\t\t\ttag: options.tag\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Resolve tag using standard pattern\n\t\t\t\tconst tag = 
taskMaster.getCurrentTag();\n\n\t\t\t\t// Show current tag context\n\t\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\t\t// Validate required parameters\n\t\t\t\tif (!options.id) {\n\t\t\t\t\tconsole.error(chalk.red('Error: --id parameter is required'));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Usage example: task-master update-task --id=23 --prompt=\"Update with new information\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\t// Parse the task ID and validate it's a number\n\t\t\t\tconst taskId = parseInt(options.id, 10);\n\t\t\t\tif (Number.isNaN(taskId) || taskId <= 0) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t`Error: Invalid task ID: ${options.id}. Task ID must be a positive integer.`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Usage example: task-master update-task --id=23 --prompt=\"Update with new information\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tif (!options.prompt) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t'Error: --prompt parameter is required. 
Please provide information about the changes.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Usage example: task-master update-task --id=23 --prompt=\"Update with new information\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst prompt = options.prompt;\n\t\t\t\tconst useResearch = options.research || false;\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tif (tasksPath === TASKMASTER_TASKS_FILE) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t'Hint: Run task-master init or task-master parse-prd to create tasks.json first'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t`Hint: Check if the file path is correct: ${tasksPath}`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Updating task ${taskId} with prompt: \"${prompt}\"`)\n\t\t\t\t);\n\t\t\t\tconsole.log(chalk.blue(`Tasks file: ${tasksPath}`));\n\n\t\t\t\tif (useResearch) {\n\t\t\t\t\t// Verify Perplexity API key exists if using research\n\t\t\t\t\tif (!isApiKeySet('perplexity')) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t'Warning: PERPLEXITY_API_KEY environment variable is missing. 
Research-backed updates will not be available.'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow('Falling back to Claude AI for task update.')\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.blue('Using Perplexity AI for research-backed task update')\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tconst result = await updateTaskById(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\ttaskId,\n\t\t\t\t\tprompt,\n\t\t\t\t\tuseResearch,\n\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag },\n\t\t\t\t\t'text',\n\t\t\t\t\toptions.append || false\n\t\t\t\t);\n\n\t\t\t\t// If the task wasn't updated (e.g., if it was already marked as done)\n\t\t\t\tif (!result) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'\\nTask update was not completed. Review the messages above for details.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\n\t\t\t\t// Provide more helpful error messages for common issues\n\t\t\t\tif (\n\t\t\t\t\terror.message.includes('task') &&\n\t\t\t\t\terror.message.includes('not found')\n\t\t\t\t) {\n\t\t\t\t\tconsole.log(chalk.yellow('\\nTo fix this issue:'));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t' 1. Run task-master list to see all available task IDs'\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(' 2. Use a valid task ID with the --id parameter');\n\t\t\t\t} else if (error.message.includes('API key')) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'\\nThis error is related to API keys. 
Check your environment variables.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Use getDebugFlag getter instead of CONFIG.debug\n\t\t\t\tif (getDebugFlag()) {\n\t\t\t\t\tconsole.error(error);\n\t\t\t\t}\n\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// update-subtask command\n\tprogramInstance\n\t\t.command('update-subtask')\n\t\t.description(\n\t\t\t'Update a subtask by appending additional timestamped information'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-i, --id <id>',\n\t\t\t'Subtask ID to update in format \"parentId.subtaskId\" (required)'\n\t\t)\n\t\t.option(\n\t\t\t'-p, --prompt <text>',\n\t\t\t'Prompt explaining what information to add (required)'\n\t\t)\n\t\t.option('-r, --research', 'Use Perplexity AI for research-backed updates')\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true,\n\t\t\t\t\ttag: options.tag\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Resolve tag using standard pattern\n\t\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t\t// Show current tag context\n\t\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\t\t// Validate required parameters\n\t\t\t\tif (!options.id) {\n\t\t\t\t\tconsole.error(chalk.red('Error: --id parameter is required'));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Usage example: task-master update-subtask --id=5.2 --prompt=\"Add more details about the API endpoint\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\t// Validate subtask ID format (should contain a dot)\n\t\t\t\tconst subtaskId = options.id;\n\t\t\t\tif (!subtaskId.includes('.')) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t`Error: Invalid subtask ID 
format: ${subtaskId}. Subtask ID must be in format \"parentId.subtaskId\"`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Usage example: task-master update-subtask --id=5.2 --prompt=\"Add more details about the API endpoint\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tif (!options.prompt) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t'Error: --prompt parameter is required. Please provide information to add to the subtask.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Usage example: task-master update-subtask --id=5.2 --prompt=\"Add more details about the API endpoint\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst prompt = options.prompt;\n\t\t\t\tconst useResearch = options.research || false;\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tif (tasksPath === TASKMASTER_TASKS_FILE) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t'Hint: Run task-master init or task-master parse-prd to create tasks.json first'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t`Hint: Check if the file path is correct: ${tasksPath}`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Updating subtask ${subtaskId} with prompt: \"${prompt}\"`)\n\t\t\t\t);\n\t\t\t\tconsole.log(chalk.blue(`Tasks file: ${tasksPath}`));\n\n\t\t\t\tif (useResearch) {\n\t\t\t\t\t// Verify Perplexity API key exists if using research\n\t\t\t\t\tif (!isApiKeySet('perplexity')) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t'Warning: PERPLEXITY_API_KEY 
environment variable is missing. Research-backed updates will not be available.'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow('Falling back to Claude AI for subtask update.')\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t\t'Using Perplexity AI for research-backed subtask update'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tconst result = await updateSubtaskById(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\tsubtaskId,\n\t\t\t\t\tprompt,\n\t\t\t\t\tuseResearch,\n\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t);\n\n\t\t\t\tif (!result) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'\\nSubtask update was not completed. Review the messages above for details.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\n\t\t\t\t// Provide more helpful error messages for common issues\n\t\t\t\tif (\n\t\t\t\t\terror.message.includes('subtask') &&\n\t\t\t\t\terror.message.includes('not found')\n\t\t\t\t) {\n\t\t\t\t\tconsole.log(chalk.yellow('\\nTo fix this issue:'));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t' 1. Run task-master list --with-subtasks to see all available subtask IDs'\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t' 2. Use a valid subtask ID with the --id parameter in format \"parentId.subtaskId\"'\n\t\t\t\t\t);\n\t\t\t\t} else if (error.message.includes('API key')) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'\\nThis error is related to API keys. 
Check your environment variables.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Use getDebugFlag getter instead of CONFIG.debug\n\t\t\t\tif (getDebugFlag()) {\n\t\t\t\t\tconsole.error(error);\n\t\t\t\t}\n\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// generate command\n\tprogramInstance\n\t\t.command('generate')\n\t\t.description('Generate task files from tasks.json')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-o, --output <dir>',\n\t\t\t'Output directory',\n\t\t\tpath.dirname(TASKMASTER_TASKS_FILE)\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst outputDir = options.output;\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(`Generating task files from: ${taskMaster.getTasksPath()}`)\n\t\t\t);\n\t\t\tconsole.log(chalk.blue(`Output directory: ${outputDir}`));\n\n\t\t\tawait generateTaskFiles(taskMaster.getTasksPath(), outputDir, {\n\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\ttag\n\t\t\t});\n\t\t});\n\n\t// set-status command\n\tprogramInstance\n\t\t.command('set-status')\n\t\t.alias('mark')\n\t\t.alias('set')\n\t\t.description('Set the status of a task')\n\t\t.option(\n\t\t\t'-i, --id <id>',\n\t\t\t'Task ID (can be comma-separated for multiple tasks)'\n\t\t)\n\t\t.option(\n\t\t\t'-s, --status <status>',\n\t\t\t`New status (one of: ${TASK_STATUS_OPTIONS.join(', ')})`\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: 
options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst taskId = options.id;\n\t\t\tconst status = options.status;\n\n\t\t\tif (!taskId || !status) {\n\t\t\t\tconsole.error(chalk.red('Error: Both --id and --status are required'));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tif (!isValidTaskStatus(status)) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t`Error: Invalid status value: ${status}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`\n\t\t\t\t\t)\n\t\t\t\t);\n\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(`Setting status of task(s) ${taskId} to: ${status}`)\n\t\t\t);\n\n\t\t\tawait setTaskStatus(taskMaster.getTasksPath(), taskId, status, {\n\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\ttag\n\t\t\t});\n\t\t});\n\n\t// list command\n\tprogramInstance\n\t\t.command('list')\n\t\t.description('List all tasks')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-r, --report <report>',\n\t\t\t'Path to the complexity report file',\n\t\t\tCOMPLEXITY_REPORT_FILE\n\t\t)\n\t\t.option('-s, --status <status>', 'Filter by status')\n\t\t.option('--with-subtasks', 'Show subtasks for each task')\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\t// Only pass complexityReportPath if user provided a custom path\n\t\t\tif (options.report && options.report !== COMPLEXITY_REPORT_FILE) {\n\t\t\t\tinitOptions.complexityReportPath = options.report;\n\t\t\t}\n\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\tconst statusFilter = options.status;\n\t\t\tconst withSubtasks = options.withSubtasks || false;\n\t\t\tconst tag = 
taskMaster.getCurrentTag();\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(`Listing tasks from: ${taskMaster.getTasksPath()}`)\n\t\t\t);\n\t\t\tif (statusFilter) {\n\t\t\t\tconsole.log(chalk.blue(`Filtering by status: ${statusFilter}`));\n\t\t\t}\n\t\t\tif (withSubtasks) {\n\t\t\t\tconsole.log(chalk.blue('Including subtasks in listing'));\n\t\t\t}\n\n\t\t\tawait listTasks(\n\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\tstatusFilter,\n\t\t\t\ttaskMaster.getComplexityReportPath(),\n\t\t\t\twithSubtasks,\n\t\t\t\t'text',\n\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t);\n\t\t});\n\n\t// expand command\n\tprogramInstance\n\t\t.command('expand')\n\t\t.description('Expand a task into subtasks using AI')\n\t\t.option('-i, --id <id>', 'ID of the task to expand')\n\t\t.option(\n\t\t\t'-a, --all',\n\t\t\t'Expand all pending tasks based on complexity analysis'\n\t\t)\n\t\t.option(\n\t\t\t'-n, --num <number>',\n\t\t\t'Number of subtasks to generate (uses complexity analysis by default if available)'\n\t\t)\n\t\t.option(\n\t\t\t'-r, --research',\n\t\t\t'Enable research-backed generation (e.g., using Perplexity)',\n\t\t\tfalse\n\t\t)\n\t\t.option('-p, --prompt <text>', 'Additional context for subtask generation')\n\t\t.option('-f, --force', 'Force expansion even if subtasks exist', false) // Ensure force option exists\n\t\t.option(\n\t\t\t'--file <file>',\n\t\t\t'Path to the tasks file (relative to project root)',\n\t\t\tTASKMASTER_TASKS_FILE // Allow file override\n\t\t) // Allow file override\n\t\t.option(\n\t\t\t'-cr, --complexity-report <file>',\n\t\t\t'Path to the report file',\n\t\t\tCOMPLEXITY_REPORT_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\tif 
(options.complexityReport) {\n\t\t\t\tinitOptions.complexityReportPath = options.complexityReport;\n\t\t\t}\n\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (options.all) {\n\t\t\t\t// --- Handle expand --all ---\n\t\t\t\tconsole.log(chalk.blue('Expanding all pending tasks...'));\n\t\t\t\t// Updated call to the refactored expandAllTasks\n\t\t\t\ttry {\n\t\t\t\t\tconst result = await expandAllTasks(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\toptions.num, // Pass num\n\t\t\t\t\t\toptions.research, // Pass research flag\n\t\t\t\t\t\toptions.prompt, // Pass additional context\n\t\t\t\t\t\toptions.force, // Pass force flag\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\t\t\ttag,\n\t\t\t\t\t\t\tcomplexityReportPath: taskMaster.getComplexityReportPath()\n\t\t\t\t\t\t} // Pass context with projectRoot and tag\n\t\t\t\t\t\t// outputFormat defaults to 'text' in expandAllTasks for CLI\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error expanding all tasks: ${error.message}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t} else if (options.id) {\n\t\t\t\t// --- Handle expand --id <id> (Should be correct from previous refactor) ---\n\t\t\t\tif (!options.id) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red('Error: Task ID is required unless using --all.')\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconsole.log(chalk.blue(`Expanding task ${options.id}...`));\n\t\t\t\ttry {\n\t\t\t\t\t// Call the refactored expandTask function\n\t\t\t\t\tawait expandTask(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\toptions.id,\n\t\t\t\t\t\toptions.num,\n\t\t\t\t\t\toptions.research,\n\t\t\t\t\t\toptions.prompt,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tprojectRoot: 
taskMaster.getProjectRoot(),\n\t\t\t\t\t\t\ttag,\n\t\t\t\t\t\t\tcomplexityReportPath: taskMaster.getComplexityReportPath()\n\t\t\t\t\t\t}, // Pass context with projectRoot and tag\n\t\t\t\t\t\toptions.force // Pass the force flag down\n\t\t\t\t\t);\n\t\t\t\t\t// expandTask logs its own success/failure for single task\n\t\t\t\t} catch (error) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error expanding task ${options.id}: ${error.message}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red('Error: You must specify either a task ID (--id) or --all.')\n\t\t\t\t);\n\t\t\t\tprogramInstance.help(); // Show help\n\t\t\t}\n\t\t});\n\n\t// analyze-complexity command\n\tprogramInstance\n\t\t.command('analyze-complexity')\n\t\t.description(\n\t\t\t`Analyze tasks and generate expansion recommendations${chalk.reset('')}`\n\t\t)\n\t\t.option('-o, --output <file>', 'Output file path for the report')\n\t\t.option(\n\t\t\t'-m, --model <model>',\n\t\t\t'LLM model to use for analysis (defaults to configured model)'\n\t\t)\n\t\t.option(\n\t\t\t'-t, --threshold <number>',\n\t\t\t'Minimum complexity score to recommend expansion (1-10)',\n\t\t\t'5'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-r, --research',\n\t\t\t'Use Perplexity AI for research-backed complexity analysis'\n\t\t)\n\t\t.option(\n\t\t\t'-i, --id <ids>',\n\t\t\t'Comma-separated list of specific task IDs to analyze (e.g., \"1,3,5\")'\n\t\t)\n\t\t.option('--from <id>', 'Starting task ID in a range to analyze')\n\t\t.option('--to <id>', 'Ending task ID in a range to analyze')\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true, // Tasks file is required to analyze\n\t\t\t\ttag: options.tag\n\t\t\t};\n\t\t\t// 
Only include complexityReportPath if output is explicitly specified\n\t\t\tif (options.output) {\n\t\t\t\tinitOptions.complexityReportPath = options.output;\n\t\t\t}\n\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\tconst modelOverride = options.model;\n\t\t\tconst thresholdScore = parseFloat(options.threshold);\n\t\t\tconst useResearch = options.research || false;\n\n\t\t\t// Use the provided tag, or the current active tag, or default to 'master'\n\t\t\tconst targetTag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(targetTag);\n\n\t\t\t// Use user's explicit output path if provided, otherwise use tag-aware default\n\t\t\tconst outputPath = taskMaster.getComplexityReportPath();\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(\n\t\t\t\t\t`Analyzing task complexity from: ${taskMaster.getTasksPath()}`\n\t\t\t\t)\n\t\t\t);\n\t\t\tconsole.log(chalk.blue(`Output report will be saved to: ${outputPath}`));\n\n\t\t\tif (options.id) {\n\t\t\t\tconsole.log(chalk.blue(`Analyzing specific task IDs: ${options.id}`));\n\t\t\t} else if (options.from || options.to) {\n\t\t\t\tconst fromStr = options.from ? options.from : 'first';\n\t\t\t\tconst toStr = options.to ? 
options.to : 'last';\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Analyzing tasks in range: ${fromStr} to ${toStr}`)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tif (useResearch) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t'Using Perplexity AI for research-backed complexity analysis'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Update options with tag-aware output path and context\n\t\t\tconst updatedOptions = {\n\t\t\t\t...options,\n\t\t\t\toutput: outputPath,\n\t\t\t\ttag: targetTag,\n\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\tfile: taskMaster.getTasksPath()\n\t\t\t};\n\n\t\t\tawait analyzeTaskComplexity(updatedOptions);\n\t\t});\n\n\t// research command\n\tprogramInstance\n\t\t.command('research')\n\t\t.description('Perform AI-powered research queries with project context')\n\t\t.argument('[prompt]', 'Research prompt to investigate')\n\t\t.option('--file <file>', 'Path to the tasks file')\n\t\t.option(\n\t\t\t'-i, --id <ids>',\n\t\t\t'Comma-separated task/subtask IDs to include as context (e.g., \"15,16.2\")'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --files <paths>',\n\t\t\t'Comma-separated file paths to include as context'\n\t\t)\n\t\t.option(\n\t\t\t'-c, --context <text>',\n\t\t\t'Additional custom context to include in the research prompt'\n\t\t)\n\t\t.option(\n\t\t\t'-t, --tree',\n\t\t\t'Include project file tree structure in the research context'\n\t\t)\n\t\t.option(\n\t\t\t'-s, --save <file>',\n\t\t\t'Save research results to the specified task/subtask(s)'\n\t\t)\n\t\t.option(\n\t\t\t'-d, --detail <level>',\n\t\t\t'Output detail level: low, medium, high',\n\t\t\t'medium'\n\t\t)\n\t\t.option(\n\t\t\t'--save-to <id>',\n\t\t\t'Automatically save research results to specified task/subtask ID (e.g., \"15\" or \"15.2\")'\n\t\t)\n\t\t.option(\n\t\t\t'--save-file',\n\t\t\t'Save research results to .taskmaster/docs/research/ directory'\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (prompt, options) => 
{\n\t\t\t// Initialize TaskMaster\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\t// Parameter validation\n\t\t\tif (!prompt || typeof prompt !== 'string' || prompt.trim().length === 0) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red('Error: Research prompt is required and cannot be empty')\n\t\t\t\t);\n\t\t\t\tshowResearchHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Validate detail level\n\t\t\tconst validDetailLevels = ['low', 'medium', 'high'];\n\t\t\tif (\n\t\t\t\toptions.detail &&\n\t\t\t\t!validDetailLevels.includes(options.detail.toLowerCase())\n\t\t\t) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t`Error: Detail level must be one of: ${validDetailLevels.join(', ')}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Validate and parse task IDs if provided\n\t\t\tlet taskIds = [];\n\t\t\tif (options.id) {\n\t\t\t\ttry {\n\t\t\t\t\ttaskIds = options.id.split(',').map((id) => {\n\t\t\t\t\t\tconst trimmedId = id.trim();\n\t\t\t\t\t\t// Support both task IDs (e.g., \"15\") and subtask IDs (e.g., \"15.2\")\n\t\t\t\t\t\tif (!/^\\d+(\\.\\d+)?$/.test(trimmedId)) {\n\t\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t\t`Invalid task ID format: \"${trimmedId}\". 
Expected format: \"15\" or \"15.2\"`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn trimmedId;\n\t\t\t\t\t});\n\t\t\t\t} catch (error) {\n\t\t\t\t\tconsole.error(chalk.red(`Error parsing task IDs: ${error.message}`));\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Validate and parse file paths if provided\n\t\t\tlet filePaths = [];\n\t\t\tif (options.files) {\n\t\t\t\ttry {\n\t\t\t\t\tfilePaths = options.files.split(',').map((filePath) => {\n\t\t\t\t\t\tconst trimmedPath = filePath.trim();\n\t\t\t\t\t\tif (trimmedPath.length === 0) {\n\t\t\t\t\t\t\tthrow new Error('Empty file path provided');\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn trimmedPath;\n\t\t\t\t\t});\n\t\t\t\t} catch (error) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error parsing file paths: ${error.message}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Validate save-to option if provided\n\t\t\tif (options.saveTo) {\n\t\t\t\tconst saveToId = options.saveTo.trim();\n\t\t\t\tif (saveToId.length === 0) {\n\t\t\t\t\tconsole.error(chalk.red('Error: Save-to ID cannot be empty'));\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t\t// Validate ID format: number or number.number\n\t\t\t\tif (!/^\\d+(\\.\\d+)?$/.test(saveToId)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t'Error: Save-to ID must be in format \"15\" for task or \"15.2\" for subtask'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Validate save option if provided (legacy file save)\n\t\t\tif (options.save) {\n\t\t\t\tconst saveTarget = options.save.trim();\n\t\t\t\tif (saveTarget.length === 0) {\n\t\t\t\t\tconsole.error(chalk.red('Error: Save target cannot be empty'));\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t\t// Check if it's a valid file path (basic validation)\n\t\t\t\tif (saveTarget.includes('..') || saveTarget.startsWith('/')) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t'Error: Save path must be relative 
and cannot contain \"..\"'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\t// Validate tasks file exists if task IDs are specified\n\t\t\tif (taskIds.length > 0) {\n\t\t\t\ttry {\n\t\t\t\t\tconst tasksData = readJSON(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\ttaskMaster.getProjectRoot(),\n\t\t\t\t\t\ttag\n\t\t\t\t\t);\n\t\t\t\t\tif (!tasksData || !tasksData.tasks) {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t\t`Error: No valid tasks found in ${taskMaster.getTasksPath()} for tag '${tag}'`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\t\t\t\t} catch (error) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error reading tasks file: ${error.message}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Validate file paths exist if specified\n\t\t\tif (filePaths.length > 0) {\n\t\t\t\tfor (const filePath of filePaths) {\n\t\t\t\t\tconst fullPath = path.isAbsolute(filePath)\n\t\t\t\t\t\t? filePath\n\t\t\t\t\t\t: path.join(taskMaster.getProjectRoot(), filePath);\n\t\t\t\t\tif (!fs.existsSync(fullPath)) {\n\t\t\t\t\t\tconsole.error(chalk.red(`Error: File not found: ${filePath}`));\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Create validated parameters object\n\t\t\tconst validatedParams = {\n\t\t\t\tprompt: prompt.trim(),\n\t\t\t\ttaskIds: taskIds,\n\t\t\t\tfilePaths: filePaths,\n\t\t\t\tcustomContext: options.context ? options.context.trim() : null,\n\t\t\t\tincludeProjectTree: !!options.tree,\n\t\t\t\tsaveTarget: options.save ? options.save.trim() : null,\n\t\t\t\tsaveToId: options.saveTo ? options.saveTo.trim() : null,\n\t\t\t\tallowFollowUp: true, // Always allow follow-up in CLI\n\t\t\t\tdetailLevel: options.detail ? 
options.detail.toLowerCase() : 'medium',\n\t\t\t\ttasksPath: taskMaster.getTasksPath(),\n\t\t\t\tprojectRoot: taskMaster.getProjectRoot()\n\t\t\t};\n\n\t\t\t// Display what we're about to do\n\t\t\tconsole.log(chalk.blue(`Researching: \"${validatedParams.prompt}\"`));\n\n\t\t\tif (validatedParams.taskIds.length > 0) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.gray(`Task context: ${validatedParams.taskIds.join(', ')}`)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tif (validatedParams.filePaths.length > 0) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.gray(`File context: ${validatedParams.filePaths.join(', ')}`)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tif (validatedParams.customContext) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.gray(\n\t\t\t\t\t\t`Custom context: ${validatedParams.customContext.substring(0, 50)}${validatedParams.customContext.length > 50 ? '...' : ''}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tif (validatedParams.includeProjectTree) {\n\t\t\t\tconsole.log(chalk.gray('Including project file tree'));\n\t\t\t}\n\n\t\t\tconsole.log(chalk.gray(`Detail level: ${validatedParams.detailLevel}`));\n\n\t\t\ttry {\n\t\t\t\t// Import the research function\n\t\t\t\tconst { performResearch } = await import('./task-manager/research.js');\n\n\t\t\t\t// Prepare research options\n\t\t\t\tconst researchOptions = {\n\t\t\t\t\ttaskIds: validatedParams.taskIds,\n\t\t\t\t\tfilePaths: validatedParams.filePaths,\n\t\t\t\t\tcustomContext: validatedParams.customContext || '',\n\t\t\t\t\tincludeProjectTree: validatedParams.includeProjectTree,\n\t\t\t\t\tdetailLevel: validatedParams.detailLevel,\n\t\t\t\t\tprojectRoot: validatedParams.projectRoot,\n\t\t\t\t\tsaveToFile: !!options.saveFile,\n\t\t\t\t\ttag: tag\n\t\t\t\t};\n\n\t\t\t\t// Execute research\n\t\t\t\tconst result = await performResearch(\n\t\t\t\t\tvalidatedParams.prompt,\n\t\t\t\t\tresearchOptions,\n\t\t\t\t\t{\n\t\t\t\t\t\tcommandName: 'research',\n\t\t\t\t\t\toutputType: 'cli',\n\t\t\t\t\t\ttag: 
tag\n\t\t\t\t\t},\n\t\t\t\t\t'text',\n\t\t\t\t\tvalidatedParams.allowFollowUp // Pass follow-up flag\n\t\t\t\t);\n\n\t\t\t\t// Auto-save to task/subtask if requested and no interactive save occurred\n\t\t\t\tif (validatedParams.saveToId && !result.interactiveSaveOccurred) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tconst isSubtask = validatedParams.saveToId.includes('.');\n\n\t\t\t\t\t\t// Format research content for saving\n\t\t\t\t\t\tconst researchContent = `## Research Query: ${validatedParams.prompt}\n\n**Detail Level:** ${result.detailLevel}\n**Context Size:** ${result.contextSize} characters\n**Timestamp:** ${new Date().toLocaleDateString()} ${new Date().toLocaleTimeString()}\n\n### Results\n\n${result.result}`;\n\n\t\t\t\t\t\tif (isSubtask) {\n\t\t\t\t\t\t\t// Save to subtask\n\t\t\t\t\t\t\tconst { updateSubtaskById } = await import(\n\t\t\t\t\t\t\t\t'./task-manager/update-subtask-by-id.js'\n\t\t\t\t\t\t\t);\n\n\t\t\t\t\t\t\tawait updateSubtaskById(\n\t\t\t\t\t\t\t\tvalidatedParams.tasksPath,\n\t\t\t\t\t\t\t\tvalidatedParams.saveToId,\n\t\t\t\t\t\t\t\tresearchContent,\n\t\t\t\t\t\t\t\tfalse, // useResearch = false for simple append\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tcommandName: 'research-save',\n\t\t\t\t\t\t\t\t\toutputType: 'cli',\n\t\t\t\t\t\t\t\t\tprojectRoot: validatedParams.projectRoot,\n\t\t\t\t\t\t\t\t\ttag: tag\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'text'\n\t\t\t\t\t\t\t);\n\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t\t\t`✅ Research saved to subtask ${validatedParams.saveToId}`\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// Save to task\n\t\t\t\t\t\t\tconst updateTaskById = (\n\t\t\t\t\t\t\t\tawait import('./task-manager/update-task-by-id.js')\n\t\t\t\t\t\t\t).default;\n\n\t\t\t\t\t\t\tconst taskIdNum = parseInt(validatedParams.saveToId, 10);\n\t\t\t\t\t\t\tawait 
updateTaskById(\n\t\t\t\t\t\t\t\tvalidatedParams.tasksPath,\n\t\t\t\t\t\t\t\ttaskIdNum,\n\t\t\t\t\t\t\t\tresearchContent,\n\t\t\t\t\t\t\t\tfalse, // useResearch = false for simple append\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tcommandName: 'research-save',\n\t\t\t\t\t\t\t\t\toutputType: 'cli',\n\t\t\t\t\t\t\t\t\tprojectRoot: validatedParams.projectRoot,\n\t\t\t\t\t\t\t\t\ttag: tag\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t'text',\n\t\t\t\t\t\t\t\ttrue // appendMode = true\n\t\t\t\t\t\t\t);\n\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t\t\t`✅ Research saved to task ${validatedParams.saveToId}`\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t} catch (saveError) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.red(`❌ Error saving to task/subtask: ${saveError.message}`)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Save results to file if requested (legacy)\n\t\t\t\tif (validatedParams.saveTarget) {\n\t\t\t\t\tconst saveContent = `# Research Query: ${validatedParams.prompt}\n\n**Detail Level:** ${result.detailLevel}\n**Context Size:** ${result.contextSize} characters\n**Timestamp:** ${new Date().toISOString()}\n\n## Results\n\n${result.result}\n`;\n\n\t\t\t\t\tfs.writeFileSync(validatedParams.saveTarget, saveContent, 'utf-8');\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(`\\n💾 Results saved to: ${validatedParams.saveTarget}`)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`\\n❌ Research failed: ${error.message}`));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// clear-subtasks command\n\tprogramInstance\n\t\t.command('clear-subtasks')\n\t\t.description('Clear subtasks from specified tasks')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-i, --id <ids>',\n\t\t\t'Task IDs (comma-separated) to clear subtasks from'\n\t\t)\n\t\t.option('--all', 'Clear subtasks from all tasks')\n\t\t.option('--tag 
<tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst taskIds = options.id;\n\t\t\tconst all = options.all;\n\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (!taskIds && !all) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: Please specify task IDs with --id=<ids> or use --all to clear all tasks'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tif (all) {\n\t\t\t\t// If --all is specified, get all task IDs\n\t\t\t\tconst data = readJSON(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\ttaskMaster.getProjectRoot(),\n\t\t\t\t\ttag\n\t\t\t\t);\n\t\t\t\tif (!data || !data.tasks) {\n\t\t\t\t\tconsole.error(chalk.red('Error: No valid tasks found'));\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t\tconst allIds = data.tasks.map((t) => t.id).join(',');\n\t\t\t\tclearSubtasks(taskMaster.getTasksPath(), allIds, {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\ttag\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tclearSubtasks(taskMaster.getTasksPath(), taskIds, {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\ttag\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t// add-task command\n\tprogramInstance\n\t\t.command('add-task')\n\t\t.description('Add a new task using AI, optionally providing manual details')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-p, --prompt <prompt>',\n\t\t\t'Description of the task to add (required if not using manual fields)'\n\t\t)\n\t\t.option('-t, --title <title>', 'Task title (for manual task creation)')\n\t\t.option(\n\t\t\t'-d, --description <description>',\n\t\t\t'Task description (for manual task 
creation)'\n\t\t)\n\t\t.option(\n\t\t\t'--details <details>',\n\t\t\t'Implementation details (for manual task creation)'\n\t\t)\n\t\t.option(\n\t\t\t'--dependencies <dependencies>',\n\t\t\t'Comma-separated list of task IDs this task depends on'\n\t\t)\n\t\t.option(\n\t\t\t'--priority <priority>',\n\t\t\t'Task priority (high, medium, low)',\n\t\t\t'medium'\n\t\t)\n\t\t.option(\n\t\t\t'-r, --research',\n\t\t\t'Whether to use research capabilities for task creation'\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst isManualCreation = options.title && options.description;\n\n\t\t\t// Validate that either prompt or title+description are provided\n\t\t\tif (!options.prompt && !isManualCreation) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: Either --prompt or both --title and --description must be provided'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tconst tasksPath = options.file || TASKMASTER_TASKS_FILE;\n\n\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\tconsole.error(\n\t\t\t\t\t`❌ No tasks.json file found. 
Please run \"task-master init\" or create a tasks.json file at ${TASKMASTER_TASKS_FILE}`\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Correctly determine projectRoot\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst projectRoot = taskMaster.getProjectRoot();\n\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tlet manualTaskData = null;\n\t\t\tif (isManualCreation) {\n\t\t\t\tmanualTaskData = {\n\t\t\t\t\ttitle: options.title,\n\t\t\t\t\tdescription: options.description,\n\t\t\t\t\tdetails: options.details || '',\n\t\t\t\t\ttestStrategy: options.testStrategy || ''\n\t\t\t\t};\n\t\t\t\t// Restore specific logging for manual creation\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Creating task manually with title: \"${options.title}\"`)\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// Restore specific logging for AI creation\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Creating task with AI using prompt: \"${options.prompt}\"`)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Log dependencies and priority if provided (restored)\n\t\t\tconst dependenciesArray = options.dependencies\n\t\t\t\t? 
options.dependencies.split(',').map((id) => id.trim())\n\t\t\t\t: [];\n\t\t\tif (dependenciesArray.length > 0) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Dependencies: [${dependenciesArray.join(', ')}]`)\n\t\t\t\t);\n\t\t\t}\n\t\t\tif (options.priority) {\n\t\t\t\tconsole.log(chalk.blue(`Priority: ${options.priority}`));\n\t\t\t}\n\n\t\t\tconst context = {\n\t\t\t\tprojectRoot,\n\t\t\t\ttag,\n\t\t\t\tcommandName: 'add-task',\n\t\t\t\toutputType: 'cli'\n\t\t\t};\n\n\t\t\ttry {\n\t\t\t\tconst { newTaskId, telemetryData } = await addTask(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\toptions.prompt,\n\t\t\t\t\tdependenciesArray,\n\t\t\t\t\toptions.priority,\n\t\t\t\t\tcontext,\n\t\t\t\t\t'text',\n\t\t\t\t\tmanualTaskData,\n\t\t\t\t\toptions.research\n\t\t\t\t);\n\n\t\t\t\t// addTask handles detailed CLI success logging AND telemetry display when outputFormat is 'text'\n\t\t\t\t// No need to call displayAiUsageSummary here anymore.\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error adding task: ${error.message}`));\n\t\t\t\tif (error.details) {\n\t\t\t\t\tconsole.error(chalk.red(error.details));\n\t\t\t\t}\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// next command\n\tprogramInstance\n\t\t.command('next')\n\t\t.description(\n\t\t\t`Show the next task to work on based on dependencies and status${chalk.reset('')}`\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-r, --report <report>',\n\t\t\t'Path to the complexity report file',\n\t\t\tCOMPLEXITY_REPORT_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\tif (options.report && options.report !== COMPLEXITY_REPORT_FILE) {\n\t\t\t\tinitOptions.complexityReportPath = options.report;\n\t\t\t}\n\n\t\t\t// Initialize 
TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag,\n\t\t\t\tcomplexityReportPath: options.report || false\n\t\t\t});\n\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\tconst context = {\n\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\ttag\n\t\t\t};\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tawait displayNextTask(\n\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\ttaskMaster.getComplexityReportPath(),\n\t\t\t\tcontext\n\t\t\t);\n\t\t});\n\n\t// show command\n\tprogramInstance\n\t\t.command('show')\n\t\t.description(\n\t\t\t`Display detailed information about one or more tasks${chalk.reset('')}`\n\t\t)\n\t\t.argument('[id]', 'Task ID(s) to show (comma-separated for multiple)')\n\t\t.option(\n\t\t\t'-i, --id <id>',\n\t\t\t'Task ID(s) to show (comma-separated for multiple)'\n\t\t)\n\t\t.option('-s, --status <status>', 'Filter subtasks by status')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-r, --report <report>',\n\t\t\t'Path to the complexity report file',\n\t\t\tCOMPLEXITY_REPORT_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (taskId, options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\t\t\t// Only pass complexityReportPath if user provided a custom path\n\t\t\tif (options.report && options.report !== COMPLEXITY_REPORT_FILE) {\n\t\t\t\tinitOptions.complexityReportPath = options.report;\n\t\t\t}\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\tconst idArg = taskId || options.id;\n\t\t\tconst statusFilter = options.status;\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (!idArg) 
{\n\t\t\t\tconsole.error(chalk.red('Error: Please provide a task ID'));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Check if multiple IDs are provided (comma-separated)\n\t\t\tconst taskIds = idArg\n\t\t\t\t.split(',')\n\t\t\t\t.map((id) => id.trim())\n\t\t\t\t.filter((id) => id.length > 0);\n\n\t\t\tif (taskIds.length > 1) {\n\t\t\t\t// Multiple tasks - use compact summary view with interactive drill-down\n\t\t\t\tawait displayMultipleTasksSummary(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\ttaskIds,\n\t\t\t\t\ttaskMaster.getComplexityReportPath(),\n\t\t\t\t\tstatusFilter,\n\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// Single task - use detailed view\n\t\t\t\tawait displayTaskById(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\ttaskIds[0],\n\t\t\t\t\ttaskMaster.getComplexityReportPath(),\n\t\t\t\t\tstatusFilter,\n\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t);\n\t\t\t}\n\t\t});\n\n\t// add-dependency command\n\tprogramInstance\n\t\t.command('add-dependency')\n\t\t.description('Add a dependency to a task')\n\t\t.option('-i, --id <id>', 'Task ID to add dependency to')\n\t\t.option('-d, --depends-on <id>', 'Task ID that will become a dependency')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\tconst taskId = options.id;\n\t\t\tconst dependencyId = options.dependsOn;\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (!taskId || !dependencyId) 
{\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red('Error: Both --id and --depends-on are required')\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Handle subtask IDs correctly by preserving the string format for IDs containing dots\n\t\t\t// Only use parseInt for simple numeric IDs\n\t\t\tconst formattedTaskId = taskId.includes('.')\n\t\t\t\t? taskId\n\t\t\t\t: parseInt(taskId, 10);\n\t\t\tconst formattedDependencyId = dependencyId.includes('.')\n\t\t\t\t? dependencyId\n\t\t\t\t: parseInt(dependencyId, 10);\n\n\t\t\tawait addDependency(\n\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\tformattedTaskId,\n\t\t\t\tformattedDependencyId,\n\t\t\t\t{\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\ttag\n\t\t\t\t}\n\t\t\t);\n\t\t});\n\n\t// remove-dependency command\n\tprogramInstance\n\t\t.command('remove-dependency')\n\t\t.description('Remove a dependency from a task')\n\t\t.option('-i, --id <id>', 'Task ID to remove dependency from')\n\t\t.option('-d, --depends-on <id>', 'Task ID to remove as a dependency')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst initOptions = {\n\t\t\t\t// NOTE(review): `true` appears to ask initTaskMaster to resolve the default tasks file when no --file is given — confirm against initTaskMaster\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\tconst taskId = options.id;\n\t\t\tconst dependencyId = options.dependsOn;\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (!taskId || !dependencyId) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red('Error: Both --id and --depends-on are required')\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Handle subtask IDs correctly by preserving the string format for IDs containing dots\n\t\t\t// Only use parseInt for simple numeric IDs\n\t\t\tconst formattedTaskId = taskId.includes('.')\n\t\t\t\t? taskId\n\t\t\t\t: parseInt(taskId, 10);\n\t\t\tconst formattedDependencyId = dependencyId.includes('.')\n\t\t\t\t? dependencyId\n\t\t\t\t: parseInt(dependencyId, 10);\n\n\t\t\t// Delegate to dependency-manager; IDs may be numbers or "parent.sub" strings\n\t\t\tawait removeDependency(\n\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\tformattedTaskId,\n\t\t\t\tformattedDependencyId,\n\t\t\t\t{\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\ttag\n\t\t\t\t}\n\t\t\t);\n\t\t});\n\n\t// validate-dependencies command\n\tprogramInstance\n\t\t.command('validate-dependencies')\n\t\t.description(\n\t\t\t`Identify invalid dependencies without fixing them${chalk.reset('')}`\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tawait validateDependenciesCommand(taskMaster.getTasksPath(), {\n\t\t\t\tcontext: { projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t});\n\t\t});\n\n\t// fix-dependencies command\n\tprogramInstance\n\t\t.command('fix-dependencies')\n\t\t.description(`Fix invalid dependencies automatically${chalk.reset('')}`)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst initOptions = {\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tawait fixDependenciesCommand(taskMaster.getTasksPath(), {\n\t\t\t\tcontext: { projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t});\n\t\t});\n\n\t// complexity-report command\n\tprogramInstance\n\t\t.command('complexity-report')\n\t\t.description(`Display the complexity analysis report${chalk.reset('')}`)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the report file',\n\t\t\tCOMPLEXITY_REPORT_FILE\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\tconst initOptions = {\n\t\t\t\ttag: options.tag\n\t\t\t};\n\n\t\t\t// Only honor --file when it differs from the default report path\n\t\t\tif (options.file && options.file !== COMPLEXITY_REPORT_FILE) {\n\t\t\t\tinitOptions.complexityReportPath = options.file;\n\t\t\t}\n\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster(initOptions);\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(taskMaster.getCurrentTag());\n\n\t\t\tawait displayComplexityReport(taskMaster.getComplexityReportPath());\n\t\t});\n\n\t// add-subtask command\n\tprogramInstance\n\t\t.command('add-subtask')\n\t\t.description('Add a subtask to an existing task')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('-p, --parent <id>', 'Parent task ID (required)')\n\t\t.option('-i, --task-id <id>', 'Existing task ID to convert to subtask')\n\t\t.option(\n\t\t\t'-t, --title <title>',\n\t\t\t'Title for the new subtask (when creating a new subtask)'\n\t\t)\n\t\t.option('-d, --description <text>', 'Description for the new subtask')\n\t\t.option('--details <text>', 'Implementation details for the new subtask')\n\t\t.option(\n\t\t\t'--dependencies <ids>',\n\t\t\t'Comma-separated list of dependency IDs for the new 
subtask'\n\t\t)\n\t\t.option('-s, --status <status>', 'Status for the new subtask', 'pending')\n\t\t.option('--generate', 'Regenerate task files after adding subtask')\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst parentId = options.parent;\n\t\t\tconst existingTaskId = options.taskId;\n\t\t\tconst generateFiles = options.generate || false;\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (!parentId) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: --parent parameter is required. Please provide a parent task ID.'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tshowAddSubtaskHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Parse dependencies if provided\n\t\t\tlet dependencies = [];\n\t\t\tif (options.dependencies) {\n\t\t\t\tdependencies = options.dependencies.split(',').map((id) => {\n\t\t\t\t\t// Handle both regular IDs and dot notation\n\t\t\t\t\treturn id.includes('.') ? id.trim() : parseInt(id.trim(), 10);\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Two modes: convert an existing task (--task-id) or create a new subtask (--title)\n\t\t\ttry {\n\t\t\t\tif (existingTaskId) {\n\t\t\t\t\t// Convert existing task to subtask\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t`Converting task ${existingTaskId} to a subtask of ${parentId}...`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tawait addSubtask(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\tparentId,\n\t\t\t\t\t\texistingTaskId,\n\t\t\t\t\t\tnull,\n\t\t\t\t\t\tgenerateFiles,\n\t\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t`✓ Task ${existingTaskId} successfully converted to a subtask of task ${parentId}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t} else if (options.title) {\n\t\t\t\t\t// Create new subtask with provided data\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(`Creating new subtask for parent task ${parentId}...`)\n\t\t\t\t\t);\n\n\t\t\t\t\tconst newSubtaskData = {\n\t\t\t\t\t\ttitle: options.title,\n\t\t\t\t\t\tdescription: options.description || '',\n\t\t\t\t\t\tdetails: options.details || '',\n\t\t\t\t\t\tstatus: options.status || 'pending',\n\t\t\t\t\t\tdependencies: dependencies\n\t\t\t\t\t};\n\n\t\t\t\t\tconst subtask = await addSubtask(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\tparentId,\n\t\t\t\t\t\tnull,\n\t\t\t\t\t\tnewSubtaskData,\n\t\t\t\t\t\tgenerateFiles,\n\t\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t`✓ New subtask ${parentId}.${subtask.id} successfully created`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\n\t\t\t\t\t// Display success message and suggested next steps\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tboxen(\n\t\t\t\t\t\t\tchalk.white.bold(\n\t\t\t\t\t\t\t\t`Subtask ${parentId}.${subtask.id} Added Successfully`\n\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\tchalk.white(`Title: ${subtask.title}`) +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\tchalk.white(`Status: ${getStatusWithColor(subtask.status)}`) +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\t(dependencies.length > 0\n\t\t\t\t\t\t\t\t\t? chalk.white(`Dependencies: ${dependencies.join(', ')}`) +\n\t\t\t\t\t\t\t\t\t\t'\\n'\n\t\t\t\t\t\t\t\t\t: '') +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\tchalk.white.bold('Next Steps:') +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t\t\t`1. Run ${chalk.yellow(`task-master show ${parentId}`)} to see the parent task with all subtasks`\n\t\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t\t\t`2. Run ${chalk.yellow(`task-master set-status --id=${parentId}.${subtask.id} --status=in-progress`)} to start working on it`\n\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red('Error: Either --task-id or --title must be provided.')\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tboxen(\n\t\t\t\t\t\t\tchalk.white.bold('Usage Examples:') +\n\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\tchalk.white('Convert existing task to subtask:') +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t\t` task-master add-subtask --parent=5 --task-id=8`\n\t\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\tchalk.white('Create new subtask:') +\n\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t\t` task-master add-subtask --parent=5 --title=\"Implement login UI\" --description=\"Create the login form\"`\n\t\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t'\\n\\n',\n\t\t\t\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\t\tshowAddSubtaskHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tshowAddSubtaskHelp();\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// Helper function to show add-subtask command help\n\tfunction showAddSubtaskHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\t`${chalk.white.bold('Add Subtask Command Help')}\\n\\n${chalk.cyan('Usage:')}\\n task-master add-subtask --parent=<id> [options]\\n\\n${chalk.cyan('Options:')}\\n -p, --parent <id> Parent task ID (required)\\n -i, --task-id <id> Existing task ID to convert to subtask\\n -t, --title <title> Title for the new subtask\\n -d, --description <text> Description for the new subtask\\n --details <text> Implementation details for the new subtask\\n --dependencies <ids> Comma-separated list of dependency IDs\\n -s, --status <status> Status for the new subtask (default: \"pending\")\\n -f, --file <file> Path to the tasks file (default: \"${TASKMASTER_TASKS_FILE}\")\\n --generate Regenerate task files after adding subtask\\n\\n${chalk.cyan('Examples:')}\\n task-master add-subtask --parent=5 --task-id=8\\n task-master add-subtask -p 5 -t \"Implement login UI\" -d \"Create the login form\" --generate`,\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// remove-subtask command\n\tprogramInstance\n\t\t.command('remove-subtask')\n\t\t.description('Remove a subtask from its parent task')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'-i, --id <id>',\n\t\t\t'Subtask ID(s) to remove in format \"parentId.subtaskId\" (can be comma-separated for multiple subtasks)'\n\t\t)\n\t\t.option(\n\t\t\t'-c, --convert',\n\t\t\t'Convert the subtask to a standalone task instead of deleting it'\n\t\t)\n\t\t.option('--generate', 'Regenerate task files after removing subtask')\n\t\t.option('--tag 
<tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst subtaskIds = options.id;\n\t\t\tconst convertToTask = options.convert || false;\n\t\t\tconst generateFiles = options.generate || false;\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\tif (!subtaskIds) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: --id parameter is required. Please provide subtask ID(s) in format \"parentId.subtaskId\".'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tshowRemoveSubtaskHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\ttry {\n\t\t\t\t// Split by comma to support multiple subtask IDs\n\t\t\t\tconst subtaskIdArray = subtaskIds.split(',').map((id) => id.trim());\n\n\t\t\t\tfor (const subtaskId of subtaskIdArray) {\n\t\t\t\t\t// Validate subtask ID format\n\t\t\t\t\tif (!subtaskId.includes('.')) {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t\t`Error: Subtask ID \"${subtaskId}\" must be in format \"parentId.subtaskId\"`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tshowRemoveSubtaskHelp();\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\n\t\t\t\t\tconsole.log(chalk.blue(`Removing subtask ${subtaskId}...`));\n\t\t\t\t\tif (convertToTask) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.blue('The subtask will be converted to a standalone task')\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\n\t\t\t\t\t// NOTE(review): result is used as the new standalone task only when converting — confirm against removeSubtask\n\t\t\t\t\tconst result = await removeSubtask(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\tsubtaskId,\n\t\t\t\t\t\tconvertToTask,\n\t\t\t\t\t\tgenerateFiles,\n\t\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t\t);\n\n\t\t\t\t\tif (convertToTask && result) {\n\t\t\t\t\t\t// Display success message and next steps for converted task\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tboxen(\n\t\t\t\t\t\t\t\tchalk.white.bold(\n\t\t\t\t\t\t\t\t\t`Subtask ${subtaskId} Converted to Task #${result.id}`\n\t\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\t\tchalk.white(`Title: ${result.title}`) +\n\t\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\t\tchalk.white(`Status: ${getStatusWithColor(result.status)}`) +\n\t\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t\t\t\t`Dependencies: ${result.dependencies.join(', ')}`\n\t\t\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\t\tchalk.white.bold('Next Steps:') +\n\t\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t\t\t\t`1. Run ${chalk.yellow(`task-master show ${result.id}`)} to see details of the new task`\n\t\t\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t\t\t\t`2. Run ${chalk.yellow(`task-master set-status --id=${result.id} --status=in-progress`)} to start working on it`\n\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Display success message for deleted subtask\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tboxen(\n\t\t\t\t\t\t\t\tchalk.white.bold(`Subtask ${subtaskId} Removed`) +\n\t\t\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t\t\tchalk.white('The subtask has been successfully deleted.'),\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\t\tshowRemoveSubtaskHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tshowRemoveSubtaskHelp();\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// Helper function to show remove-subtask command help\n\tfunction showRemoveSubtaskHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Remove Subtask Command Help') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\tchalk.cyan('Usage:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t` task-master remove-subtask --id=<parentId.subtaskId> [options]\\n\\n` +\n\t\t\t\t\tchalk.cyan('Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -i, --id <id> Subtask ID(s) to remove in format \"parentId.subtaskId\" (can be comma-separated, required)\\n' +\n\t\t\t\t\t' -c, --convert Convert the subtask to a standalone task instead of deleting it\\n' +\n\t\t\t\t\t' -f, --file <file> Path to the tasks file (default: \"' +\n\t\t\t\t\tTASKMASTER_TASKS_FILE +\n\t\t\t\t\t'\")\\n' +\n\t\t\t\t\t' --skip-generate Skip regenerating task files\\n\\n' +\n\t\t\t\t\tchalk.cyan('Examples:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master remove-subtask --id=5.2\\n' +\n\t\t\t\t\t' task-master remove-subtask --id=5.2,6.3,7.1\\n' +\n\t\t\t\t\t' task-master remove-subtask --id=5.2 --convert',\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// Helper function to show tags command help\n\tfunction showTagsHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Tags Command Help') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\tchalk.cyan('Usage:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t` task-master tags [options]\\n\\n` +\n\t\t\t\t\tchalk.cyan('Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -f, --file <file> Path to the tasks file (default: \"' +\n\t\t\t\t\tTASKMASTER_TASKS_FILE +\n\t\t\t\t\t'\")\\n' +\n\t\t\t\t\t' --show-metadata Show detailed metadata for each tag\\n\\n' +\n\t\t\t\t\tchalk.cyan('Examples:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master tags\\n' +\n\t\t\t\t\t' task-master tags --show-metadata\\n\\n' +\n\t\t\t\t\tchalk.cyan('Related Commands:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master add-tag <name> Create a new tag\\n' +\n\t\t\t\t\t' task-master use-tag <name> Switch to 
a tag\\n' +\n\t\t\t\t\t' task-master delete-tag <name> Delete a tag',\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// Helper function to show add-tag command help\n\tfunction showAddTagHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Add Tag Command Help') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\tchalk.cyan('Usage:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t` task-master add-tag <tagName> [options]\\n\\n` +\n\t\t\t\t\tchalk.cyan('Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -f, --file <file> Path to the tasks file (default: \"' +\n\t\t\t\t\tTASKMASTER_TASKS_FILE +\n\t\t\t\t\t'\")\\n' +\n\t\t\t\t\t' --copy-from-current Copy tasks from the current tag to the new tag\\n' +\n\t\t\t\t\t' --copy-from <tag> Copy tasks from the specified tag to the new tag\\n' +\n\t\t\t\t\t' -d, --description <text> Optional description for the tag\\n\\n' +\n\t\t\t\t\tchalk.cyan('Examples:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master add-tag feature-xyz\\n' +\n\t\t\t\t\t' task-master add-tag feature-xyz --copy-from-current\\n' +\n\t\t\t\t\t' task-master add-tag feature-xyz --copy-from master\\n' +\n\t\t\t\t\t' task-master add-tag feature-xyz -d \"Feature XYZ development\"',\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// Helper function to show delete-tag command help\n\tfunction showDeleteTagHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Delete Tag Command Help') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\tchalk.cyan('Usage:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t` task-master delete-tag <tagName> [options]\\n\\n` +\n\t\t\t\t\tchalk.cyan('Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -f, --file <file> Path to the tasks file (default: \"' +\n\t\t\t\t\tTASKMASTER_TASKS_FILE +\n\t\t\t\t\t'\")\\n' +\n\t\t\t\t\t' -y, --yes Skip confirmation prompts\\n\\n' +\n\t\t\t\t\tchalk.cyan('Examples:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master delete-tag feature-xyz\\n' +\n\t\t\t\t\t' 
task-master delete-tag feature-xyz --yes\\n\\n' +\n\t\t\t\t\tchalk.yellow('Warning:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' This will permanently delete the tag and all its tasks!',\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// Helper function to show use-tag command help\n\tfunction showUseTagHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Use Tag Command Help') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\tchalk.cyan('Usage:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t` task-master use-tag <tagName> [options]\\n\\n` +\n\t\t\t\t\tchalk.cyan('Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -f, --file <file> Path to the tasks file (default: \"' +\n\t\t\t\t\tTASKMASTER_TASKS_FILE +\n\t\t\t\t\t'\")\\n\\n' +\n\t\t\t\t\tchalk.cyan('Examples:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master use-tag feature-xyz\\n' +\n\t\t\t\t\t' task-master use-tag master\\n\\n' +\n\t\t\t\t\tchalk.cyan('Related Commands:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master tags List all available tags\\n' +\n\t\t\t\t\t' task-master add-tag <name> Create a new tag',\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// Helper function to show research command help\n\tfunction showResearchHelp() {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.white.bold('Research Command Help') +\n\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\tchalk.cyan('Usage:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t` task-master research \"<query>\" [options]\\n\\n` +\n\t\t\t\t\tchalk.cyan('Required:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' <query> Research question or prompt (required)\\n\\n' +\n\t\t\t\t\tchalk.cyan('Context Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -i, --id <ids> Comma-separated task/subtask IDs for context (e.g., \"15,23.2\")\\n' +\n\t\t\t\t\t' -f, --files <paths> Comma-separated file paths for context\\n' +\n\t\t\t\t\t' -c, --context <text> Additional custom context text\\n' +\n\t\t\t\t\t' --tree Include project file tree structure\\n\\n' 
+\n\t\t\t\t\tchalk.cyan('Output Options:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' -d, --detail <level> Detail level: low, medium, high (default: medium)\\n' +\n\t\t\t\t\t' --save-to <id> Auto-save results to task/subtask ID (e.g., \"15\" or \"15.2\")\\n' +\n\t\t\t\t\t' --tag <tag> Specify tag context for task operations\\n\\n' +\n\t\t\t\t\tchalk.cyan('Examples:') +\n\t\t\t\t\t'\\n' +\n\t\t\t\t\t' task-master research \"How should I implement user authentication?\"\\n' +\n\t\t\t\t\t' task-master research \"What\\'s the best approach?\" --id=15,23.2\\n' +\n\t\t\t\t\t' task-master research \"How does auth work?\" --files=src/auth.js --tree\\n' +\n\t\t\t\t\t' task-master research \"Implementation steps?\" --save-to=15.2 --detail=high',\n\t\t\t\t{ padding: 1, borderColor: 'blue', borderStyle: 'round' }\n\t\t\t)\n\t\t);\n\t}\n\n\t// remove-task command\n\tprogramInstance\n\t\t.command('remove-task')\n\t\t.description('Remove one or more tasks or subtasks permanently')\n\t\t.option(\n\t\t\t'-i, --id <ids>',\n\t\t\t'ID(s) of the task(s) or subtask(s) to remove (e.g., \"5\", \"5.2\", or \"5,6.1,7\")'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('-y, --yes', 'Skip confirmation prompt', false)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst taskIdsString = options.id;\n\n\t\t\t// Resolve tag using standard pattern\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\t// Show current tag context\n\t\t\tdisplayCurrentTagIndicator(tag);\n\n\t\t\tif (!taskIdsString) {\n\t\t\t\tconsole.error(chalk.red('Error: Task ID(s) are required'));\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t'Usage: task-master remove-task 
--id=<taskId1,taskId2...>'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tconst taskIdsToRemove = taskIdsString\n\t\t\t\t.split(',')\n\t\t\t\t.map((id) => id.trim())\n\t\t\t\t.filter(Boolean);\n\n\t\t\tif (taskIdsToRemove.length === 0) {\n\t\t\t\tconsole.error(chalk.red('Error: No valid task IDs provided.'));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\ttry {\n\t\t\t\t// Read data once for checks and confirmation\n\t\t\t\tconst data = readJSON(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\ttaskMaster.getProjectRoot(),\n\t\t\t\t\ttag\n\t\t\t\t);\n\t\t\t\tif (!data || !data.tasks) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: No valid tasks found in ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst existingTasksToRemove = [];\n\t\t\t\tconst nonExistentIds = [];\n\t\t\t\tlet totalSubtasksToDelete = 0;\n\t\t\t\tconst dependentTaskMessages = [];\n\n\t\t\t\tfor (const taskId of taskIdsToRemove) {\n\t\t\t\t\tif (!taskExists(data.tasks, taskId)) {\n\t\t\t\t\t\tnonExistentIds.push(taskId);\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Correctly extract the task object from the result of findTaskById\n\t\t\t\t\t\tconst findResult = findTaskById(data.tasks, taskId);\n\t\t\t\t\t\tconst taskObject = findResult.task; // Get the actual task/subtask object\n\n\t\t\t\t\t\tif (taskObject) {\n\t\t\t\t\t\t\texistingTasksToRemove.push({ id: taskId, task: taskObject }); // Push the actual task object\n\n\t\t\t\t\t\t\t// If it's a main task, count its subtasks and check dependents\n\t\t\t\t\t\t\tif (!taskObject.isSubtask) {\n\t\t\t\t\t\t\t\t// Check the actual task object\n\t\t\t\t\t\t\t\tif (taskObject.subtasks && taskObject.subtasks.length > 0) {\n\t\t\t\t\t\t\t\t\ttotalSubtasksToDelete += taskObject.subtasks.length;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tconst dependentTasks = data.tasks.filter(\n\t\t\t\t\t\t\t\t\t(t) =>\n\t\t\t\t\t\t\t\t\t\tt.dependencies &&\n\t\t\t\t\t\t\t\t\t\tt.dependencies.includes(parseInt(taskId, 
10))\n\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\tif (dependentTasks.length > 0) {\n\t\t\t\t\t\t\t\t\tdependentTaskMessages.push(\n\t\t\t\t\t\t\t\t\t\t` - Task ${taskId}: ${dependentTasks.length} dependent tasks (${dependentTasks.map((t) => t.id).join(', ')})`\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t// Handle case where findTaskById returned null for the task property (should be rare)\n\t\t\t\t\t\t\tnonExistentIds.push(`${taskId} (error finding details)`);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (nonExistentIds.length > 0) {\n\t\t\t\t\tconsole.warn(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`Warning: The following task IDs were not found: ${nonExistentIds.join(', ')}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tif (existingTasksToRemove.length === 0) {\n\t\t\t\t\tconsole.log(chalk.blue('No existing tasks found to remove.'));\n\t\t\t\t\tprocess.exit(0);\n\t\t\t\t}\n\n\t\t\t\t// Skip confirmation if --yes flag is provided\n\t\t\t\tif (!options.yes) {\n\t\t\t\t\tconsole.log();\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.red.bold(\n\t\t\t\t\t\t\t`⚠️ WARNING: This will permanently delete the following ${existingTasksToRemove.length} item(s):`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log();\n\n\t\t\t\t\texistingTasksToRemove.forEach(({ id, task }) => {\n\t\t\t\t\t\tif (!task) return; // Should not happen due to taskExists check, but safeguard\n\t\t\t\t\t\tif (task.isSubtask) {\n\t\t\t\t\t\t\t// Subtask - title is directly on the task object\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.white(` Subtask ${id}: ${task.title || '(no title)'}`)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t// Optionally show parent context if available\n\t\t\t\t\t\t\tif (task.parentTask) {\n\t\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\t\tchalk.gray(\n\t\t\t\t\t\t\t\t\t\t` (Parent: ${task.parentTask.id} - ${task.parentTask.title || '(no title)'})`\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else 
{\n\t\t\t\t\t\t\t// Main task - title is directly on the task object\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.white.bold(` Task ${id}: ${task.title || '(no title)'}`)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\n\t\t\t\t\tif (totalSubtasksToDelete > 0) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t`⚠️ This will also delete ${totalSubtasksToDelete} subtasks associated with the selected main tasks!`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\n\t\t\t\t\tif (dependentTaskMessages.length > 0) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t'⚠️ Warning: Dependencies on the following tasks will be removed:'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tdependentTaskMessages.forEach((msg) =>\n\t\t\t\t\t\t\tconsole.log(chalk.yellow(msg))\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\n\t\t\t\t\tconsole.log();\n\n\t\t\t\t\tconst { confirm } = await inquirer.prompt([\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttype: 'confirm',\n\t\t\t\t\t\t\tname: 'confirm',\n\t\t\t\t\t\t\tmessage: chalk.red.bold(\n\t\t\t\t\t\t\t\t`Are you sure you want to permanently delete these ${existingTasksToRemove.length} item(s)?`\n\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\tdefault: false\n\t\t\t\t\t\t}\n\t\t\t\t\t]);\n\n\t\t\t\t\tif (!confirm) {\n\t\t\t\t\t\tconsole.log(chalk.blue('Task deletion cancelled.'));\n\t\t\t\t\t\tprocess.exit(0);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tconst indicator = startLoadingIndicator(\n\t\t\t\t\t`Removing ${existingTasksToRemove.length} task(s)/subtask(s)...`\n\t\t\t\t);\n\n\t\t\t\t// Use the string of existing IDs for the core function\n\t\t\t\tconst existingIdsString = existingTasksToRemove\n\t\t\t\t\t.map(({ id }) => id)\n\t\t\t\t\t.join(',');\n\t\t\t\tconst result = await removeTask(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\texistingIdsString,\n\t\t\t\t\t{\n\t\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\t\ttag\n\t\t\t\t\t}\n\t\t\t\t);\n\n\t\t\t\tstopLoadingIndicator(indicator);\n\n\t\t\t\tif (result.success) 
{\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tboxen(\n\t\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t\t`Successfully removed ${result.removedTasks.length} task(s)/subtask(s).`\n\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t(result.message ? `\\n\\nDetails:\\n${result.message}` : '') +\n\t\t\t\t\t\t\t\t(result.error\n\t\t\t\t\t\t\t\t\t? `\\n\\nWarnings:\\n${chalk.yellow(result.error)}`\n\t\t\t\t\t\t\t\t\t: ''),\n\t\t\t\t\t\t\t{ padding: 1, borderColor: 'green', borderStyle: 'round' }\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tboxen(\n\t\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t\t`Operation completed with errors. Removed ${result.removedTasks.length} task(s)/subtask(s).`\n\t\t\t\t\t\t\t) +\n\t\t\t\t\t\t\t\t(result.message ? `\\n\\nDetails:\\n${result.message}` : '') +\n\t\t\t\t\t\t\t\t(result.error ? `\\n\\nErrors:\\n${chalk.red(result.error)}` : ''),\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\t\tborderColor: 'red',\n\t\t\t\t\t\t\t\tborderStyle: 'round'\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1); // Exit with error code if any part failed\n\t\t\t\t}\n\n\t\t\t\t// Log any initially non-existent IDs again for clarity\n\t\t\t\tif (nonExistentIds.length > 0) {\n\t\t\t\t\tconsole.warn(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`Note: The following IDs were not found initially and were skipped: ${nonExistentIds.join(', ')}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\n\t\t\t\t\t// Exit with error if any removals failed\n\t\t\t\t\tif (result.removedTasks.length === 0) {\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(`Error: ${error.message || 'An unknown error occurred'}`)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// init command (Directly calls the implementation from init.js)\n\tprogramInstance\n\t\t.command('init')\n\t\t.description('Initialize a new project with Task Master structure')\n\t\t.option('-y, --yes', 'Skip prompts 
and use default values')\n\t\t.option('-n, --name <name>', 'Project name')\n\t\t.option('-d, --description <description>', 'Project description')\n\t\t.option('-v, --version <version>', 'Project version', '0.1.0') // Set default here\n\t\t.option('-a, --author <author>', 'Author name')\n\t\t.option(\n\t\t\t'-r, --rules <rules...>',\n\t\t\t'List of rules to add (roo, windsurf, cursor, ...). Accepts comma or space separated values.'\n\t\t)\n\t\t.option('--skip-install', 'Skip installing dependencies')\n\t\t.option('--dry-run', 'Show what would be done without making changes')\n\t\t.option('--aliases', 'Add shell aliases (tm, taskmaster)')\n\t\t.option('--no-aliases', 'Skip shell aliases (tm, taskmaster)')\n\t\t.option('--git', 'Initialize Git repository')\n\t\t.option('--no-git', 'Skip Git repository initialization')\n\t\t.option('--git-tasks', 'Store tasks in Git')\n\t\t.option('--no-git-tasks', 'No Git storage of tasks')\n\t\t.action(async (cmdOptions) => {\n\t\t\t// cmdOptions contains parsed arguments\n\t\t\t// Parse rules: accept space or comma separated, default to all available rules\n\t\t\tlet selectedProfiles = RULE_PROFILES;\n\t\t\tlet rulesExplicitlyProvided = false;\n\n\t\t\tif (cmdOptions.rules && Array.isArray(cmdOptions.rules)) {\n\t\t\t\tconst userSpecifiedProfiles = cmdOptions.rules\n\t\t\t\t\t.flatMap((r) => r.split(','))\n\t\t\t\t\t.map((r) => r.trim())\n\t\t\t\t\t.filter(Boolean);\n\t\t\t\t// Only override defaults if user specified valid rules\n\t\t\t\tif (userSpecifiedProfiles.length > 0) {\n\t\t\t\t\tselectedProfiles = userSpecifiedProfiles;\n\t\t\t\t\trulesExplicitlyProvided = true;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcmdOptions.rules = selectedProfiles;\n\t\t\tcmdOptions.rulesExplicitlyProvided = rulesExplicitlyProvided;\n\n\t\t\ttry {\n\t\t\t\t// Directly call the initializeProject function, passing the parsed options\n\t\t\t\tawait initializeProject(cmdOptions);\n\t\t\t\t// initializeProject handles its own flow, including potential process.exit()\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(`Error during initialization: ${error.message}`)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// models command\n\tprogramInstance\n\t\t.command('models')\n\t\t.description('Manage AI model configurations')\n\t\t.option(\n\t\t\t'--set-main <model_id>',\n\t\t\t'Set the primary model for task generation/updates'\n\t\t)\n\t\t.option(\n\t\t\t'--set-research <model_id>',\n\t\t\t'Set the model for research-backed operations'\n\t\t)\n\t\t.option(\n\t\t\t'--set-fallback <model_id>',\n\t\t\t'Set the model to use if the primary fails'\n\t\t)\n\t\t.option('--setup', 'Run interactive setup to configure models')\n\t\t.option(\n\t\t\t'--openrouter',\n\t\t\t'Allow setting a custom OpenRouter model ID (use with --set-*) '\n\t\t)\n\t\t.option(\n\t\t\t'--ollama',\n\t\t\t'Allow setting a custom Ollama model ID (use with --set-*) '\n\t\t)\n\t\t.option(\n\t\t\t'--bedrock',\n\t\t\t'Allow setting a custom Bedrock model ID (use with --set-*) '\n\t\t)\n\t\t.option(\n\t\t\t'--claude-code',\n\t\t\t'Allow setting a Claude Code model ID (use with --set-*)'\n\t\t)\n\t\t.option(\n\t\t\t'--azure',\n\t\t\t'Allow setting a custom Azure OpenAI model ID (use with --set-*) '\n\t\t)\n\t\t.option(\n\t\t\t'--vertex',\n\t\t\t'Allow setting a custom Vertex AI model ID (use with --set-*) '\n\t\t)\n\t\t.option(\n\t\t\t'--gemini-cli',\n\t\t\t'Allow setting a Gemini CLI model ID (use with --set-*)'\n\t\t)\n\t\t.addHelpText(\n\t\t\t'after',\n\t\t\t`\nExamples:\n $ task-master models # View current configuration\n $ task-master models --set-main gpt-4o # Set main model (provider inferred)\n $ task-master models --set-research sonar-pro # Set research model\n $ task-master models --set-fallback claude-3-5-sonnet-20241022 # Set fallback\n $ task-master models --set-main my-custom-model --ollama # Set custom Ollama model for main role\n $ task-master models --set-main anthropic.claude-3-sonnet-20240229-v1:0 --bedrock # Set custom Bedrock model for main role\n $ task-master models --set-main some/other-model --openrouter # Set custom OpenRouter model for main role\n $ task-master models --set-main sonnet --claude-code # Set Claude Code model for main role\n $ task-master models --set-main gpt-4o --azure # Set custom Azure OpenAI model for main role\n $ task-master models --set-main claude-3-5-sonnet@20241022 --vertex # Set custom Vertex AI model for main role\n $ task-master models --set-main gemini-2.5-pro --gemini-cli # Set Gemini CLI model for main role\n $ task-master models --setup # Run interactive setup`\n\t\t)\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || false\n\t\t\t});\n\n\t\t\tconst projectRoot = taskMaster.getProjectRoot();\n\n\t\t\t// Validate flags: cannot use multiple provider flags simultaneously\n\t\t\tconst providerFlags = [\n\t\t\t\toptions.openrouter,\n\t\t\t\toptions.ollama,\n\t\t\t\toptions.bedrock,\n\t\t\t\toptions.claudeCode,\n\t\t\t\toptions.geminiCli\n\t\t\t].filter(Boolean).length;\n\t\t\t// NOTE(review): --azure and --vertex are not included in this exclusivity check — confirm intentional\n\t\t\tif (providerFlags > 1) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: Cannot use multiple provider flags (--openrouter, --ollama, --bedrock, --claude-code, --gemini-cli) simultaneously.'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Determine the primary action based on flags\n\t\t\tconst isSetup = options.setup;\n\t\t\tconst isSetOperation =\n\t\t\t\toptions.setMain || options.setResearch || options.setFallback;\n\n\t\t\t// --- Execute Action ---\n\n\t\t\tif (isSetup) {\n\t\t\t\t// Action 1: Run Interactive Setup\n\t\t\t\tconsole.log(chalk.blue('Starting interactive model setup...')); // Added feedback\n\t\t\t\ttry {\n\t\t\t\t\tawait runInteractiveSetup(taskMaster.getProjectRoot());\n\t\t\t\t\t// runInteractiveSetup logs its own completion/error messages\n\t\t\t\t} catch (setupError) 
{\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red('\\\\nInteractive setup failed unexpectedly:'),\n\t\t\t\t\t\tsetupError.message\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\t// --- IMPORTANT: Exit after setup ---\n\t\t\t\treturn; // Stop execution here\n\t\t\t}\n\n\t\t\tif (isSetOperation) {\n\t\t\t\t// Action 2: Perform Direct Set Operations\n\t\t\t\tlet updateOccurred = false; // Track if any update actually happened\n\n\t\t\t\tif (options.setMain) {\n\t\t\t\t\tconst result = await setModel('main', options.setMain, {\n\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\tproviderHint: options.openrouter\n\t\t\t\t\t\t\t? 'openrouter'\n\t\t\t\t\t\t\t: options.ollama\n\t\t\t\t\t\t\t\t? 'ollama'\n\t\t\t\t\t\t\t\t: options.bedrock\n\t\t\t\t\t\t\t\t\t? 'bedrock'\n\t\t\t\t\t\t\t\t\t: options.claudeCode\n\t\t\t\t\t\t\t\t\t\t? 'claude-code'\n\t\t\t\t\t\t\t\t\t\t: options.geminiCli\n\t\t\t\t\t\t\t\t\t\t\t? 'gemini-cli'\n\t\t\t\t\t\t\t\t\t\t\t: undefined\n\t\t\t\t\t});\n\t\t\t\t\tif (result.success) {\n\t\t\t\t\t\tconsole.log(chalk.green(`✅ ${result.data.message}`));\n\t\t\t\t\t\tif (result.data.warning)\n\t\t\t\t\t\t\tconsole.log(chalk.yellow(result.data.warning));\n\t\t\t\t\t\tupdateOccurred = true;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red(`❌ Error setting main model: ${result.error.message}`)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (options.setResearch) {\n\t\t\t\t\tconst result = await setModel('research', options.setResearch, {\n\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\tproviderHint: options.openrouter\n\t\t\t\t\t\t\t? 'openrouter'\n\t\t\t\t\t\t\t: options.ollama\n\t\t\t\t\t\t\t\t? 'ollama'\n\t\t\t\t\t\t\t\t: options.bedrock\n\t\t\t\t\t\t\t\t\t? 'bedrock'\n\t\t\t\t\t\t\t\t\t: options.claudeCode\n\t\t\t\t\t\t\t\t\t\t? 'claude-code'\n\t\t\t\t\t\t\t\t\t\t: options.geminiCli\n\t\t\t\t\t\t\t\t\t\t\t? 
'gemini-cli'\n\t\t\t\t\t\t\t\t\t\t\t: undefined\n\t\t\t\t\t});\n\t\t\t\t\tif (result.success) {\n\t\t\t\t\t\tconsole.log(chalk.green(`✅ ${result.data.message}`));\n\t\t\t\t\t\tif (result.data.warning)\n\t\t\t\t\t\t\tconsole.log(chalk.yellow(result.data.warning));\n\t\t\t\t\t\tupdateOccurred = true;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t\t`❌ Error setting research model: ${result.error.message}`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (options.setFallback) {\n\t\t\t\t\tconst result = await setModel('fallback', options.setFallback, {\n\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\tproviderHint: options.openrouter\n\t\t\t\t\t\t\t? 'openrouter'\n\t\t\t\t\t\t\t: options.ollama\n\t\t\t\t\t\t\t\t? 'ollama'\n\t\t\t\t\t\t\t\t: options.bedrock\n\t\t\t\t\t\t\t\t\t? 'bedrock'\n\t\t\t\t\t\t\t\t\t: options.claudeCode\n\t\t\t\t\t\t\t\t\t\t? 'claude-code'\n\t\t\t\t\t\t\t\t\t\t: options.geminiCli\n\t\t\t\t\t\t\t\t\t\t\t? 'gemini-cli'\n\t\t\t\t\t\t\t\t\t\t\t: undefined\n\t\t\t\t\t});\n\t\t\t\t\tif (result.success) {\n\t\t\t\t\t\tconsole.log(chalk.green(`✅ ${result.data.message}`));\n\t\t\t\t\t\tif (result.data.warning)\n\t\t\t\t\t\t\tconsole.log(chalk.yellow(result.data.warning));\n\t\t\t\t\t\tupdateOccurred = true;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t\t`❌ Error setting fallback model: ${result.error.message}`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Optional: Add a final confirmation if any update occurred\n\t\t\t\tif (updateOccurred) {\n\t\t\t\t\tconsole.log(chalk.blue('\\nModel configuration updated.'));\n\t\t\t\t} else {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'\\nNo model configuration changes were made (or errors occurred).'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// --- IMPORTANT: Exit after set operations ---\n\t\t\t\treturn; // Stop execution here\n\t\t\t}\n\n\t\t\t// Action 3: Display 
Full Status (Only runs if no setup and no set flags)\n\t\t\tconsole.log(chalk.blue('Fetching current model configuration...')); // Added feedback\n\t\t\tconst configResult = await getModelConfiguration({ projectRoot });\n\t\t\tconst availableResult = await getAvailableModelsList({ projectRoot });\n\t\t\tconst apiKeyStatusResult = await getApiKeyStatusReport({ projectRoot });\n\n\t\t\t// 1. Display Active Models\n\t\t\tif (!configResult.success) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t`❌ Error fetching configuration: ${configResult.error.message}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\tdisplayModelConfiguration(\n\t\t\t\t\tconfigResult.data,\n\t\t\t\t\tavailableResult.data?.models || []\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// 2. Display API Key Status\n\t\t\tif (apiKeyStatusResult.success) {\n\t\t\t\tdisplayApiKeyStatus(apiKeyStatusResult.data.report);\n\t\t\t} else {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`⚠️ Warning: Could not display API Key status: ${apiKeyStatusResult.error.message}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// 3. Display Other Available Models (Filtered)\n\t\t\tif (availableResult.success) {\n\t\t\t\tconst activeIds = configResult.success\n\t\t\t\t\t? [\n\t\t\t\t\t\t\tconfigResult.data.activeModels.main.modelId,\n\t\t\t\t\t\t\tconfigResult.data.activeModels.research.modelId,\n\t\t\t\t\t\t\tconfigResult.data.activeModels.fallback?.modelId\n\t\t\t\t\t\t].filter(Boolean)\n\t\t\t\t\t: [];\n\t\t\t\tconst displayableAvailable = availableResult.data.models.filter(\n\t\t\t\t\t(m) => !activeIds.includes(m.modelId) && !m.modelId.startsWith('[')\n\t\t\t\t);\n\t\t\t\tdisplayAvailableModels(displayableAvailable);\n\t\t\t} else {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`⚠️ Warning: Could not display available models: ${availableResult.error.message}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// 4. 
Conditional Hint if Config File is Missing\n\t\t\tconst configExists = isConfigFilePresent(projectRoot);\n\t\t\tif (!configExists) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\"\\\\nHint: Run 'task-master models --setup' to create or update your configuration.\"\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t\t// --- IMPORTANT: Exit after displaying status ---\n\t\t\treturn; // Stop execution here\n\t\t});\n\n\t// response-language command\n\tprogramInstance\n\t\t.command('lang')\n\t\t.description('Manage response language settings')\n\t\t.option('--response <response_language>', 'Set the response language')\n\t\t.option('--setup', 'Run interactive setup to configure response language')\n\t\t.action(async (options) => {\n\t\t\tconst taskMaster = initTaskMaster({});\n\t\t\tconst projectRoot = taskMaster.getProjectRoot(); // Find project root for context\n\t\t\tconst { response, setup } = options;\n\t\t\tlet responseLanguage = response !== undefined ? response : 'English';\n\t\t\tif (setup) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue('Starting interactive response language setup...')\n\t\t\t\t);\n\t\t\t\ttry {\n\t\t\t\t\tconst userResponse = await inquirer.prompt([\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttype: 'input',\n\t\t\t\t\t\t\tname: 'responseLanguage',\n\t\t\t\t\t\t\tmessage: 'Input your preferred response language',\n\t\t\t\t\t\t\tdefault: 'English'\n\t\t\t\t\t\t}\n\t\t\t\t\t]);\n\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t'Response language set to:',\n\t\t\t\t\t\t\tuserResponse.responseLanguage\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tresponseLanguage = userResponse.responseLanguage;\n\t\t\t\t} catch (setupError) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red('\\\\nInteractive setup failed unexpectedly:'),\n\t\t\t\t\t\tsetupError.message\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconst result = setResponseLanguage(responseLanguage, {\n\t\t\t\tprojectRoot\n\t\t\t});\n\n\t\t\tif (result.success) {\n\t\t\t\tconsole.log(chalk.green(`✅ 
${result.data.message}`));\n\t\t\t} else {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t`❌ Error setting response language: ${result.error.message}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// move-task command\n\tprogramInstance\n\t\t.command('move')\n\t\t.description('Move a task or subtask to a new position')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'--from <id>',\n\t\t\t'ID of the task/subtask to move (e.g., \"5\" or \"5.2\"). Can be comma-separated to move multiple tasks (e.g., \"5,6,7\")'\n\t\t)\n\t\t.option(\n\t\t\t'--to <id>',\n\t\t\t'ID of the destination (e.g., \"7\" or \"7.3\"). Must match the number of source IDs if comma-separated'\n\t\t)\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst sourceId = options.from;\n\t\t\tconst destinationId = options.to;\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\tif (!sourceId || !destinationId) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red('Error: Both --from and --to parameters are required')\n\t\t\t\t);\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t'Usage: task-master move --from=<sourceId> --to=<destinationId>'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Check if we're moving multiple tasks (comma-separated IDs)\n\t\t\tconst sourceIds = sourceId.split(',').map((id) => id.trim());\n\t\t\tconst destinationIds = destinationId.split(',').map((id) => id.trim());\n\n\t\t\t// Validate that the number of source and destination IDs match\n\t\t\tif (sourceIds.length !== destinationIds.length) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t'Error: The number of source and destination IDs must 
match'\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.yellow('Example: task-master move --from=5,6,7 --to=10,11,12')\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// If moving multiple tasks\n\t\t\tif (sourceIds.length > 1) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t`Moving multiple tasks: ${sourceIds.join(', ')} to ${destinationIds.join(', ')}...`\n\t\t\t\t\t)\n\t\t\t\t);\n\n\t\t\t\ttry {\n\t\t\t\t\t// Read tasks data once to validate destination IDs\n\t\t\t\t\tconst tasksData = readJSON(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\ttaskMaster.getProjectRoot(),\n\t\t\t\t\t\ttag\n\t\t\t\t\t);\n\t\t\t\t\tif (!tasksData || !tasksData.tasks) {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red(`Error: Invalid or missing tasks file at ${tasksPath}`)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Move tasks one by one\n\t\t\t\t\tfor (let i = 0; i < sourceIds.length; i++) {\n\t\t\t\t\t\tconst fromId = sourceIds[i];\n\t\t\t\t\t\tconst toId = destinationIds[i];\n\n\t\t\t\t\t\t// Skip if source and destination are the same\n\t\t\t\t\t\tif (fromId === toId) {\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.yellow(`Skipping ${fromId} -> ${toId} (same ID)`)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.blue(`Moving task/subtask ${fromId} to ${toId}...`)\n\t\t\t\t\t\t);\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tawait moveTask(\n\t\t\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\t\t\tfromId,\n\t\t\t\t\t\t\t\ttoId,\n\t\t\t\t\t\t\t\ti === sourceIds.length - 1,\n\t\t\t\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t\t\t`✓ Successfully moved task/subtask ${fromId} to ${toId}`\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\t\tchalk.red(`Error moving ${fromId} to ${toId}: 
${error.message}`)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t// Continue with the next task rather than exiting\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} catch (error) {\n\t\t\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Moving a single task (existing logic)\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(`Moving task/subtask ${sourceId} to ${destinationId}...`)\n\t\t\t\t);\n\n\t\t\t\ttry {\n\t\t\t\t\tconst result = await moveTask(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\tsourceId,\n\t\t\t\t\t\tdestinationId,\n\t\t\t\t\t\ttrue,\n\t\t\t\t\t\t{ projectRoot: taskMaster.getProjectRoot(), tag }\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t`✓ Successfully moved task/subtask ${sourceId} to ${destinationId}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\n\t// Add/remove profile rules command\n\tprogramInstance\n\t\t.command('rules [action] [profiles...]')\n\t\t.description(\n\t\t\t`Add or remove rules for one or more profiles. 
Valid actions: ${Object.values(RULES_ACTIONS).join(', ')} (e.g., task-master rules ${RULES_ACTIONS.ADD} windsurf roo)`\n\t\t)\n\t\t.option(\n\t\t\t'-f, --force',\n\t\t\t'Skip confirmation prompt when removing rules (dangerous)'\n\t\t)\n\t\t.option(\n\t\t\t`--${RULES_SETUP_ACTION}`,\n\t\t\t'Run interactive setup to select rule profiles to add'\n\t\t)\n\t\t.addHelpText(\n\t\t\t'after',\n\t\t\t`\n\t\tExamples:\n\t\t$ task-master rules ${RULES_ACTIONS.ADD} windsurf roo # Add Windsurf and Roo rule sets\n\t\t$ task-master rules ${RULES_ACTIONS.REMOVE} windsurf # Remove Windsurf rule set\n\t\t$ task-master rules --${RULES_SETUP_ACTION} # Interactive setup to select rule profiles`\n\t\t)\n\t\t.action(async (action, profiles, options) => {\n\t\t\tconst taskMaster = initTaskMaster({});\n\t\t\tconst projectRoot = taskMaster.getProjectRoot();\n\t\t\tif (!projectRoot) {\n\t\t\t\tconsole.error(chalk.red('Error: Could not find project root.'));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t/**\n\t\t\t * 'task-master rules --setup' action:\n\t\t\t *\n\t\t\t * Launches an interactive prompt to select which rule profiles to add to the current project.\n\t\t\t * This does NOT perform project initialization or ask about shell aliases—only rules selection.\n\t\t\t *\n\t\t\t * Example usage:\n\t\t\t * $ task-master rules --setup\n\t\t\t *\n\t\t\t * Useful for adding rules after project creation.\n\t\t\t *\n\t\t\t * The list of profiles is always up-to-date with the available profiles.\n\t\t\t */\n\t\t\tif (options[RULES_SETUP_ACTION]) {\n\t\t\t\t// Run interactive rules setup ONLY (no project init)\n\t\t\t\tconst selectedRuleProfiles = await runInteractiveProfilesSetup();\n\n\t\t\t\tif (!selectedRuleProfiles || selectedRuleProfiles.length === 0) {\n\t\t\t\t\tconsole.log(chalk.yellow('No profiles selected. 
Exiting.'));\n\t\t\t\t\treturn;\n\t\t\t\t}\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t`Installing ${selectedRuleProfiles.length} selected profile(s)...`\n\t\t\t\t\t)\n\t\t\t\t);\n\n\t\t\t\tfor (let i = 0; i < selectedRuleProfiles.length; i++) {\n\t\t\t\t\tconst profile = selectedRuleProfiles[i];\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t\t`Processing profile ${i + 1}/${selectedRuleProfiles.length}: ${profile}...`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\n\t\t\t\t\tif (!isValidProfile(profile)) {\n\t\t\t\t\t\tconsole.warn(\n\t\t\t\t\t\t\t`Rule profile for \"${profile}\" not found. Valid profiles: ${RULE_PROFILES.join(', ')}. Skipping.`\n\t\t\t\t\t\t);\n\t\t\t\t\t\tcontinue;\n\t\t\t\t\t}\n\t\t\t\t\tconst profileConfig = getRulesProfile(profile);\n\n\t\t\t\t\tconst addResult = convertAllRulesToProfileRules(\n\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\tprofileConfig\n\t\t\t\t\t);\n\n\t\t\t\t\tconsole.log(chalk.green(generateProfileSummary(profile, addResult)));\n\t\t\t\t}\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t`\\nCompleted installation of all ${selectedRuleProfiles.length} profile(s).`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Validate action for non-setup mode\n\t\t\tif (!action || !isValidRulesAction(action)) {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t`Error: Invalid or missing action '${action || 'none'}'. 
Valid actions are: ${Object.values(RULES_ACTIONS).join(', ')}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`For interactive setup, use: task-master rules --${RULES_SETUP_ACTION}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\tif (!profiles || profiles.length === 0) {\n\t\t\t\tconsole.error(\n\t\t\t\t\t'Please specify at least one rule profile (e.g., windsurf, roo).'\n\t\t\t\t);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\n\t\t\t// Support both space- and comma-separated profile lists\n\t\t\tconst expandedProfiles = profiles\n\t\t\t\t.flatMap((b) => b.split(',').map((s) => s.trim()))\n\t\t\t\t.filter(Boolean);\n\n\t\t\tif (action === RULES_ACTIONS.REMOVE) {\n\t\t\t\tlet confirmed = true;\n\t\t\t\tif (!options.force) {\n\t\t\t\t\t// Check if this removal would leave no profiles remaining\n\t\t\t\t\tif (wouldRemovalLeaveNoProfiles(projectRoot, expandedProfiles)) {\n\t\t\t\t\t\tconst installedProfiles = getInstalledProfiles(projectRoot);\n\t\t\t\t\t\tconfirmed = await confirmRemoveAllRemainingProfiles(\n\t\t\t\t\t\t\texpandedProfiles,\n\t\t\t\t\t\t\tinstalledProfiles\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconfirmed = await confirmProfilesRemove(expandedProfiles);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (!confirmed) {\n\t\t\t\t\tconsole.log(chalk.yellow('Aborted: No rules were removed.'));\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconst removalResults = [];\n\t\t\tconst addResults = [];\n\n\t\t\tfor (const profile of expandedProfiles) {\n\t\t\t\tif (!isValidProfile(profile)) {\n\t\t\t\t\tconsole.warn(\n\t\t\t\t\t\t`Rule profile for \"${profile}\" not found. Valid profiles: ${RULE_PROFILES.join(', ')}. 
Skipping.`\n\t\t\t\t\t);\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tconst profileConfig = getRulesProfile(profile);\n\n\t\t\t\tif (action === RULES_ACTIONS.ADD) {\n\t\t\t\t\tconsole.log(chalk.blue(`Adding rules for profile: ${profile}...`));\n\t\t\t\t\tconst addResult = convertAllRulesToProfileRules(\n\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\tprofileConfig\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.blue(`Completed adding rules for profile: ${profile}`)\n\t\t\t\t\t);\n\n\t\t\t\t\t// Store result with profile name for summary\n\t\t\t\t\taddResults.push({\n\t\t\t\t\t\tprofileName: profile,\n\t\t\t\t\t\tsuccess: addResult.success,\n\t\t\t\t\t\tfailed: addResult.failed\n\t\t\t\t\t});\n\n\t\t\t\t\tconsole.log(chalk.green(generateProfileSummary(profile, addResult)));\n\t\t\t\t} else if (action === RULES_ACTIONS.REMOVE) {\n\t\t\t\t\tconsole.log(chalk.blue(`Removing rules for profile: ${profile}...`));\n\t\t\t\t\tconst result = removeProfileRules(projectRoot, profileConfig);\n\t\t\t\t\tremovalResults.push(result);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(generateProfileRemovalSummary(profile, result))\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t`Unknown action. 
Use \"${RULES_ACTIONS.ADD}\" or \"${RULES_ACTIONS.REMOVE}\".`\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Print summary for additions\n\t\t\tif (action === RULES_ACTIONS.ADD && addResults.length > 0) {\n\t\t\t\tconst { allSuccessfulProfiles, totalSuccess, totalFailed } =\n\t\t\t\t\tcategorizeProfileResults(addResults);\n\n\t\t\t\tif (allSuccessfulProfiles.length > 0) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t`\\nSuccessfully processed profiles: ${allSuccessfulProfiles.join(', ')}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\n\t\t\t\t\t// Create a descriptive summary\n\t\t\t\t\tif (totalSuccess > 0) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t\t`Total: ${totalSuccess} files processed, ${totalFailed} failed.`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t\t`Total: ${allSuccessfulProfiles.length} profile(s) set up successfully.`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Print summary for removals\n\t\t\tif (action === RULES_ACTIONS.REMOVE && removalResults.length > 0) {\n\t\t\t\tconst {\n\t\t\t\t\tsuccessfulRemovals,\n\t\t\t\t\tskippedRemovals,\n\t\t\t\t\tfailedRemovals,\n\t\t\t\t\tremovalsWithNotices\n\t\t\t\t} = categorizeRemovalResults(removalResults);\n\n\t\t\t\tif (successfulRemovals.length > 0) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t`\\nSuccessfully removed profiles for: ${successfulRemovals.join(', ')}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tif (skippedRemovals.length > 0) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`Skipped (default or protected): ${skippedRemovals.join(', ')}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tif (failedRemovals.length > 0) {\n\t\t\t\t\tconsole.log(chalk.red('\\nErrors occurred:'));\n\t\t\t\t\tfailedRemovals.forEach((r) => {\n\t\t\t\t\t\tconsole.log(chalk.red(` ${r.profileName}: 
${r.error}`));\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t\t// Display notices about preserved files/configurations\n\t\t\t\tif (removalsWithNotices.length > 0) {\n\t\t\t\t\tconsole.log(chalk.cyan('\\nNotices:'));\n\t\t\t\t\tremovalsWithNotices.forEach((r) => {\n\t\t\t\t\t\tconsole.log(chalk.cyan(` ${r.profileName}: ${r.notice}`));\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\t// Overall summary\n\t\t\t\tconst totalProcessed = removalResults.length;\n\t\t\t\tconst totalSuccessful = successfulRemovals.length;\n\t\t\t\tconst totalSkipped = skippedRemovals.length;\n\t\t\t\tconst totalFailed = failedRemovals.length;\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.blue(\n\t\t\t\t\t\t`\\nTotal: ${totalProcessed} profile(s) processed - ${totalSuccessful} removed, ${totalSkipped} skipped, ${totalFailed} failed.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t});\n\n\tprogramInstance\n\t\t.command('migrate')\n\t\t.description(\n\t\t\t'Migrate existing project to use the new .taskmaster directory structure'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --force',\n\t\t\t'Force migration even if .taskmaster directory already exists'\n\t\t)\n\t\t.option(\n\t\t\t'--backup',\n\t\t\t'Create backup of old files before migration (default: false)',\n\t\t\tfalse\n\t\t)\n\t\t.option(\n\t\t\t'--cleanup',\n\t\t\t'Remove old files after successful migration (default: true)',\n\t\t\ttrue\n\t\t)\n\t\t.option('-y, --yes', 'Skip confirmation prompts')\n\t\t.option(\n\t\t\t'--dry-run',\n\t\t\t'Show what would be migrated without actually moving files'\n\t\t)\n\t\t.action(async (options) => {\n\t\t\ttry {\n\t\t\t\tawait migrateProject(options);\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red('Error during migration:'), error.message);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// sync-readme command\n\tprogramInstance\n\t\t.command('sync-readme')\n\t\t.description('Sync the current task list to README.md in the project root')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks 
file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--with-subtasks', 'Include subtasks in the README output')\n\t\t.option(\n\t\t\t'-s, --status <status>',\n\t\t\t'Show only tasks matching this status (e.g., pending, done)'\n\t\t)\n\t\t.option('-t, --tag <tag>', 'Tag to use for the task list (default: master)')\n\t\t.action(async (options) => {\n\t\t\t// Initialize TaskMaster\n\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\ttasksPath: options.file || true,\n\t\t\t\ttag: options.tag\n\t\t\t});\n\n\t\t\tconst withSubtasks = options.withSubtasks || false;\n\t\t\tconst status = options.status || null;\n\n\t\t\tconst tag = taskMaster.getCurrentTag();\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(\n\t\t\t\t\t`📝 Syncing tasks to README.md${withSubtasks ? ' (with subtasks)' : ''}${status ? ` (status: ${status})` : ''}...`\n\t\t\t\t)\n\t\t\t);\n\n\t\t\tconst success = await syncTasksToReadme(taskMaster.getProjectRoot(), {\n\t\t\t\twithSubtasks,\n\t\t\t\tstatus,\n\t\t\t\ttasksPath: taskMaster.getTasksPath(),\n\t\t\t\ttag\n\t\t\t});\n\n\t\t\tif (!success) {\n\t\t\t\tconsole.error(chalk.red('❌ Failed to sync tasks to README.md'));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\t// ===== TAG MANAGEMENT COMMANDS =====\n\n\t// add-tag command\n\tprogramInstance\n\t\t.command('add-tag')\n\t\t.description('Create a new tag context for organizing tasks')\n\t\t.argument(\n\t\t\t'[tagName]',\n\t\t\t'Name of the new tag to create (optional when using --from-branch)'\n\t\t)\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option(\n\t\t\t'--copy-from-current',\n\t\t\t'Copy tasks from the current tag to the new tag'\n\t\t)\n\t\t.option(\n\t\t\t'--copy-from <tag>',\n\t\t\t'Copy tasks from the specified tag to the new tag'\n\t\t)\n\t\t.option(\n\t\t\t'--from-branch',\n\t\t\t'Create tag name from current git branch (ignores tagName argument)'\n\t\t)\n\t\t.option('-d, --description <text>', 'Optional description for the 
tag')\n\t\t.action(async (tagName, options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'Hint: Run task-master init or task-master parse-prd to create tasks.json first'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\t// Validate that either tagName is provided or --from-branch is used\n\t\t\t\tif (!tagName && !options.fromBranch) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t'Error: Either tagName argument or --from-branch option is required.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(chalk.yellow('Usage examples:'));\n\t\t\t\t\tconsole.log(chalk.cyan(' task-master add-tag my-tag'));\n\t\t\t\t\tconsole.log(chalk.cyan(' task-master add-tag --from-branch'));\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst context = {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\tcommandName: 'add-tag',\n\t\t\t\t\toutputType: 'cli'\n\t\t\t\t};\n\n\t\t\t\t// Handle --from-branch option\n\t\t\t\tif (options.fromBranch) {\n\t\t\t\t\tconst { createTagFromBranch } = await import(\n\t\t\t\t\t\t'./task-manager/tag-management.js'\n\t\t\t\t\t);\n\t\t\t\t\tconst gitUtils = await import('./utils/git-utils.js');\n\n\t\t\t\t\t// Check if we're in a git repository\n\t\t\t\t\tif (!(await gitUtils.isGitRepository(projectRoot))) {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t\t'Error: Not in a git repository. 
Cannot use --from-branch option.'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Get current git branch\n\t\t\t\t\tconst currentBranch = await gitUtils.getCurrentBranch(projectRoot);\n\t\t\t\t\tif (!currentBranch) {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\tchalk.red('Error: Could not determine current git branch.')\n\t\t\t\t\t\t);\n\t\t\t\t\t\tprocess.exit(1);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Create tag from branch\n\t\t\t\t\tconst branchOptions = {\n\t\t\t\t\t\tcopyFromCurrent: options.copyFromCurrent || false,\n\t\t\t\t\t\tcopyFromTag: options.copyFrom,\n\t\t\t\t\t\tdescription:\n\t\t\t\t\t\t\toptions.description ||\n\t\t\t\t\t\t\t`Tag created from git branch \"${currentBranch}\"`\n\t\t\t\t\t};\n\n\t\t\t\t\tawait createTagFromBranch(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\tcurrentBranch,\n\t\t\t\t\t\tbranchOptions,\n\t\t\t\t\t\tcontext,\n\t\t\t\t\t\t'text'\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\t// Regular tag creation\n\t\t\t\t\tconst createOptions = {\n\t\t\t\t\t\tcopyFromCurrent: options.copyFromCurrent || false,\n\t\t\t\t\t\tcopyFromTag: options.copyFrom,\n\t\t\t\t\t\tdescription: options.description\n\t\t\t\t\t};\n\n\t\t\t\t\tawait createTag(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\ttagName,\n\t\t\t\t\t\tcreateOptions,\n\t\t\t\t\t\tcontext,\n\t\t\t\t\t\t'text'\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Handle auto-switch if requested\n\t\t\t\tif (options.autoSwitch) {\n\t\t\t\t\tconst { useTag } = await import('./task-manager/tag-management.js');\n\t\t\t\t\tconst finalTagName = options.fromBranch\n\t\t\t\t\t\t? 
(await import('./utils/git-utils.js')).sanitizeBranchNameForTag(\n\t\t\t\t\t\t\t\tawait (await import('./utils/git-utils.js')).getCurrentBranch(\n\t\t\t\t\t\t\t\t\tprojectRoot\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t: tagName;\n\t\t\t\t\tawait useTag(\n\t\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\t\tfinalTagName,\n\t\t\t\t\t\t{},\n\t\t\t\t\t\tcontext,\n\t\t\t\t\t\t'text'\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error creating tag: ${error.message}`));\n\t\t\t\tshowAddTagHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tshowAddTagHelp();\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// delete-tag command\n\tprogramInstance\n\t\t.command('delete-tag')\n\t\t.description('Delete an existing tag and all its tasks')\n\t\t.argument('<tagName>', 'Name of the tag to delete')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('-y, --yes', 'Skip confirmation prompts')\n\t\t.action(async (tagName, options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst deleteOptions = {\n\t\t\t\t\tyes: options.yes || false\n\t\t\t\t};\n\n\t\t\t\tconst context = {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\tcommandName: 'delete-tag',\n\t\t\t\t\toutputType: 'cli'\n\t\t\t\t};\n\n\t\t\t\tawait deleteTag(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\ttagName,\n\t\t\t\t\tdeleteOptions,\n\t\t\t\t\tcontext,\n\t\t\t\t\t'text'\n\t\t\t\t);\n\t\t\t} catch 
(error) {\n\t\t\t\tconsole.error(chalk.red(`Error deleting tag: ${error.message}`));\n\t\t\t\tshowDeleteTagHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tshowDeleteTagHelp();\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// tags command\n\tprogramInstance\n\t\t.command('tags')\n\t\t.description('List all available tags with metadata')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('--show-metadata', 'Show detailed metadata for each tag')\n\t\t.option('--tag <tag>', 'Specify tag context for task operations')\n\t\t.action(async (options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true,\n\t\t\t\t\ttag: options.tag\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst listOptions = {\n\t\t\t\t\tshowTaskCounts: true,\n\t\t\t\t\tshowMetadata: options.showMetadata || false\n\t\t\t\t};\n\n\t\t\t\tconst context = {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\tcommandName: 'tags',\n\t\t\t\t\toutputType: 'cli'\n\t\t\t\t};\n\n\t\t\t\tawait tags(taskMaster.getTasksPath(), listOptions, context, 'text');\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error listing tags: ${error.message}`));\n\t\t\t\tshowTagsHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tshowTagsHelp();\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// use-tag command\n\tprogramInstance\n\t\t.command('use-tag')\n\t\t.description('Switch to a different tag 
context')\n\t\t.argument('<tagName>', 'Name of the tag to switch to')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.action(async (tagName, options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst context = {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\tcommandName: 'use-tag',\n\t\t\t\t\toutputType: 'cli'\n\t\t\t\t};\n\n\t\t\t\tawait useTag(taskMaster.getTasksPath(), tagName, {}, context, 'text');\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error switching tag: ${error.message}`));\n\t\t\t\tshowUseTagHelp();\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tshowUseTagHelp();\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// rename-tag command\n\tprogramInstance\n\t\t.command('rename-tag')\n\t\t.description('Rename an existing tag')\n\t\t.argument('<oldName>', 'Current name of the tag')\n\t\t.argument('<newName>', 'New name for the tag')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.action(async (oldName, newName, options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: 
${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst context = {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\tcommandName: 'rename-tag',\n\t\t\t\t\toutputType: 'cli'\n\t\t\t\t};\n\n\t\t\t\tawait renameTag(\n\t\t\t\t\ttaskMaster.getTasksPath(),\n\t\t\t\t\toldName,\n\t\t\t\t\tnewName,\n\t\t\t\t\t{},\n\t\t\t\t\tcontext,\n\t\t\t\t\t'text'\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error renaming tag: ${error.message}`));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tprocess.exit(1);\n\t\t});\n\n\t// copy-tag command\n\tprogramInstance\n\t\t.command('copy-tag')\n\t\t.description('Copy an existing tag to create a new tag with the same tasks')\n\t\t.argument('<sourceName>', 'Name of the source tag to copy from')\n\t\t.argument('<targetName>', 'Name of the new tag to create')\n\t\t.option(\n\t\t\t'-f, --file <file>',\n\t\t\t'Path to the tasks file',\n\t\t\tTASKMASTER_TASKS_FILE\n\t\t)\n\t\t.option('-d, --description <text>', 'Optional description for the new tag')\n\t\t.action(async (sourceName, targetName, options) => {\n\t\t\ttry {\n\t\t\t\t// Initialize TaskMaster\n\t\t\t\tconst taskMaster = initTaskMaster({\n\t\t\t\t\ttasksPath: options.file || true\n\t\t\t\t});\n\t\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\n\t\t\t\t// Validate tasks file exists\n\t\t\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(`Error: Tasks file not found at path: ${tasksPath}`)\n\t\t\t\t\t);\n\t\t\t\t\tprocess.exit(1);\n\t\t\t\t}\n\n\t\t\t\tconst copyOptions = {\n\t\t\t\t\tdescription: options.description\n\t\t\t\t};\n\n\t\t\t\tconst context = {\n\t\t\t\t\tprojectRoot: taskMaster.getProjectRoot(),\n\t\t\t\t\tcommandName: 'copy-tag',\n\t\t\t\t\toutputType: 'cli'\n\t\t\t\t};\n\n\t\t\t\tawait 
copyTag(\n\t\t\t\t\ttasksPath,\n\t\t\t\t\tsourceName,\n\t\t\t\t\ttargetName,\n\t\t\t\t\tcopyOptions,\n\t\t\t\t\tcontext,\n\t\t\t\t\t'text'\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tconsole.error(chalk.red(`Error copying tag: ${error.message}`));\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t})\n\t\t.on('error', function (err) {\n\t\t\tconsole.error(chalk.red(`Error: ${err.message}`));\n\t\t\tprocess.exit(1);\n\t\t});\n\n\treturn programInstance;\n}\n\n/**\n * Setup the CLI application\n * @returns {Object} Configured Commander program\n */\nfunction setupCLI() {\n\t// Create a new program instance\n\tconst programInstance = program\n\t\t.name('dev')\n\t\t.description('AI-driven development task management')\n\t\t.version(() => {\n\t\t\t// Read version directly from package.json ONLY\n\t\t\ttry {\n\t\t\t\tconst packageJsonPath = path.join(process.cwd(), 'package.json');\n\t\t\t\tif (fs.existsSync(packageJsonPath)) {\n\t\t\t\t\tconst packageJson = JSON.parse(\n\t\t\t\t\t\tfs.readFileSync(packageJsonPath, 'utf8')\n\t\t\t\t\t);\n\t\t\t\t\treturn packageJson.version;\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\t// Silently fall back to 'unknown'\n\t\t\t\tlog(\n\t\t\t\t\t'warn',\n\t\t\t\t\t'Could not read package.json for version info in .version()'\n\t\t\t\t);\n\t\t\t}\n\t\t\treturn 'unknown'; // Default fallback if package.json fails\n\t\t})\n\t\t.helpOption('-h, --help', 'Display help')\n\t\t.addHelpCommand(false); // Disable default help command\n\n\t// Only override help for the main program, not for individual commands\n\tconst originalHelpInformation =\n\t\tprogramInstance.helpInformation.bind(programInstance);\n\tprogramInstance.helpInformation = function () {\n\t\t// If this is being called for a subcommand, use the default Commander.js help\n\t\tif (this.parent && this.parent !== programInstance) {\n\t\t\treturn originalHelpInformation();\n\t\t}\n\t\t// If this is the main program help, use our custom display\n\t\tdisplayHelp();\n\t\treturn '';\n\t};\n\n\t// 
Register commands\n\tregisterCommands(programInstance);\n\n\treturn programInstance;\n}\n\n/**\n * Check for newer version of task-master-ai\n * @returns {Promise<{currentVersion: string, latestVersion: string, needsUpdate: boolean}>}\n */\nasync function checkForUpdate() {\n\t// Get current version from package.json ONLY\n\tconst currentVersion = getTaskMasterVersion();\n\n\treturn new Promise((resolve) => {\n\t\t// Get the latest version from npm registry\n\t\tconst options = {\n\t\t\thostname: 'registry.npmjs.org',\n\t\t\tpath: '/task-master-ai',\n\t\t\tmethod: 'GET',\n\t\t\theaders: {\n\t\t\t\tAccept: 'application/vnd.npm.install-v1+json' // Lightweight response\n\t\t\t}\n\t\t};\n\n\t\tconst req = https.request(options, (res) => {\n\t\t\tlet data = '';\n\n\t\t\tres.on('data', (chunk) => {\n\t\t\t\tdata += chunk;\n\t\t\t});\n\n\t\t\tres.on('end', () => {\n\t\t\t\ttry {\n\t\t\t\t\tconst npmData = JSON.parse(data);\n\t\t\t\t\tconst latestVersion = npmData['dist-tags']?.latest || currentVersion;\n\n\t\t\t\t\t// Compare versions\n\t\t\t\t\tconst needsUpdate =\n\t\t\t\t\t\tcompareVersions(currentVersion, latestVersion) < 0;\n\n\t\t\t\t\tresolve({\n\t\t\t\t\t\tcurrentVersion,\n\t\t\t\t\t\tlatestVersion,\n\t\t\t\t\t\tneedsUpdate\n\t\t\t\t\t});\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog('debug', `Error parsing npm response: ${error.message}`);\n\t\t\t\t\tresolve({\n\t\t\t\t\t\tcurrentVersion,\n\t\t\t\t\t\tlatestVersion: currentVersion,\n\t\t\t\t\t\tneedsUpdate: false\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t});\n\t\t});\n\n\t\treq.on('error', (error) => {\n\t\t\tlog('debug', `Error checking for updates: ${error.message}`);\n\t\t\tresolve({\n\t\t\t\tcurrentVersion,\n\t\t\t\tlatestVersion: currentVersion,\n\t\t\t\tneedsUpdate: false\n\t\t\t});\n\t\t});\n\n\t\t// Set a timeout to avoid hanging if npm is slow\n\t\treq.setTimeout(3000, () => {\n\t\t\treq.abort();\n\t\t\tlog('debug', 'Update check timed out');\n\t\t\tresolve({\n\t\t\t\tcurrentVersion,\n\t\t\t\tlatestVersion: 
currentVersion,\n\t\t\t\tneedsUpdate: false\n\t\t\t});\n\t\t});\n\n\t\treq.end();\n\t});\n}\n\n/**\n * Compare semantic versions\n * @param {string} v1 - First version\n * @param {string} v2 - Second version\n * @returns {number} -1 if v1 < v2, 0 if v1 = v2, 1 if v1 > v2\n */\nfunction compareVersions(v1, v2) {\n\tconst v1Parts = v1.split('.').map((p) => parseInt(p, 10));\n\tconst v2Parts = v2.split('.').map((p) => parseInt(p, 10));\n\n\tfor (let i = 0; i < Math.max(v1Parts.length, v2Parts.length); i++) {\n\t\tconst v1Part = v1Parts[i] || 0;\n\t\tconst v2Part = v2Parts[i] || 0;\n\n\t\tif (v1Part < v2Part) return -1;\n\t\tif (v1Part > v2Part) return 1;\n\t}\n\n\treturn 0;\n}\n\n/**\n * Display upgrade notification message\n * @param {string} currentVersion - Current version\n * @param {string} latestVersion - Latest version\n */\nfunction displayUpgradeNotification(currentVersion, latestVersion) {\n\tconst message = boxen(\n\t\t`${chalk.blue.bold('Update Available!')} ${chalk.dim(currentVersion)} → ${chalk.green(latestVersion)}\\n\\n` +\n\t\t\t`Run ${chalk.cyan('npm i task-master-ai@latest -g')} to update to the latest version with new features and bug fixes.`,\n\t\t{\n\t\t\tpadding: 1,\n\t\t\tmargin: { top: 1, bottom: 1 },\n\t\t\tborderColor: 'yellow',\n\t\t\tborderStyle: 'round'\n\t\t}\n\t);\n\n\tconsole.log(message);\n}\n\n/**\n * Parse arguments and run the CLI\n * @param {Array} argv - Command-line arguments\n */\nasync function runCLI(argv = process.argv) {\n\ttry {\n\t\t// Display banner if not in a pipe\n\t\tif (process.stdout.isTTY) {\n\t\t\tdisplayBanner();\n\t\t}\n\n\t\t// If no arguments provided, show help\n\t\tif (argv.length <= 2) {\n\t\t\tdisplayHelp();\n\t\t\tprocess.exit(0);\n\t\t}\n\n\t\t// Start the update check in the background - don't await yet\n\t\tconst updateCheckPromise = checkForUpdate();\n\n\t\t// Setup and parse\n\t\t// NOTE: getConfig() might be called during setupCLI->registerCommands if commands need config\n\t\t// This means the 
ConfigurationError might be thrown here if configuration file is missing.\n\t\tconst programInstance = setupCLI();\n\t\tawait programInstance.parseAsync(argv);\n\n\t\t// After command execution, check if an update is available\n\t\tconst updateInfo = await updateCheckPromise;\n\t\tif (updateInfo.needsUpdate) {\n\t\t\tdisplayUpgradeNotification(\n\t\t\t\tupdateInfo.currentVersion,\n\t\t\t\tupdateInfo.latestVersion\n\t\t\t);\n\t\t}\n\n\t\t// Check if migration has occurred and show FYI notice once\n\t\ttry {\n\t\t\t// Use initTaskMaster with no required fields - will only fail if no project root\n\t\t\tconst taskMaster = initTaskMaster({});\n\n\t\t\tconst tasksPath = taskMaster.getTasksPath();\n\t\t\tconst statePath = taskMaster.getStatePath();\n\n\t\t\tif (tasksPath && fs.existsSync(tasksPath)) {\n\t\t\t\t// Read raw file to check if it has master key (bypassing tag resolution)\n\t\t\t\tconst rawData = fs.readFileSync(tasksPath, 'utf8');\n\t\t\t\tconst parsedData = JSON.parse(rawData);\n\n\t\t\t\tif (parsedData && parsedData.master) {\n\t\t\t\t\t// Migration has occurred, check if we've shown the notice\n\t\t\t\t\tlet stateData = { migrationNoticeShown: false };\n\t\t\t\t\tif (statePath && fs.existsSync(statePath)) {\n\t\t\t\t\t\t// Read state.json directly without tag resolution since it's not a tagged file\n\t\t\t\t\t\tconst rawStateData = fs.readFileSync(statePath, 'utf8');\n\t\t\t\t\t\tstateData = JSON.parse(rawStateData) || stateData;\n\t\t\t\t\t}\n\n\t\t\t\t\tif (!stateData.migrationNoticeShown) {\n\t\t\t\t\t\tdisplayTaggedTasksFYI({ _migrationHappened: true });\n\n\t\t\t\t\t\t// Mark as shown\n\t\t\t\t\t\tstateData.migrationNoticeShown = true;\n\t\t\t\t\t\t// Write state.json directly without tag resolution since it's not a tagged file\n\t\t\t\t\t\tif (statePath) {\n\t\t\t\t\t\t\tfs.writeFileSync(statePath, JSON.stringify(stateData, null, 2));\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Silently ignore errors checking for 
migration notice\n\t\t}\n\t} catch (error) {\n\t\t// ** Specific catch block for missing configuration file **\n\t\tif (error instanceof ConfigurationError) {\n\t\t\tconsole.error(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.red.bold('Configuration Update Required!') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white('Taskmaster now uses a ') +\n\t\t\t\t\t\tchalk.yellow.bold('configuration file') +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t' in your project for AI model choices and settings.\\n\\n' +\n\t\t\t\t\t\t\t\t'This file appears to be '\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.red.bold('missing') +\n\t\t\t\t\t\tchalk.white('. No worries though.\\n\\n') +\n\t\t\t\t\t\tchalk.cyan.bold('To create this file, run the interactive setup:') +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.green(' task-master models --setup') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white.bold('Key Points:') +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.white('* ') +\n\t\t\t\t\t\tchalk.yellow.bold('Configuration file') +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t': Stores your AI model settings (do not manually edit)\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white('* ') +\n\t\t\t\t\t\tchalk.yellow.bold('.env & .mcp.json') +\n\t\t\t\t\t\tchalk.white(': Still used ') +\n\t\t\t\t\t\tchalk.red.bold('only') +\n\t\t\t\t\t\tchalk.white(' for your AI provider API keys.\\n\\n') +\n\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t'`task-master models` to check your config & available models\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t'`task-master models --setup` to adjust the AI models used by Taskmaster'\n\t\t\t\t\t\t),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tmargin: { top: 1 },\n\t\t\t\t\t\tborderColor: 'red',\n\t\t\t\t\t\tborderStyle: 'round'\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t} else {\n\t\t\t// Generic error handling for other errors\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\tif (getDebugFlag()) 
{\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\t\t}\n\n\t\tprocess.exit(1);\n\t}\n}\n\n/**\n * Resolve the final complexity-report path.\n * Rules:\n * 1. If caller passes --output, always respect it.\n * 2. If no explicit output AND tag === 'master' → default report file\n * 3. If no explicit output AND tag !== 'master' → append _<tag>.json\n *\n * @param {string|undefined} outputOpt --output value from CLI (may be undefined)\n * @param {string} targetTag resolved tag (defaults to 'master')\n * @param {string} projectRoot absolute project root\n * @returns {string} absolute path for the report\n */\nexport function resolveComplexityReportPath({\n\tprojectRoot,\n\ttag = 'master',\n\toutput // may be undefined\n}) {\n\t// 1. user knows best\n\tif (output) {\n\t\treturn path.isAbsolute(output) ? output : path.join(projectRoot, output);\n\t}\n\n\t// 2. default naming\n\tconst base = path.join(projectRoot, COMPLEXITY_REPORT_FILE);\n\treturn tag !== 'master' ? base.replace('.json', `_${tag}.json`) : base;\n}\n\nexport {\n\tregisterCommands,\n\tsetupCLI,\n\trunCLI,\n\tcheckForUpdate,\n\tcompareVersions,\n\tdisplayUpgradeNotification\n};\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/show-task.js", "/**\n * show-task.js\n * Direct function implementation for showing task details\n */\n\nimport {\n\tfindTaskById,\n\treadComplexityReport,\n\treadJSON\n} from '../../../../scripts/modules/utils.js';\nimport { findTasksPath } from '../utils/path-utils.js';\n\n/**\n * Direct function wrapper for getting task details.\n *\n * @param {Object} args - Command arguments.\n * @param {string} args.id - Task ID to show.\n * @param {string} [args.file] - Optional path to the tasks file (passed to findTasksPath).\n * @param {string} args.reportPath - Explicit path to the complexity report file.\n * @param {string} [args.status] - Optional status to filter subtasks by.\n * @param {string} args.projectRoot - Absolute path to the project root directory (already normalized by 
tool).\n * @param {string} [args.tag] - Tag for the task\n * @param {Object} log - Logger object.\n * @param {Object} context - Context object containing session data.\n * @returns {Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function showTaskDirect(args, log) {\n\t// This function doesn't need session context since it only reads data\n\t// Destructure projectRoot and other args. projectRoot is assumed normalized.\n\tconst { id, file, reportPath, status, projectRoot, tag } = args;\n\n\tlog.info(\n\t\t`Showing task direct function. ID: ${id}, File: ${file}, Status Filter: ${status}, ProjectRoot: ${projectRoot}`\n\t);\n\n\t// --- Path Resolution using the passed (already normalized) projectRoot ---\n\tlet tasksJsonPath;\n\ttry {\n\t\t// Use the projectRoot passed directly from args\n\t\ttasksJsonPath = findTasksPath(\n\t\t\t{ projectRoot: projectRoot, file: file },\n\t\t\tlog\n\t\t);\n\t\tlog.info(`Resolved tasks path: ${tasksJsonPath}`);\n\t} catch (error) {\n\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'TASKS_FILE_NOT_FOUND',\n\t\t\t\tmessage: `Failed to find tasks.json: ${error.message}`\n\t\t\t}\n\t\t};\n\t}\n\t// --- End Path Resolution ---\n\n\t// --- Rest of the function remains the same, using tasksJsonPath ---\n\ttry {\n\t\tconst tasksData = readJSON(tasksJsonPath, projectRoot, tag);\n\t\tif (!tasksData || !tasksData.tasks) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'INVALID_TASKS_DATA', message: 'Invalid tasks data' }\n\t\t\t};\n\t\t}\n\n\t\tconst complexityReport = readComplexityReport(reportPath);\n\n\t\t// Parse comma-separated IDs\n\t\tconst taskIds = id\n\t\t\t.split(',')\n\t\t\t.map((taskId) => taskId.trim())\n\t\t\t.filter((taskId) => taskId.length > 0);\n\n\t\tif (taskIds.length === 0) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 
'INVALID_TASK_ID',\n\t\t\t\t\tmessage: 'No valid task IDs provided'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Handle single task ID (existing behavior)\n\t\tif (taskIds.length === 1) {\n\t\t\tconst { task, originalSubtaskCount } = findTaskById(\n\t\t\t\ttasksData.tasks,\n\t\t\t\ttaskIds[0],\n\t\t\t\tcomplexityReport,\n\t\t\t\tstatus\n\t\t\t);\n\n\t\t\tif (!task) {\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'TASK_NOT_FOUND',\n\t\t\t\t\t\tmessage: `Task or subtask with ID ${taskIds[0]} not found`\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tlog.info(`Successfully retrieved task ${taskIds[0]}.`);\n\n\t\t\tconst returnData = { ...task };\n\t\t\tif (originalSubtaskCount !== null) {\n\t\t\t\treturnData._originalSubtaskCount = originalSubtaskCount;\n\t\t\t\treturnData._subtaskFilter = status;\n\t\t\t}\n\n\t\t\treturn { success: true, data: returnData };\n\t\t}\n\n\t\t// Handle multiple task IDs\n\t\tconst foundTasks = [];\n\t\tconst notFoundIds = [];\n\n\t\ttaskIds.forEach((taskId) => {\n\t\t\tconst { task, originalSubtaskCount } = findTaskById(\n\t\t\t\ttasksData.tasks,\n\t\t\t\ttaskId,\n\t\t\t\tcomplexityReport,\n\t\t\t\tstatus\n\t\t\t);\n\n\t\t\tif (task) {\n\t\t\t\tconst taskData = { ...task };\n\t\t\t\tif (originalSubtaskCount !== null) {\n\t\t\t\t\ttaskData._originalSubtaskCount = originalSubtaskCount;\n\t\t\t\t\ttaskData._subtaskFilter = status;\n\t\t\t\t}\n\t\t\t\tfoundTasks.push(taskData);\n\t\t\t} else {\n\t\t\t\tnotFoundIds.push(taskId);\n\t\t\t}\n\t\t});\n\n\t\tlog.info(\n\t\t\t`Successfully retrieved ${foundTasks.length} of ${taskIds.length} requested tasks.`\n\t\t);\n\n\t\t// Return multiple tasks with metadata\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\ttasks: foundTasks,\n\t\t\t\trequestedIds: taskIds,\n\t\t\t\tfoundCount: foundTasks.length,\n\t\t\t\tnotFoundIds: notFoundIds,\n\t\t\t\tisMultiple: true\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\tlog.error(`Error showing task ${id}: 
${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'TASK_OPERATION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/task-manager/update-tasks.js", "import path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport Table from 'cli-table3';\nimport { z } from 'zod'; // Keep Zod for post-parsing validation\n\nimport {\n\tlog as consoleLog,\n\treadJSON,\n\twriteJSON,\n\ttruncate,\n\tisSilentMode\n} from '../utils.js';\n\nimport {\n\tgetStatusWithColor,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayAiUsageSummary\n} from '../ui.js';\n\nimport { getDebugFlag } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport generateTaskFiles from './generate-task-files.js';\nimport { generateTextService } from '../ai-services-unified.js';\nimport { getModelConfiguration } from './models.js';\nimport { ContextGatherer } from '../utils/contextGatherer.js';\nimport { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';\nimport { flattenTasksWithSubtasks, findProjectRoot } from '../utils.js';\n\n// Zod schema for validating the structure of tasks AFTER parsing\nconst updatedTaskSchema = z\n\t.object({\n\t\tid: z.number().int(),\n\t\ttitle: z.string(),\n\t\tdescription: z.string(),\n\t\tstatus: z.string(),\n\t\tdependencies: z.array(z.union([z.number().int(), z.string()])),\n\t\tpriority: z.string().nullable(),\n\t\tdetails: z.string().nullable(),\n\t\ttestStrategy: z.string().nullable(),\n\t\tsubtasks: z.array(z.any()).nullable() // Keep subtasks flexible for now\n\t})\n\t.strip(); // Allow potential extra fields during parsing if needed, then validate structure\n\n// Preprocessing schema that adds defaults before validation\nconst preprocessTaskSchema = z.preprocess((task) => {\n\t// Ensure task is an object\n\tif (typeof task !== 'object' || task === null) {\n\t\treturn {};\n\t}\n\n\t// Return task with 
defaults for missing fields\n\treturn {\n\t\t...task,\n\t\t// Add defaults for required fields if missing\n\t\tid: task.id ?? 0,\n\t\ttitle: task.title ?? 'Untitled Task',\n\t\tdescription: task.description ?? '',\n\t\tstatus: task.status ?? 'pending',\n\t\tdependencies: Array.isArray(task.dependencies) ? task.dependencies : [],\n\t\t// Optional fields - preserve undefined/null distinction\n\t\tpriority: task.hasOwnProperty('priority') ? task.priority : null,\n\t\tdetails: task.hasOwnProperty('details') ? task.details : null,\n\t\ttestStrategy: task.hasOwnProperty('testStrategy')\n\t\t\t? task.testStrategy\n\t\t\t: null,\n\t\tsubtasks: Array.isArray(task.subtasks)\n\t\t\t? task.subtasks\n\t\t\t: task.subtasks === null\n\t\t\t\t? null\n\t\t\t\t: []\n\t};\n}, updatedTaskSchema);\n\nconst updatedTaskArraySchema = z.array(updatedTaskSchema);\nconst preprocessedTaskArraySchema = z.array(preprocessTaskSchema);\n\n/**\n * Parses an array of task objects from AI's text response.\n * @param {string} text - Response text from AI.\n * @param {number} expectedCount - Expected number of tasks.\n * @param {Function | Object} logFn - The logging function or MCP log object.\n * @param {boolean} isMCP - Flag indicating if logFn is MCP logger.\n * @returns {Array} Parsed and validated tasks array.\n * @throws {Error} If parsing or validation fails.\n */\nfunction parseUpdatedTasksFromText(text, expectedCount, logFn, isMCP) {\n\tconst report = (level, ...args) => {\n\t\tif (isMCP) {\n\t\t\tif (typeof logFn[level] === 'function') logFn[level](...args);\n\t\t\telse logFn.info(...args);\n\t\t} else if (!isSilentMode()) {\n\t\t\t// Check silent mode for consoleLog\n\t\t\tconsoleLog(level, ...args);\n\t\t}\n\t};\n\n\treport(\n\t\t'info',\n\t\t'Attempting to parse updated tasks array from text response...'\n\t);\n\tif (!text || text.trim() === '')\n\t\tthrow new Error('AI response text is empty.');\n\n\tlet cleanedResponse = text.trim();\n\tconst originalResponseForDebug = 
cleanedResponse;\n\tlet parseMethodUsed = 'raw'; // Track which method worked\n\n\t// --- NEW Step 1: Try extracting between [] first ---\n\tconst firstBracketIndex = cleanedResponse.indexOf('[');\n\tconst lastBracketIndex = cleanedResponse.lastIndexOf(']');\n\tlet potentialJsonFromArray = null;\n\n\tif (firstBracketIndex !== -1 && lastBracketIndex > firstBracketIndex) {\n\t\tpotentialJsonFromArray = cleanedResponse.substring(\n\t\t\tfirstBracketIndex,\n\t\t\tlastBracketIndex + 1\n\t\t);\n\t\t// Basic check to ensure it's not just \"[]\" or malformed\n\t\tif (potentialJsonFromArray.length <= 2) {\n\t\t\tpotentialJsonFromArray = null; // Ignore empty array\n\t\t}\n\t}\n\n\t// If [] extraction yielded something, try parsing it immediately\n\tif (potentialJsonFromArray) {\n\t\ttry {\n\t\t\tconst testParse = JSON.parse(potentialJsonFromArray);\n\t\t\t// It worked! Use this as the primary cleaned response.\n\t\t\tcleanedResponse = potentialJsonFromArray;\n\t\t\tparseMethodUsed = 'brackets';\n\t\t} catch (e) {\n\t\t\treport(\n\t\t\t\t'info',\n\t\t\t\t'Content between [] looked promising but failed initial parse. 
Proceeding to other methods.'\n\t\t\t);\n\t\t\t// Reset cleanedResponse to original if bracket parsing failed\n\t\t\tcleanedResponse = originalResponseForDebug;\n\t\t}\n\t}\n\n\t// --- Step 2: If bracket parsing didn't work or wasn't applicable, try code block extraction ---\n\tif (parseMethodUsed === 'raw') {\n\t\t// Only look for ```json blocks now\n\t\tconst codeBlockMatch = cleanedResponse.match(\n\t\t\t/```json\\s*([\\s\\S]*?)\\s*```/i // Only match ```json\n\t\t);\n\t\tif (codeBlockMatch) {\n\t\t\tcleanedResponse = codeBlockMatch[1].trim();\n\t\t\tparseMethodUsed = 'codeblock';\n\t\t\treport('info', 'Extracted JSON content from JSON Markdown code block.');\n\t\t} else {\n\t\t\treport('info', 'No JSON code block found.');\n\t\t\t// --- Step 3: If code block failed, try stripping prefixes ---\n\t\t\tconst commonPrefixes = [\n\t\t\t\t'json\\n',\n\t\t\t\t'javascript\\n', // Keep checking common prefixes just in case\n\t\t\t\t'python\\n',\n\t\t\t\t'here are the updated tasks:',\n\t\t\t\t'here is the updated json:',\n\t\t\t\t'updated tasks:',\n\t\t\t\t'updated json:',\n\t\t\t\t'response:',\n\t\t\t\t'output:'\n\t\t\t];\n\t\t\tlet prefixFound = false;\n\t\t\tfor (const prefix of commonPrefixes) {\n\t\t\t\tif (cleanedResponse.toLowerCase().startsWith(prefix)) {\n\t\t\t\t\tcleanedResponse = cleanedResponse.substring(prefix.length).trim();\n\t\t\t\t\tparseMethodUsed = 'prefix';\n\t\t\t\t\treport('info', `Stripped prefix: \"${prefix.trim()}\"`);\n\t\t\t\t\tprefixFound = true;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (!prefixFound) {\n\t\t\t\treport(\n\t\t\t\t\t'warn',\n\t\t\t\t\t'Response does not appear to contain [], JSON code block, or known prefix. 
Attempting raw parse.'\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t}\n\n\t// --- Step 4: Attempt final parse ---\n\tlet parsedTasks;\n\ttry {\n\t\tparsedTasks = JSON.parse(cleanedResponse);\n\t} catch (parseError) {\n\t\treport('error', `Failed to parse JSON array: ${parseError.message}`);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Extraction method used: ${parseMethodUsed}` // Log which method failed\n\t\t);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Problematic JSON string (first 500 chars): ${cleanedResponse.substring(0, 500)}`\n\t\t);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Original Raw Response (first 500 chars): ${originalResponseForDebug.substring(0, 500)}`\n\t\t);\n\t\tthrow new Error(\n\t\t\t`Failed to parse JSON response array: ${parseError.message}`\n\t\t);\n\t}\n\n\t// --- Step 5 & 6: Validate Array structure and Zod schema ---\n\tif (!Array.isArray(parsedTasks)) {\n\t\treport(\n\t\t\t'error',\n\t\t\t`Parsed content is not an array. Type: ${typeof parsedTasks}`\n\t\t);\n\t\treport(\n\t\t\t'error',\n\t\t\t`Parsed content sample: ${JSON.stringify(parsedTasks).substring(0, 200)}`\n\t\t);\n\t\tthrow new Error('Parsed AI response is not a valid JSON array.');\n\t}\n\n\treport('info', `Successfully parsed ${parsedTasks.length} potential tasks.`);\n\tif (expectedCount && parsedTasks.length !== expectedCount) {\n\t\treport(\n\t\t\t'warn',\n\t\t\t`Expected ${expectedCount} tasks, but parsed ${parsedTasks.length}.`\n\t\t);\n\t}\n\n\t// Log missing fields for debugging before preprocessing\n\tlet hasWarnings = false;\n\tparsedTasks.forEach((task, index) => {\n\t\tconst missingFields = [];\n\t\tif (!task.hasOwnProperty('id')) missingFields.push('id');\n\t\tif (!task.hasOwnProperty('status')) missingFields.push('status');\n\t\tif (!task.hasOwnProperty('dependencies'))\n\t\t\tmissingFields.push('dependencies');\n\n\t\tif (missingFields.length > 0) {\n\t\t\thasWarnings = true;\n\t\t\treport(\n\t\t\t\t'warn',\n\t\t\t\t`Task ${index} is missing fields: ${missingFields.join(', ')} - will use 
defaults`\n\t\t\t);\n\t\t}\n\t});\n\n\tif (hasWarnings) {\n\t\treport(\n\t\t\t'warn',\n\t\t\t'Some tasks were missing required fields. Applying defaults...'\n\t\t);\n\t}\n\n\t// Use the preprocessing schema to add defaults and validate\n\tconst preprocessResult = preprocessedTaskArraySchema.safeParse(parsedTasks);\n\n\tif (!preprocessResult.success) {\n\t\t// This should rarely happen now since preprocessing adds defaults\n\t\treport('error', 'Failed to validate task array even after preprocessing.');\n\t\tpreprocessResult.error.errors.forEach((err) => {\n\t\t\treport('error', ` - Path '${err.path.join('.')}': ${err.message}`);\n\t\t});\n\n\t\tthrow new Error(\n\t\t\t`AI response failed validation: ${preprocessResult.error.message}`\n\t\t);\n\t}\n\n\treport('info', 'Successfully validated and transformed task structure.');\n\treturn preprocessResult.data.slice(\n\t\t0,\n\t\texpectedCount || preprocessResult.data.length\n\t);\n}\n\n/**\n * Update tasks based on new context using the unified AI service.\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {number} fromId - Task ID to start updating from\n * @param {string} prompt - Prompt with new context\n * @param {boolean} [useResearch=false] - Whether to use the research AI role.\n * @param {Object} context - Context object containing session and mcpLog.\n * @param {Object} [context.session] - Session object from MCP server.\n * @param {Object} [context.mcpLog] - MCP logger object.\n * @param {string} [context.tag] - Tag for the task\n * @param {string} [outputFormat='text'] - Output format ('text' or 'json').\n */\nasync function updateTasks(\n\ttasksPath,\n\tfromId,\n\tprompt,\n\tuseResearch = false,\n\tcontext = {},\n\toutputFormat = 'text' // Default to text for CLI\n) {\n\tconst { session, mcpLog, projectRoot: providedProjectRoot, tag } = context;\n\t// Use mcpLog if available, otherwise use the imported consoleLog function\n\tconst logFn = mcpLog || consoleLog;\n\t// Flag to easily check 
which logger type we have\n\tconst isMCP = !!mcpLog;\n\n\tif (isMCP)\n\t\tlogFn.info(`updateTasks called with context: session=${!!session}`);\n\telse logFn('info', `updateTasks called`); // CLI log\n\n\ttry {\n\t\tif (isMCP) logFn.info(`Updating tasks from ID ${fromId}`);\n\t\telse\n\t\t\tlogFn(\n\t\t\t\t'info',\n\t\t\t\t`Updating tasks from ID ${fromId} with prompt: \"${prompt}\"`\n\t\t\t);\n\n\t\t// Determine project root\n\t\tconst projectRoot = providedProjectRoot || findProjectRoot();\n\t\tif (!projectRoot) {\n\t\t\tthrow new Error('Could not determine project root directory');\n\t\t}\n\n\t\t// --- Task Loading/Filtering (Updated to pass projectRoot and tag) ---\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!data || !data.tasks)\n\t\t\tthrow new Error(`No valid tasks found in ${tasksPath}`);\n\t\tconst tasksToUpdate = data.tasks.filter(\n\t\t\t(task) => task.id >= fromId && task.status !== 'done'\n\t\t);\n\t\tif (tasksToUpdate.length === 0) {\n\t\t\tif (isMCP)\n\t\t\t\tlogFn.info(`No tasks to update (ID >= ${fromId} and not 'done').`);\n\t\t\telse\n\t\t\t\tlogFn('info', `No tasks to update (ID >= ${fromId} and not 'done').`);\n\t\t\tif (outputFormat === 'text') console.log(/* yellow message */);\n\t\t\treturn; // Nothing to do\n\t\t}\n\t\t// --- End Task Loading/Filtering ---\n\n\t\t// --- Context Gathering ---\n\t\tlet gatheredContext = '';\n\t\ttry {\n\t\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\t\t\tconst allTasksFlat = flattenTasksWithSubtasks(data.tasks);\n\t\t\tconst fuzzySearch = new FuzzyTaskSearch(allTasksFlat, 'update');\n\t\t\tconst searchResults = fuzzySearch.findRelevantTasks(prompt, {\n\t\t\t\tmaxResults: 5,\n\t\t\t\tincludeSelf: true\n\t\t\t});\n\t\t\tconst relevantTaskIds = fuzzySearch.getTaskIds(searchResults);\n\n\t\t\tconst tasksToUpdateIds = tasksToUpdate.map((t) => t.id.toString());\n\t\t\tconst finalTaskIds = [\n\t\t\t\t...new Set([...tasksToUpdateIds, 
...relevantTaskIds])\n\t\t\t];\n\n\t\t\tif (finalTaskIds.length > 0) {\n\t\t\t\tconst contextResult = await contextGatherer.gather({\n\t\t\t\t\ttasks: finalTaskIds,\n\t\t\t\t\tformat: 'research'\n\t\t\t\t});\n\t\t\t\tgatheredContext = contextResult.context || '';\n\t\t\t}\n\t\t} catch (contextError) {\n\t\t\tlogFn(\n\t\t\t\t'warn',\n\t\t\t\t`Could not gather additional context: ${contextError.message}`\n\t\t\t);\n\t\t}\n\t\t// --- End Context Gathering ---\n\n\t\t// --- Display Tasks to Update (CLI Only - Unchanged) ---\n\t\tif (outputFormat === 'text') {\n\t\t\t// Show the tasks that will be updated\n\t\t\tconst table = new Table({\n\t\t\t\thead: [\n\t\t\t\t\tchalk.cyan.bold('ID'),\n\t\t\t\t\tchalk.cyan.bold('Title'),\n\t\t\t\t\tchalk.cyan.bold('Status')\n\t\t\t\t],\n\t\t\t\tcolWidths: [5, 70, 20]\n\t\t\t});\n\n\t\t\ttasksToUpdate.forEach((task) => {\n\t\t\t\ttable.push([\n\t\t\t\t\ttask.id,\n\t\t\t\t\ttruncate(task.title, 57),\n\t\t\t\t\tgetStatusWithColor(task.status)\n\t\t\t\t]);\n\t\t\t});\n\n\t\t\tconsole.log(\n\t\t\t\tboxen(chalk.white.bold(`Updating ${tasksToUpdate.length} tasks`), {\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 0 }\n\t\t\t\t})\n\t\t\t);\n\n\t\t\tconsole.log(table.toString());\n\n\t\t\t// Display a message about how completed subtasks are handled\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.cyan.bold('How Completed Subtasks Are Handled:') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• Subtasks marked as \"done\" or \"completed\" will be preserved\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• New subtasks will build upon what has already been completed\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• If completed work needs revision, a new subtask will be created instead of modifying done items\\n'\n\t\t\t\t\t\t) +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t'• This approach maintains a clear record of 
completed work and new requirements'\n\t\t\t\t\t\t),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\t\t// --- End Display Tasks ---\n\n\t\t// --- Build Prompts (Using PromptManager) ---\n\t\t// Load prompts using PromptManager\n\t\tconst promptManager = getPromptManager();\n\t\tconst { systemPrompt, userPrompt } = await promptManager.loadPrompt(\n\t\t\t'update-tasks',\n\t\t\t{\n\t\t\t\ttasks: tasksToUpdate,\n\t\t\t\tupdatePrompt: prompt,\n\t\t\t\tuseResearch,\n\t\t\t\tprojectContext: gatheredContext\n\t\t\t}\n\t\t);\n\t\t// --- End Build Prompts ---\n\n\t\t// --- AI Call ---\n\t\tlet loadingIndicator = null;\n\t\tlet aiServiceResponse = null;\n\n\t\tif (!isMCP && outputFormat === 'text') {\n\t\t\tloadingIndicator = startLoadingIndicator('Updating tasks with AI...\\n');\n\t\t}\n\n\t\ttry {\n\t\t\t// Determine role based on research flag\n\t\t\tconst serviceRole = useResearch ? 'research' : 'main';\n\n\t\t\t// Call the unified AI service\n\t\t\taiServiceResponse = await generateTextService({\n\t\t\t\trole: serviceRole,\n\t\t\t\tsession: session,\n\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\tsystemPrompt: systemPrompt,\n\t\t\t\tprompt: userPrompt,\n\t\t\t\tcommandName: 'update-tasks',\n\t\t\t\toutputType: isMCP ? 
'mcp' : 'cli'\n\t\t\t});\n\n\t\t\tif (loadingIndicator)\n\t\t\t\tstopLoadingIndicator(loadingIndicator, 'AI update complete.');\n\n\t\t\t// Use the mainResult (text) for parsing\n\t\t\tconst parsedUpdatedTasks = parseUpdatedTasksFromText(\n\t\t\t\taiServiceResponse.mainResult,\n\t\t\t\ttasksToUpdate.length,\n\t\t\t\tlogFn,\n\t\t\t\tisMCP\n\t\t\t);\n\n\t\t\t// --- Update Tasks Data (Updated writeJSON call) ---\n\t\t\tif (!Array.isArray(parsedUpdatedTasks)) {\n\t\t\t\t// Should be caught by parser, but extra check\n\t\t\t\tthrow new Error(\n\t\t\t\t\t'Parsed AI response for updated tasks was not an array.'\n\t\t\t\t);\n\t\t\t}\n\t\t\tif (isMCP)\n\t\t\t\tlogFn.info(\n\t\t\t\t\t`Received ${parsedUpdatedTasks.length} updated tasks from AI.`\n\t\t\t\t);\n\t\t\telse\n\t\t\t\tlogFn(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Received ${parsedUpdatedTasks.length} updated tasks from AI.`\n\t\t\t\t);\n\t\t\t// Create a map for efficient lookup\n\t\t\tconst updatedTasksMap = new Map(\n\t\t\t\tparsedUpdatedTasks.map((task) => [task.id, task])\n\t\t\t);\n\n\t\t\tlet actualUpdateCount = 0;\n\t\t\tdata.tasks.forEach((task, index) => {\n\t\t\t\tif (updatedTasksMap.has(task.id)) {\n\t\t\t\t\t// Only update if the task was part of the set sent to AI\n\t\t\t\t\tconst updatedTask = updatedTasksMap.get(task.id);\n\t\t\t\t\t// Merge the updated task with the existing one to preserve fields like subtasks\n\t\t\t\t\tdata.tasks[index] = {\n\t\t\t\t\t\t...task, // Keep all existing fields\n\t\t\t\t\t\t...updatedTask, // Override with updated fields\n\t\t\t\t\t\t// Ensure subtasks field is preserved if not provided by AI\n\t\t\t\t\t\tsubtasks:\n\t\t\t\t\t\t\tupdatedTask.subtasks !== undefined\n\t\t\t\t\t\t\t\t? 
updatedTask.subtasks\n\t\t\t\t\t\t\t\t: task.subtasks\n\t\t\t\t\t};\n\t\t\t\t\tactualUpdateCount++;\n\t\t\t\t}\n\t\t\t});\n\t\t\tif (isMCP)\n\t\t\t\tlogFn.info(\n\t\t\t\t\t`Applied updates to ${actualUpdateCount} tasks in the dataset.`\n\t\t\t\t);\n\t\t\telse\n\t\t\t\tlogFn(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Applied updates to ${actualUpdateCount} tasks in the dataset.`\n\t\t\t\t);\n\n\t\t\t// Fix: Pass projectRoot and currentTag to writeJSON\n\t\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\t\t\tif (isMCP)\n\t\t\t\tlogFn.info(\n\t\t\t\t\t`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`\n\t\t\t\t);\n\t\t\telse\n\t\t\t\tlogFn(\n\t\t\t\t\t'success',\n\t\t\t\t\t`Successfully updated ${actualUpdateCount} tasks in ${tasksPath}`\n\t\t\t\t);\n\t\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath));\n\n\t\t\tif (outputFormat === 'text' && aiServiceResponse.telemetryData) {\n\t\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t\t}\n\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tupdatedTasks: parsedUpdatedTasks,\n\t\t\t\ttelemetryData: aiServiceResponse.telemetryData,\n\t\t\t\ttagInfo: aiServiceResponse.tagInfo\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tif (loadingIndicator) stopLoadingIndicator(loadingIndicator);\n\t\t\tif (isMCP) logFn.error(`Error during AI service call: ${error.message}`);\n\t\t\telse logFn('error', `Error during AI service call: ${error.message}`);\n\t\t\tif (error.message.includes('API key')) {\n\t\t\t\tif (isMCP)\n\t\t\t\t\tlogFn.error(\n\t\t\t\t\t\t'Please ensure API keys are configured correctly in .env or mcp.json.'\n\t\t\t\t\t);\n\t\t\t\telse\n\t\t\t\t\tlogFn(\n\t\t\t\t\t\t'error',\n\t\t\t\t\t\t'Please ensure API keys are configured correctly in .env or mcp.json.'\n\t\t\t\t\t);\n\t\t\t}\n\t\t\tthrow error;\n\t\t} finally {\n\t\t\tif (loadingIndicator) stopLoadingIndicator(loadingIndicator);\n\t\t}\n\t} catch (error) {\n\t\t// --- General Error Handling (Unchanged) ---\n\t\tif (isMCP) 
logFn.error(`Error updating tasks: ${error.message}`);\n\t\telse logFn('error', `Error updating tasks: ${error.message}`);\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t\tif (getDebugFlag(session)) {\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\t\t\tprocess.exit(1);\n\t\t} else {\n\t\t\tthrow error; // Re-throw for MCP/programmatic callers\n\t\t}\n\t\t// --- End General Error Handling ---\n\t}\n}\n\nexport default updateTasks;\n"], ["/claude-task-master/scripts/modules/task-manager/expand-all-tasks.js", "import { log, readJSON, isSilentMode, findProjectRoot } from '../utils.js';\nimport {\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayAiUsageSummary\n} from '../ui.js';\nimport expandTask from './expand-task.js';\nimport { getDebugFlag } from '../config-manager.js';\nimport { aggregateTelemetry } from '../utils.js';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\n\n/**\n * Expand all eligible pending or in-progress tasks using the expandTask function.\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {number} [numSubtasks] - Optional: Target number of subtasks per task.\n * @param {boolean} [useResearch=false] - Whether to use the research AI role.\n * @param {string} [additionalContext=''] - Optional additional context.\n * @param {boolean} [force=false] - Force expansion even if tasks already have subtasks.\n * @param {Object} context - Context object containing session and mcpLog.\n * @param {Object} [context.session] - Session object from MCP.\n * @param {Object} [context.mcpLog] - MCP logger object.\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n * @param {string} [context.complexityReportPath] - Path to the complexity report file\n * @param {string} [outputFormat='text'] - Output format ('text' or 'json'). 
MCP calls should use 'json'.\n * @returns {Promise<{success: boolean, expandedCount: number, failedCount: number, skippedCount: number, tasksToExpand: number, telemetryData: Array<Object>}>} - Result summary.\n */\nasync function expandAllTasks(\n\ttasksPath,\n\tnumSubtasks, // Keep this signature, expandTask handles defaults\n\tuseResearch = false,\n\tadditionalContext = '',\n\tforce = false, // Keep force here for the filter logic\n\tcontext = {},\n\toutputFormat = 'text' // Assume text default for CLI\n) {\n\tconst {\n\t\tsession,\n\t\tmcpLog,\n\t\tprojectRoot: providedProjectRoot,\n\t\ttag,\n\t\tcomplexityReportPath\n\t} = context;\n\tconst isMCPCall = !!mcpLog; // Determine if called from MCP\n\n\tconst projectRoot = providedProjectRoot || findProjectRoot();\n\tif (!projectRoot) {\n\t\tthrow new Error('Could not determine project root directory');\n\t}\n\n\t// Use mcpLog if available, otherwise use the default console log wrapper respecting silent mode\n\tconst logger =\n\t\tmcpLog ||\n\t\t(outputFormat === 'json'\n\t\t\t? 
{\n\t\t\t\t\t// Basic logger for JSON output mode\n\t\t\t\t\tinfo: (msg) => {},\n\t\t\t\t\twarn: (msg) => {},\n\t\t\t\t\terror: (msg) => console.error(`ERROR: ${msg}`), // Still log errors\n\t\t\t\t\tdebug: (msg) => {}\n\t\t\t\t}\n\t\t\t: {\n\t\t\t\t\t// CLI logger respecting silent mode\n\t\t\t\t\tinfo: (msg) => !isSilentMode() && log('info', msg),\n\t\t\t\t\twarn: (msg) => !isSilentMode() && log('warn', msg),\n\t\t\t\t\terror: (msg) => !isSilentMode() && log('error', msg),\n\t\t\t\t\tdebug: (msg) =>\n\t\t\t\t\t\t!isSilentMode() && getDebugFlag(session) && log('debug', msg)\n\t\t\t\t});\n\n\tlet loadingIndicator = null;\n\tlet expandedCount = 0;\n\tlet failedCount = 0;\n\tlet tasksToExpandCount = 0;\n\tconst allTelemetryData = []; // Still collect individual data first\n\n\tif (!isMCPCall && outputFormat === 'text') {\n\t\tloadingIndicator = startLoadingIndicator(\n\t\t\t'Analyzing tasks for expansion...'\n\t\t);\n\t}\n\n\ttry {\n\t\tlogger.info(`Reading tasks from ${tasksPath}`);\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!data || !data.tasks) {\n\t\t\tthrow new Error(`Invalid tasks data in ${tasksPath}`);\n\t\t}\n\n\t\t// --- Restore Original Filtering Logic ---\n\t\tconst tasksToExpand = data.tasks.filter(\n\t\t\t(task) =>\n\t\t\t\t(task.status === 'pending' || task.status === 'in-progress') && // Include 'in-progress'\n\t\t\t\t(!task.subtasks || task.subtasks.length === 0 || force) // Check subtasks/force here\n\t\t);\n\t\ttasksToExpandCount = tasksToExpand.length; // Get the count from the filtered array\n\t\tlogger.info(`Found ${tasksToExpandCount} tasks eligible for expansion.`);\n\t\t// --- End Restored Filtering Logic ---\n\n\t\tif (loadingIndicator) {\n\t\t\tstopLoadingIndicator(loadingIndicator, 'Analysis complete.');\n\t\t}\n\n\t\tif (tasksToExpandCount === 0) {\n\t\t\tlogger.info('No tasks eligible for expansion.');\n\t\t\t// --- Fix: Restore success: true and add message ---\n\t\t\treturn {\n\t\t\t\tsuccess: true, // Indicate 
overall success despite no action\n\t\t\t\texpandedCount: 0,\n\t\t\t\tfailedCount: 0,\n\t\t\t\tskippedCount: 0,\n\t\t\t\ttasksToExpand: 0,\n\t\t\t\ttelemetryData: allTelemetryData,\n\t\t\t\tmessage: 'No tasks eligible for expansion.'\n\t\t\t};\n\t\t\t// --- End Fix ---\n\t\t}\n\n\t\t// Iterate over the already filtered tasks\n\t\tfor (const task of tasksToExpand) {\n\t\t\t// Start indicator for individual task expansion in CLI mode\n\t\t\tlet taskIndicator = null;\n\t\t\tif (!isMCPCall && outputFormat === 'text') {\n\t\t\t\ttaskIndicator = startLoadingIndicator(`Expanding task ${task.id}...`);\n\t\t\t}\n\n\t\t\ttry {\n\t\t\t\t// Call the refactored expandTask function AND capture result\n\t\t\t\tconst result = await expandTask(\n\t\t\t\t\ttasksPath,\n\t\t\t\t\ttask.id,\n\t\t\t\t\tnumSubtasks,\n\t\t\t\t\tuseResearch,\n\t\t\t\t\tadditionalContext,\n\t\t\t\t\t{\n\t\t\t\t\t\t...context,\n\t\t\t\t\t\tprojectRoot,\n\t\t\t\t\t\ttag: data.tag || tag,\n\t\t\t\t\t\tcomplexityReportPath\n\t\t\t\t\t}, // Pass the whole context object with projectRoot and resolved tag\n\t\t\t\t\tforce\n\t\t\t\t);\n\t\t\t\texpandedCount++;\n\n\t\t\t\t// Collect individual telemetry data\n\t\t\t\tif (result && result.telemetryData) {\n\t\t\t\t\tallTelemetryData.push(result.telemetryData);\n\t\t\t\t}\n\n\t\t\t\tif (taskIndicator) {\n\t\t\t\t\tstopLoadingIndicator(taskIndicator, `Task ${task.id} expanded.`);\n\t\t\t\t}\n\t\t\t\tlogger.info(`Successfully expanded task ${task.id}.`);\n\t\t\t} catch (error) {\n\t\t\t\tfailedCount++;\n\t\t\t\tif (taskIndicator) {\n\t\t\t\t\tstopLoadingIndicator(\n\t\t\t\t\t\ttaskIndicator,\n\t\t\t\t\t\t`Failed to expand task ${task.id}.`,\n\t\t\t\t\t\tfalse\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tlogger.error(`Failed to expand task ${task.id}: ${error.message}`);\n\t\t\t\t// Continue to the next task\n\t\t\t}\n\t\t}\n\n\t\t// --- AGGREGATION AND DISPLAY ---\n\t\tlogger.info(\n\t\t\t`Expansion complete: ${expandedCount} expanded, ${failedCount} failed.`\n\t\t);\n\n\t\t// 
Aggregate the collected telemetry data\n\t\tconst aggregatedTelemetryData = aggregateTelemetry(\n\t\t\tallTelemetryData,\n\t\t\t'expand-all-tasks'\n\t\t);\n\n\t\tif (outputFormat === 'text') {\n\t\t\tconst summaryContent =\n\t\t\t\t`${chalk.white.bold('Expansion Summary:')}\\n\\n` +\n\t\t\t\t`${chalk.cyan('-')} Attempted: ${chalk.bold(tasksToExpandCount)}\\n` +\n\t\t\t\t`${chalk.green('-')} Expanded: ${chalk.bold(expandedCount)}\\n` +\n\t\t\t\t// Skipped count is always 0 now due to pre-filtering\n\t\t\t\t`${chalk.gray('-')} Skipped: ${chalk.bold(0)}\\n` +\n\t\t\t\t`${chalk.red('-')} Failed: ${chalk.bold(failedCount)}`;\n\n\t\t\tconsole.log(\n\t\t\t\tboxen(summaryContent, {\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tmargin: { top: 1 },\n\t\t\t\t\tborderColor: failedCount > 0 ? 'red' : 'green', // Red if failures, green otherwise\n\t\t\t\t\tborderStyle: 'round'\n\t\t\t\t})\n\t\t\t);\n\t\t}\n\n\t\tif (outputFormat === 'text' && aggregatedTelemetryData) {\n\t\t\tdisplayAiUsageSummary(aggregatedTelemetryData, 'cli');\n\t\t}\n\n\t\t// Return summary including the AGGREGATED telemetry data\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\texpandedCount,\n\t\t\tfailedCount,\n\t\t\tskippedCount: 0,\n\t\t\ttasksToExpand: tasksToExpandCount,\n\t\t\ttelemetryData: aggregatedTelemetryData\n\t\t};\n\t} catch (error) {\n\t\tif (loadingIndicator)\n\t\t\tstopLoadingIndicator(loadingIndicator, 'Error.', false);\n\t\tlogger.error(`Error during expand all operation: ${error.message}`);\n\t\tif (!isMCPCall && getDebugFlag(session)) {\n\t\t\tconsole.error(error); // Log full stack in debug CLI mode\n\t\t}\n\t\t// Re-throw error for the caller to handle, the direct function will format it\n\t\tthrow error; // Let direct function wrapper handle formatting\n\t\t/* Original re-throw:\n\t\tthrow new Error(`Failed to expand all tasks: ${error.message}`);\n\t\t*/\n\t}\n}\n\nexport default expandAllTasks;\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/parse-prd.js", "/**\n * parse-prd.js\n * 
Direct function implementation for parsing PRD documents\n */\n\nimport path from 'path';\nimport fs from 'fs';\nimport { parsePRD } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode,\n\tisSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\nimport { getDefaultNumTasks } from '../../../../scripts/modules/config-manager.js';\nimport { resolvePrdPath, resolveProjectPath } from '../utils/path-utils.js';\nimport { TASKMASTER_TASKS_FILE } from '../../../../src/constants/paths.js';\n\n/**\n * Direct function wrapper for parsing PRD documents and generating tasks.\n *\n * @param {Object} args - Command arguments containing projectRoot, input, output, numTasks options.\n * @param {string} args.input - Path to the input PRD file.\n * @param {string} args.output - Path to the output directory.\n * @param {string} args.numTasks - Number of tasks to generate.\n * @param {boolean} args.force - Whether to force parsing.\n * @param {boolean} args.append - Whether to append to the output file.\n * @param {boolean} args.research - Whether to use research mode.\n * @param {string} args.tag - Tag context for organizing tasks into separate task lists.\n * @param {Object} log - Logger object.\n * @param {Object} context - Context object containing session data.\n * @returns {Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function parsePRDDirect(args, log, context = {}) {\n\tconst { session } = context;\n\t// Extract projectRoot from args\n\tconst {\n\t\tinput: inputArg,\n\t\toutput: outputArg,\n\t\tnumTasks: numTasksArg,\n\t\tforce,\n\t\tappend,\n\t\tresearch,\n\t\tprojectRoot,\n\t\ttag\n\t} = args;\n\n\t// Create the standard logger wrapper\n\tconst logWrapper = createLogWrapper(log);\n\n\t// --- Input Validation and Path Resolution ---\n\tif (!projectRoot) {\n\t\tlogWrapper.error('parsePRDDirect requires a 
projectRoot argument.');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'projectRoot is required.'\n\t\t\t}\n\t\t};\n\t}\n\n\t// Resolve input path using path utilities\n\tlet inputPath;\n\tif (inputArg) {\n\t\ttry {\n\t\t\tinputPath = resolvePrdPath({ input: inputArg, projectRoot }, session);\n\t\t} catch (error) {\n\t\t\tlogWrapper.error(`Error resolving PRD path: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'FILE_NOT_FOUND', message: error.message }\n\t\t\t};\n\t\t}\n\t} else {\n\t\tlogWrapper.error('parsePRDDirect called without input path');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: { code: 'MISSING_ARGUMENT', message: 'Input path is required' }\n\t\t};\n\t}\n\n\t// Resolve output path - use new path utilities for default\n\tconst outputPath = outputArg\n\t\t? path.isAbsolute(outputArg)\n\t\t\t? outputArg\n\t\t\t: path.resolve(projectRoot, outputArg)\n\t\t: resolveProjectPath(TASKMASTER_TASKS_FILE, args) ||\n\t\t\tpath.resolve(projectRoot, TASKMASTER_TASKS_FILE);\n\n\t// Check if input file exists\n\tif (!fs.existsSync(inputPath)) {\n\t\tconst errorMsg = `Input PRD file not found at resolved path: ${inputPath}`;\n\t\tlogWrapper.error(errorMsg);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: { code: 'FILE_NOT_FOUND', message: errorMsg }\n\t\t};\n\t}\n\n\tconst outputDir = path.dirname(outputPath);\n\ttry {\n\t\tif (!fs.existsSync(outputDir)) {\n\t\t\tlogWrapper.info(`Creating output directory: ${outputDir}`);\n\t\t\tfs.mkdirSync(outputDir, { recursive: true });\n\t\t}\n\t} catch (error) {\n\t\tconst errorMsg = `Failed to create output directory ${outputDir}: ${error.message}`;\n\t\tlogWrapper.error(errorMsg);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: { code: 'DIRECTORY_CREATE_FAILED', message: errorMsg }\n\t\t};\n\t}\n\n\tlet numTasks = getDefaultNumTasks(projectRoot);\n\tif (numTasksArg) {\n\t\tnumTasks =\n\t\t\ttypeof numTasksArg === 
'string' ? parseInt(numTasksArg, 10) : numTasksArg;\n\t\tif (Number.isNaN(numTasks) || numTasks < 0) {\n\t\t\t// Ensure positive number\n\t\t\tnumTasks = getDefaultNumTasks(projectRoot); // Fallback to default if parsing fails or invalid\n\t\t\tlogWrapper.warn(\n\t\t\t\t`Invalid numTasks value: ${numTasksArg}. Using default: ${numTasks}`\n\t\t\t);\n\t\t}\n\t}\n\n\tif (append) {\n\t\tlogWrapper.info('Append mode enabled.');\n\t\tif (force) {\n\t\t\tlogWrapper.warn(\n\t\t\t\t'Both --force and --append flags were provided. --force takes precedence; append mode will be ignored.'\n\t\t\t);\n\t\t}\n\t}\n\n\tif (research) {\n\t\tlogWrapper.info(\n\t\t\t'Research mode enabled. Using Perplexity AI for enhanced PRD analysis.'\n\t\t);\n\t}\n\n\tlogWrapper.info(\n\t\t`Parsing PRD via direct function. Input: ${inputPath}, Output: ${outputPath}, NumTasks: ${numTasks}, Force: ${force}, Append: ${append}, Research: ${research}, ProjectRoot: ${projectRoot}`\n\t);\n\n\tconst wasSilent = isSilentMode();\n\tif (!wasSilent) {\n\t\tenableSilentMode();\n\t}\n\n\ttry {\n\t\t// Call the core parsePRD function\n\t\tconst result = await parsePRD(\n\t\t\tinputPath,\n\t\t\toutputPath,\n\t\t\tnumTasks,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog: logWrapper,\n\t\t\t\tprojectRoot,\n\t\t\t\ttag,\n\t\t\t\tforce,\n\t\t\t\tappend,\n\t\t\t\tresearch,\n\t\t\t\tcommandName: 'parse-prd',\n\t\t\t\toutputType: 'mcp'\n\t\t\t},\n\t\t\t'json'\n\t\t);\n\n\t\t// Adjust check for the new return structure\n\t\tif (result && result.success) {\n\t\t\tconst successMsg = `Successfully parsed PRD and generated tasks in ${result.tasksPath}`;\n\t\t\tlogWrapper.success(successMsg);\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmessage: successMsg,\n\t\t\t\t\toutputPath: result.tasksPath,\n\t\t\t\t\ttelemetryData: result.telemetryData,\n\t\t\t\t\ttagInfo: result.tagInfo\n\t\t\t\t}\n\t\t\t};\n\t\t} else {\n\t\t\t// Handle case where core function didn't return expected success 
structure\n\t\t\tlogWrapper.error(\n\t\t\t\t'Core parsePRD function did not return a successful structure.'\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\t\tmessage:\n\t\t\t\t\t\tresult?.message ||\n\t\t\t\t\t\t'Core function failed to parse PRD or returned unexpected result.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\tlogWrapper.error(`Error executing core parsePRD: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'PARSE_PRD_CORE_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown error parsing PRD'\n\t\t\t}\n\t\t};\n\t} finally {\n\t\tif (!wasSilent && isSilentMode()) {\n\t\t\tdisableSilentMode();\n\t\t}\n\t}\n}\n"], ["/claude-task-master/scripts/modules/task-manager/expand-task.js", "import fs from 'fs';\nimport path from 'path';\nimport { z } from 'zod';\n\nimport {\n\tlog,\n\treadJSON,\n\twriteJSON,\n\tisSilentMode,\n\tgetTagAwareFilePath\n} from '../utils.js';\n\nimport {\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayAiUsageSummary\n} from '../ui.js';\n\nimport { generateTextService } from '../ai-services-unified.js';\n\nimport { getDefaultSubtasks, getDebugFlag } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport generateTaskFiles from './generate-task-files.js';\nimport { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';\nimport { ContextGatherer } from '../utils/contextGatherer.js';\nimport { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';\nimport { flattenTasksWithSubtasks, findProjectRoot } from '../utils.js';\n\n// --- Zod Schemas (Keep from previous step) ---\nconst subtaskSchema = z\n\t.object({\n\t\tid: z\n\t\t\t.number()\n\t\t\t.int()\n\t\t\t.positive()\n\t\t\t.describe('Sequential subtask ID starting from 1'),\n\t\ttitle: z.string().min(5).describe('Clear, specific title for the subtask'),\n\t\tdescription: 
z\n\t\t\t.string()\n\t\t\t.min(10)\n\t\t\t.describe('Detailed description of the subtask'),\n\t\tdependencies: z\n\t\t\t.array(z.string())\n\t\t\t.describe(\n\t\t\t\t'Array of subtask dependencies within the same parent task. Use format [\"parentTaskId.1\", \"parentTaskId.2\"]. Subtasks can only depend on siblings, not external tasks.'\n\t\t\t),\n\t\tdetails: z.string().min(20).describe('Implementation details and guidance'),\n\t\tstatus: z\n\t\t\t.string()\n\t\t\t.describe(\n\t\t\t\t'The current status of the subtask (should be pending initially)'\n\t\t\t),\n\t\ttestStrategy: z\n\t\t\t.string()\n\t\t\t.nullable()\n\t\t\t.describe('Approach for testing this subtask')\n\t\t\t.default('')\n\t})\n\t.strict();\nconst subtaskArraySchema = z.array(subtaskSchema);\nconst subtaskWrapperSchema = z.object({\n\tsubtasks: subtaskArraySchema.describe('The array of generated subtasks.')\n});\n// --- End Zod Schemas ---\n\n/**\n * Parse subtasks from AI's text response. Includes basic cleanup.\n * @param {string} text - Response text from AI.\n * @param {number} startId - Starting subtask ID expected.\n * @param {number} expectedCount - Expected number of subtasks.\n * @param {number} parentTaskId - Parent task ID for context.\n * @param {Object} logger - Logging object (mcpLog or console log).\n * @returns {Array} Parsed and potentially corrected subtasks array.\n * @throws {Error} If parsing fails or JSON is invalid/malformed.\n */\nfunction parseSubtasksFromText(\n\ttext,\n\tstartId,\n\texpectedCount,\n\tparentTaskId,\n\tlogger\n) {\n\tif (typeof text !== 'string') {\n\t\tlogger.error(\n\t\t\t`AI response text is not a string. 
Received type: ${typeof text}, Value: ${text}`\n\t\t);\n\t\tthrow new Error('AI response text is not a string.');\n\t}\n\n\tif (!text || text.trim() === '') {\n\t\tthrow new Error('AI response text is empty after trimming.');\n\t}\n\n\tconst originalTrimmedResponse = text.trim(); // Store the original trimmed response\n\tlet jsonToParse = originalTrimmedResponse; // Initialize jsonToParse with it\n\n\tlogger.debug(\n\t\t`Original AI Response for parsing (full length: ${jsonToParse.length}): ${jsonToParse.substring(0, 1000)}...`\n\t);\n\n\t// --- Pre-emptive cleanup for known AI JSON issues ---\n\t// Fix for \"dependencies\": , or \"dependencies\":,\n\tif (jsonToParse.includes('\"dependencies\":')) {\n\t\tconst malformedPattern = /\"dependencies\":\\s*,/g;\n\t\tif (malformedPattern.test(jsonToParse)) {\n\t\t\tlogger.warn('Attempting to fix malformed \"dependencies\": , issue.');\n\t\t\tjsonToParse = jsonToParse.replace(\n\t\t\t\tmalformedPattern,\n\t\t\t\t'\"dependencies\": [],'\n\t\t\t);\n\t\t\tlogger.debug(\n\t\t\t\t`JSON after fixing \"dependencies\": ${jsonToParse.substring(0, 500)}...`\n\t\t\t);\n\t\t}\n\t}\n\t// --- End pre-emptive cleanup ---\n\n\tlet parsedObject;\n\tlet primaryParseAttemptFailed = false;\n\n\t// --- Attempt 1: Simple Parse (with optional Markdown cleanup) ---\n\tlogger.debug('Attempting simple parse...');\n\ttry {\n\t\t// Check for markdown code block\n\t\tconst codeBlockMatch = jsonToParse.match(/```(?:json)?\\s*([\\s\\S]*?)\\s*```/);\n\t\tlet contentToParseDirectly = jsonToParse;\n\t\tif (codeBlockMatch && codeBlockMatch[1]) {\n\t\t\tcontentToParseDirectly = codeBlockMatch[1].trim();\n\t\t\tlogger.debug('Simple parse: Extracted content from markdown code block.');\n\t\t} else {\n\t\t\tlogger.debug(\n\t\t\t\t'Simple parse: No markdown code block found, using trimmed original.'\n\t\t\t);\n\t\t}\n\n\t\tparsedObject = JSON.parse(contentToParseDirectly);\n\t\tlogger.debug('Simple parse successful!');\n\n\t\t// Quick check if it looks like our 
target object\n\t\tif (\n\t\t\t!parsedObject ||\n\t\t\ttypeof parsedObject !== 'object' ||\n\t\t\t!Array.isArray(parsedObject.subtasks)\n\t\t) {\n\t\t\tlogger.warn(\n\t\t\t\t'Simple parse succeeded, but result is not the expected {\"subtasks\": []} structure. Will proceed to advanced extraction.'\n\t\t\t);\n\t\t\tprimaryParseAttemptFailed = true;\n\t\t\tparsedObject = null; // Reset parsedObject so we enter the advanced logic\n\t\t}\n\t\t// If it IS the correct structure, we'll skip advanced extraction.\n\t} catch (e) {\n\t\tlogger.warn(\n\t\t\t`Simple parse failed: ${e.message}. Proceeding to advanced extraction logic.`\n\t\t);\n\t\tprimaryParseAttemptFailed = true;\n\t\t// jsonToParse is already originalTrimmedResponse if simple parse failed before modifying it for markdown\n\t}\n\n\t// --- Attempt 2: Advanced Extraction (if simple parse failed or produced wrong structure) ---\n\tif (primaryParseAttemptFailed || !parsedObject) {\n\t\t// Ensure we try advanced if simple parse gave wrong structure\n\t\tlogger.debug('Attempting advanced extraction logic...');\n\t\t// Reset jsonToParse to the original full trimmed response for advanced logic\n\t\tjsonToParse = originalTrimmedResponse;\n\n\t\t// (Insert the more complex extraction logic here - the one we worked on with:\n\t\t// - targetPattern = '{\"subtasks\":';\n\t\t// - careful brace counting for that targetPattern\n\t\t// - fallbacks to last '{' and '}' if targetPattern logic fails)\n\t\t// This was the logic from my previous message. Let's assume it's here.\n\t\t// This block should ultimately set `jsonToParse` to the best candidate string.\n\n\t\t// Example snippet of that advanced logic's start:\n\t\tconst targetPattern = '{\"subtasks\":';\n\t\tconst patternStartIndex = jsonToParse.indexOf(targetPattern);\n\n\t\tif (patternStartIndex !== -1) {\n\t\t\tconst openBraces = 0;\n\t\t\tconst firstBraceFound = false;\n\t\t\tconst extractedJsonBlock = '';\n\t\t\t// ... 
(loop for brace counting as before) ...\n\t\t\t// ... (if successful, jsonToParse = extractedJsonBlock) ...\n\t\t\t// ... (if that fails, fallbacks as before) ...\n\t\t} else {\n\t\t\t// ... (fallback to last '{' and '}' if targetPattern not found) ...\n\t\t}\n\t\t// End of advanced logic excerpt\n\n\t\tlogger.debug(\n\t\t\t`Advanced extraction: JSON string that will be parsed: ${jsonToParse.substring(0, 500)}...`\n\t\t);\n\t\ttry {\n\t\t\tparsedObject = JSON.parse(jsonToParse);\n\t\t\tlogger.debug('Advanced extraction parse successful!');\n\t\t} catch (parseError) {\n\t\t\tlogger.error(\n\t\t\t\t`Advanced extraction: Failed to parse JSON object: ${parseError.message}`\n\t\t\t);\n\t\t\tlogger.error(\n\t\t\t\t`Advanced extraction: Problematic JSON string for parse (first 500 chars): ${jsonToParse.substring(0, 500)}`\n\t\t\t);\n\t\t\tthrow new Error(\n\t\t\t\t// Re-throw a more specific error if advanced also fails\n\t\t\t\t`Failed to parse JSON response object after both simple and advanced attempts: ${parseError.message}`\n\t\t\t);\n\t\t}\n\t}\n\n\t// --- Validation (applies to successfully parsedObject from either attempt) ---\n\tif (\n\t\t!parsedObject ||\n\t\ttypeof parsedObject !== 'object' ||\n\t\t!Array.isArray(parsedObject.subtasks)\n\t) {\n\t\tlogger.error(\n\t\t\t`Final parsed content is not an object or missing 'subtasks' array. 
Content: ${JSON.stringify(parsedObject).substring(0, 200)}`\n\t\t);\n\t\tthrow new Error(\n\t\t\t'Parsed AI response is not a valid object containing a \"subtasks\" array after all attempts.'\n\t\t);\n\t}\n\tconst parsedSubtasks = parsedObject.subtasks;\n\n\tif (expectedCount && parsedSubtasks.length !== expectedCount) {\n\t\tlogger.warn(\n\t\t\t`Expected ${expectedCount} subtasks, but parsed ${parsedSubtasks.length}.`\n\t\t);\n\t}\n\n\tlet currentId = startId;\n\tconst validatedSubtasks = [];\n\tconst validationErrors = [];\n\n\tfor (const rawSubtask of parsedSubtasks) {\n\t\tconst correctedSubtask = {\n\t\t\t...rawSubtask,\n\t\t\tid: currentId,\n\t\t\tdependencies: Array.isArray(rawSubtask.dependencies)\n\t\t\t\t? rawSubtask.dependencies.filter(\n\t\t\t\t\t\t(dep) =>\n\t\t\t\t\t\t\ttypeof dep === 'string' && dep.startsWith(`${parentTaskId}.`)\n\t\t\t\t\t)\n\t\t\t\t: [],\n\t\t\tstatus: 'pending'\n\t\t};\n\n\t\tconst result = subtaskSchema.safeParse(correctedSubtask);\n\n\t\tif (result.success) {\n\t\t\tvalidatedSubtasks.push(result.data);\n\t\t} else {\n\t\t\tlogger.warn(\n\t\t\t\t`Subtask validation failed for raw data: ${JSON.stringify(rawSubtask).substring(0, 100)}...`\n\t\t\t);\n\t\t\tresult.error.errors.forEach((err) => {\n\t\t\t\tconst errorMessage = ` - Field '${err.path.join('.')}': ${err.message}`;\n\t\t\t\tlogger.warn(errorMessage);\n\t\t\t\tvalidationErrors.push(`Subtask ${currentId}: ${errorMessage}`);\n\t\t\t});\n\t\t}\n\t\tcurrentId++;\n\t}\n\n\tif (validationErrors.length > 0) {\n\t\tlogger.error(\n\t\t\t`Found ${validationErrors.length} validation errors in the generated subtasks.`\n\t\t);\n\t\tlogger.warn('Proceeding with only the successfully validated subtasks.');\n\t}\n\n\tif (validatedSubtasks.length === 0 && parsedSubtasks.length > 0) {\n\t\tthrow new Error(\n\t\t\t'AI response contained potential subtasks, but none passed validation.'\n\t\t);\n\t}\n\treturn validatedSubtasks.slice(0, expectedCount || validatedSubtasks.length);\n}\n\n/**\n * 
Expand a task into subtasks using the unified AI service (generateTextService).\n * Appends new subtasks by default. Replaces existing subtasks if force=true.\n * Integrates complexity report to determine subtask count and prompt if available,\n * unless numSubtasks is explicitly provided.\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {number} taskId - Task ID to expand\n * @param {number | null | undefined} [numSubtasks] - Optional: Explicit target number of subtasks. If null/undefined, check complexity report or config default.\n * @param {boolean} [useResearch=false] - Whether to use the research AI role.\n * @param {string} [additionalContext=''] - Optional additional context.\n * @param {Object} context - Context object containing session and mcpLog.\n * @param {Object} [context.session] - Session object from MCP.\n * @param {Object} [context.mcpLog] - MCP logger object.\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n * @param {boolean} [force=false] - If true, replace existing subtasks; otherwise, append.\n * @returns {Promise<Object>} The updated parent task object with new subtasks.\n * @throws {Error} If task not found, AI service fails, or parsing fails.\n */\nasync function expandTask(\n\ttasksPath,\n\ttaskId,\n\tnumSubtasks,\n\tuseResearch = false,\n\tadditionalContext = '',\n\tcontext = {},\n\tforce = false\n) {\n\tconst {\n\t\tsession,\n\t\tmcpLog,\n\t\tprojectRoot: contextProjectRoot,\n\t\ttag,\n\t\tcomplexityReportPath\n\t} = context;\n\tconst outputFormat = mcpLog ? 
'json' : 'text';\n\n\t// Determine projectRoot: Use from context if available, otherwise derive from tasksPath\n\tconst projectRoot = contextProjectRoot || findProjectRoot(tasksPath);\n\n\t// Use mcpLog if available, otherwise use the default console log wrapper\n\tconst logger = mcpLog || {\n\t\tinfo: (msg) => !isSilentMode() && log('info', msg),\n\t\twarn: (msg) => !isSilentMode() && log('warn', msg),\n\t\terror: (msg) => !isSilentMode() && log('error', msg),\n\t\tdebug: (msg) =>\n\t\t\t!isSilentMode() && getDebugFlag(session) && log('debug', msg) // Use getDebugFlag\n\t};\n\n\tif (mcpLog) {\n\t\tlogger.info(`expandTask called with context: session=${!!session}`);\n\t}\n\n\ttry {\n\t\t// --- Task Loading/Filtering (Unchanged) ---\n\t\tlogger.info(`Reading tasks from ${tasksPath}`);\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!data || !data.tasks)\n\t\t\tthrow new Error(`Invalid tasks data in ${tasksPath}`);\n\t\tconst taskIndex = data.tasks.findIndex(\n\t\t\t(t) => t.id === parseInt(taskId, 10)\n\t\t);\n\t\tif (taskIndex === -1) throw new Error(`Task ${taskId} not found`);\n\t\tconst task = data.tasks[taskIndex];\n\t\tlogger.info(\n\t\t\t`Expanding task ${taskId}: ${task.title}${useResearch ? ' with research' : ''}`\n\t\t);\n\t\t// --- End Task Loading/Filtering ---\n\n\t\t// --- Handle Force Flag: Clear existing subtasks if force=true ---\n\t\tif (force && Array.isArray(task.subtasks) && task.subtasks.length > 0) {\n\t\t\tlogger.info(\n\t\t\t\t`Force flag set. 
Clearing existing ${task.subtasks.length} subtasks for task ${taskId}.`\n\t\t\t);\n\t\t\ttask.subtasks = []; // Clear existing subtasks\n\t\t}\n\t\t// --- End Force Flag Handling ---\n\n\t\t// --- Context Gathering ---\n\t\tlet gatheredContext = '';\n\t\ttry {\n\t\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\t\t\tconst allTasksFlat = flattenTasksWithSubtasks(data.tasks);\n\t\t\tconst fuzzySearch = new FuzzyTaskSearch(allTasksFlat, 'expand-task');\n\t\t\tconst searchQuery = `${task.title} ${task.description}`;\n\t\t\tconst searchResults = fuzzySearch.findRelevantTasks(searchQuery, {\n\t\t\t\tmaxResults: 5,\n\t\t\t\tincludeSelf: true\n\t\t\t});\n\t\t\tconst relevantTaskIds = fuzzySearch.getTaskIds(searchResults);\n\n\t\t\tconst finalTaskIds = [\n\t\t\t\t...new Set([taskId.toString(), ...relevantTaskIds])\n\t\t\t];\n\n\t\t\tif (finalTaskIds.length > 0) {\n\t\t\t\tconst contextResult = await contextGatherer.gather({\n\t\t\t\t\ttasks: finalTaskIds,\n\t\t\t\t\tformat: 'research'\n\t\t\t\t});\n\t\t\t\tgatheredContext = contextResult.context || '';\n\t\t\t}\n\t\t} catch (contextError) {\n\t\t\tlogger.warn(`Could not gather context: ${contextError.message}`);\n\t\t}\n\t\t// --- End Context Gathering ---\n\n\t\t// --- Complexity Report Integration ---\n\t\tlet finalSubtaskCount;\n\t\tlet complexityReasoningContext = '';\n\t\tlet taskAnalysis = null;\n\n\t\tlogger.info(\n\t\t\t`Looking for complexity report at: ${complexityReportPath}${tag !== 'master' ? 
` (tag-specific for '${tag}')` : ''}`\n\t\t);\n\n\t\ttry {\n\t\t\tif (fs.existsSync(complexityReportPath)) {\n\t\t\t\tconst complexityReport = readJSON(complexityReportPath);\n\t\t\t\ttaskAnalysis = complexityReport?.complexityAnalysis?.find(\n\t\t\t\t\t(a) => a.taskId === task.id\n\t\t\t\t);\n\t\t\t\tif (taskAnalysis) {\n\t\t\t\t\tlogger.info(\n\t\t\t\t\t\t`Found complexity analysis for task ${task.id}: Score ${taskAnalysis.complexityScore}`\n\t\t\t\t\t);\n\t\t\t\t\tif (taskAnalysis.reasoning) {\n\t\t\t\t\t\tcomplexityReasoningContext = `\\nComplexity Analysis Reasoning: ${taskAnalysis.reasoning}`;\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tlogger.info(\n\t\t\t\t\t\t`No complexity analysis found for task ${task.id} in report.`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlogger.info(\n\t\t\t\t\t`Complexity report not found at ${complexityReportPath}. Skipping complexity check.`\n\t\t\t\t);\n\t\t\t}\n\t\t} catch (reportError) {\n\t\t\tlogger.warn(\n\t\t\t\t`Could not read or parse complexity report: ${reportError.message}. 
Proceeding without it.`\n\t\t\t);\n\t\t}\n\n\t\t// Determine final subtask count\n\t\tconst explicitNumSubtasks = parseInt(numSubtasks, 10);\n\t\tif (!Number.isNaN(explicitNumSubtasks) && explicitNumSubtasks >= 0) {\n\t\t\tfinalSubtaskCount = explicitNumSubtasks;\n\t\t\tlogger.info(\n\t\t\t\t`Using explicitly provided subtask count: ${finalSubtaskCount}`\n\t\t\t);\n\t\t} else if (taskAnalysis?.recommendedSubtasks) {\n\t\t\tfinalSubtaskCount = parseInt(taskAnalysis.recommendedSubtasks, 10);\n\t\t\tlogger.info(\n\t\t\t\t`Using subtask count from complexity report: ${finalSubtaskCount}`\n\t\t\t);\n\t\t} else {\n\t\t\tfinalSubtaskCount = getDefaultSubtasks(session);\n\t\t\tlogger.info(`Using default number of subtasks: ${finalSubtaskCount}`);\n\t\t}\n\t\tif (Number.isNaN(finalSubtaskCount) || finalSubtaskCount < 0) {\n\t\t\tlogger.warn(\n\t\t\t\t`Invalid subtask count determined (${finalSubtaskCount}), defaulting to 3.`\n\t\t\t);\n\t\t\tfinalSubtaskCount = 3;\n\t\t}\n\n\t\t// Determine prompt content AND system prompt\n\t\tconst nextSubtaskId = (task.subtasks?.length || 0) + 1;\n\n\t\t// Load prompts using PromptManager\n\t\tconst promptManager = getPromptManager();\n\n\t\t// Combine all context sources into a single additionalContext parameter\n\t\tlet combinedAdditionalContext = '';\n\t\tif (additionalContext || complexityReasoningContext) {\n\t\t\tcombinedAdditionalContext =\n\t\t\t\t`\\n\\n${additionalContext}${complexityReasoningContext}`.trim();\n\t\t}\n\t\tif (gatheredContext) {\n\t\t\tcombinedAdditionalContext =\n\t\t\t\t`${combinedAdditionalContext}\\n\\n# Project Context\\n\\n${gatheredContext}`.trim();\n\t\t}\n\n\t\t// Ensure expansionPrompt is a string (handle both string and object formats)\n\t\tlet expansionPromptText = undefined;\n\t\tif (taskAnalysis?.expansionPrompt) {\n\t\t\tif (typeof taskAnalysis.expansionPrompt === 'string') {\n\t\t\t\texpansionPromptText = taskAnalysis.expansionPrompt;\n\t\t\t} else if (\n\t\t\t\ttypeof 
taskAnalysis.expansionPrompt === 'object' &&\n\t\t\t\ttaskAnalysis.expansionPrompt.text\n\t\t\t) {\n\t\t\t\texpansionPromptText = taskAnalysis.expansionPrompt.text;\n\t\t\t}\n\t\t}\n\n\t\t// Ensure gatheredContext is a string (handle both string and object formats)\n\t\tlet gatheredContextText = gatheredContext;\n\t\tif (typeof gatheredContext === 'object' && gatheredContext !== null) {\n\t\t\tif (gatheredContext.data) {\n\t\t\t\tgatheredContextText = gatheredContext.data;\n\t\t\t} else if (gatheredContext.text) {\n\t\t\t\tgatheredContextText = gatheredContext.text;\n\t\t\t} else {\n\t\t\t\tgatheredContextText = JSON.stringify(gatheredContext);\n\t\t\t}\n\t\t}\n\n\t\tconst promptParams = {\n\t\t\ttask: task,\n\t\t\tsubtaskCount: finalSubtaskCount,\n\t\t\tnextSubtaskId: nextSubtaskId,\n\t\t\tadditionalContext: additionalContext,\n\t\t\tcomplexityReasoningContext: complexityReasoningContext,\n\t\t\tgatheredContext: gatheredContextText || '',\n\t\t\tuseResearch: useResearch,\n\t\t\texpansionPrompt: expansionPromptText || undefined\n\t\t};\n\n\t\tlet variantKey = 'default';\n\t\tif (expansionPromptText) {\n\t\t\tvariantKey = 'complexity-report';\n\t\t\tlogger.info(\n\t\t\t\t`Using expansion prompt from complexity report for task ${task.id}.`\n\t\t\t);\n\t\t} else if (useResearch) {\n\t\t\tvariantKey = 'research';\n\t\t\tlogger.info(`Using research variant for task ${task.id}.`);\n\t\t} else {\n\t\t\tlogger.info(`Using standard prompt generation for task ${task.id}.`);\n\t\t}\n\n\t\tconst { systemPrompt, userPrompt: promptContent } =\n\t\t\tawait promptManager.loadPrompt('expand-task', promptParams, variantKey);\n\t\t// --- End Complexity Report / Prompt Logic ---\n\n\t\t// --- AI Subtask Generation using generateTextService ---\n\t\tlet generatedSubtasks = [];\n\t\tlet loadingIndicator = null;\n\t\tif (outputFormat === 'text') {\n\t\t\tloadingIndicator = startLoadingIndicator(\n\t\t\t\t`Generating ${finalSubtaskCount || 'appropriate number of'} 
subtasks...\\n`\n\t\t\t);\n\t\t}\n\n\t\tlet responseText = '';\n\t\tlet aiServiceResponse = null;\n\n\t\ttry {\n\t\t\tconst role = useResearch ? 'research' : 'main';\n\n\t\t\t// Call generateTextService with the determined prompts and telemetry params\n\t\t\taiServiceResponse = await generateTextService({\n\t\t\t\tprompt: promptContent,\n\t\t\t\tsystemPrompt: systemPrompt,\n\t\t\t\trole,\n\t\t\t\tsession,\n\t\t\t\tprojectRoot,\n\t\t\t\tcommandName: 'expand-task',\n\t\t\t\toutputType: outputFormat\n\t\t\t});\n\t\t\tresponseText = aiServiceResponse.mainResult;\n\n\t\t\t// Parse Subtasks\n\t\t\tgeneratedSubtasks = parseSubtasksFromText(\n\t\t\t\tresponseText,\n\t\t\t\tnextSubtaskId,\n\t\t\t\tfinalSubtaskCount,\n\t\t\t\ttask.id,\n\t\t\t\tlogger\n\t\t\t);\n\t\t\tlogger.info(\n\t\t\t\t`Successfully parsed ${generatedSubtasks.length} subtasks from AI response.`\n\t\t\t);\n\t\t} catch (error) {\n\t\t\tif (loadingIndicator) stopLoadingIndicator(loadingIndicator);\n\t\t\tlogger.error(\n\t\t\t\t`Error during AI call or parsing for task ${taskId}: ${error.message}`, // Added task ID context\n\t\t\t\t'error'\n\t\t\t);\n\t\t\t// Log raw response in debug mode if parsing failed\n\t\t\tif (\n\t\t\t\terror.message.includes('Failed to parse valid subtasks') &&\n\t\t\t\tgetDebugFlag(session)\n\t\t\t) {\n\t\t\t\tlogger.error(`Raw AI Response that failed parsing:\\n${responseText}`);\n\t\t\t}\n\t\t\tthrow error;\n\t\t} finally {\n\t\t\tif (loadingIndicator) stopLoadingIndicator(loadingIndicator);\n\t\t}\n\n\t\t// --- Task Update & File Writing ---\n\t\t// Ensure task.subtasks is an array before appending\n\t\tif (!Array.isArray(task.subtasks)) {\n\t\t\ttask.subtasks = [];\n\t\t}\n\t\t// Append the newly generated and validated subtasks\n\t\ttask.subtasks.push(...generatedSubtasks);\n\t\t// --- End Change: Append instead of replace ---\n\n\t\tdata.tasks[taskIndex] = task; // Assign the modified task back\n\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\t\t// await 
generateTaskFiles(tasksPath, path.dirname(tasksPath));\n\n\t\t// Display AI Usage Summary for CLI\n\t\tif (\n\t\t\toutputFormat === 'text' &&\n\t\t\taiServiceResponse &&\n\t\t\taiServiceResponse.telemetryData\n\t\t) {\n\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t}\n\n\t\t// Return the updated task object AND telemetry data\n\t\treturn {\n\t\t\ttask,\n\t\t\ttelemetryData: aiServiceResponse?.telemetryData,\n\t\t\ttagInfo: aiServiceResponse?.tagInfo\n\t\t};\n\t} catch (error) {\n\t\t// Catches errors from file reading, parsing, AI call etc.\n\t\tlogger.error(`Error expanding task ${taskId}: ${error.message}`, 'error');\n\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\tconsole.error(error); // Log full stack in debug CLI mode\n\t\t}\n\t\tthrow error; // Re-throw for the caller\n\t}\n}\n\nexport default expandTask;\n"], ["/claude-task-master/scripts/modules/utils/contextGatherer.js", "/**\n * contextGatherer.js\n * Comprehensive context gathering utility for Task Master AI operations\n * Supports task context, file context, project tree, and custom context\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport pkg from 'gpt-tokens';\nimport Fuse from 'fuse.js';\nimport {\n\treadJSON,\n\tfindTaskById,\n\ttruncate,\n\tflattenTasksWithSubtasks\n} from '../utils.js';\n\nconst { encode } = pkg;\n\n/**\n * Context Gatherer class for collecting and formatting context from various sources\n */\nexport class ContextGatherer {\n\tconstructor(projectRoot, tag) {\n\t\tthis.projectRoot = projectRoot;\n\t\tthis.tasksPath = path.join(\n\t\t\tprojectRoot,\n\t\t\t'.taskmaster',\n\t\t\t'tasks',\n\t\t\t'tasks.json'\n\t\t);\n\t\tthis.tag = tag;\n\t\tthis.allTasks = this._loadAllTasks();\n\t}\n\n\t_loadAllTasks() {\n\t\ttry {\n\t\t\tconst data = readJSON(this.tasksPath, this.projectRoot, this.tag);\n\t\t\tconst tasks = data?.tasks || [];\n\t\t\treturn tasks;\n\t\t} catch (error) {\n\t\t\tconsole.warn(\n\t\t\t\t`Warning: Could not load 
tasks for ContextGatherer: ${error.message}`\n\t\t\t);\n\t\t\treturn [];\n\t\t}\n\t}\n\n\t/**\n\t * Count tokens in a text string using gpt-tokens\n\t * @param {string} text - Text to count tokens for\n\t * @returns {number} Token count\n\t */\n\tcountTokens(text) {\n\t\tif (!text || typeof text !== 'string') {\n\t\t\treturn 0;\n\t\t}\n\t\ttry {\n\t\t\treturn encode(text).length;\n\t\t} catch (error) {\n\t\t\t// Fallback to rough character-based estimation if tokenizer fails\n\t\t\t// Rough estimate: ~4 characters per token for English text\n\t\t\treturn Math.ceil(text.length / 4);\n\t\t}\n\t}\n\n\t/**\n\t * Main method to gather context from multiple sources\n\t * @param {Object} options - Context gathering options\n\t * @param {Array<string>} [options.tasks] - Task/subtask IDs to include\n\t * @param {Array<string>} [options.files] - File paths to include\n\t * @param {string} [options.customContext] - Additional custom context\n\t * @param {boolean} [options.includeProjectTree] - Include project file tree\n\t * @param {string} [options.format] - Output format: 'research', 'chat', 'system-prompt'\n\t * @param {boolean} [options.includeTokenCounts] - Whether to include token breakdown\n\t * @param {string} [options.semanticQuery] - A query string for semantic task searching.\n\t * @param {number} [options.maxSemanticResults] - Max number of semantic results.\n\t * @param {Array<number>} [options.dependencyTasks] - Array of task IDs to build dependency graphs from.\n\t * @returns {Promise<Object>} Object with context string and analysis data\n\t */\n\tasync gather(options = {}) {\n\t\tconst {\n\t\t\ttasks = [],\n\t\t\tfiles = [],\n\t\t\tcustomContext = '',\n\t\t\tincludeProjectTree = false,\n\t\t\tformat = 'research',\n\t\t\tincludeTokenCounts = false,\n\t\t\tsemanticQuery,\n\t\t\tmaxSemanticResults = 10,\n\t\t\tdependencyTasks = []\n\t\t} = options;\n\n\t\tconst contextSections = [];\n\t\tconst finalTaskIds = new Set(tasks.map(String));\n\t\tlet analysisData = 
null;\n\t\tlet tokenBreakdown = null;\n\n\t\t// Initialize token breakdown if requested\n\t\tif (includeTokenCounts) {\n\t\t\ttokenBreakdown = {\n\t\t\t\ttotal: 0,\n\t\t\t\tcustomContext: null,\n\t\t\t\ttasks: [],\n\t\t\t\tfiles: [],\n\t\t\t\tprojectTree: null\n\t\t\t};\n\t\t}\n\n\t\t// Semantic Search\n\t\tif (semanticQuery && this.allTasks.length > 0) {\n\t\t\tconst semanticResults = this._performSemanticSearch(\n\t\t\t\tsemanticQuery,\n\t\t\t\tmaxSemanticResults\n\t\t\t);\n\n\t\t\t// Store the analysis data for UI display\n\t\t\tanalysisData = semanticResults.analysisData;\n\n\t\t\tsemanticResults.tasks.forEach((task) => {\n\t\t\t\tfinalTaskIds.add(String(task.id));\n\t\t\t});\n\t\t}\n\n\t\t// Dependency Graph Analysis\n\t\tif (dependencyTasks.length > 0) {\n\t\t\tconst dependencyResults = this._buildDependencyGraphs(dependencyTasks);\n\t\t\tdependencyResults.allRelatedTaskIds.forEach((id) =>\n\t\t\t\tfinalTaskIds.add(String(id))\n\t\t\t);\n\t\t\t// We can format and add dependencyResults.graphVisualization later if needed\n\t\t}\n\n\t\t// Add custom context first\n\t\tif (customContext && customContext.trim()) {\n\t\t\tconst formattedCustomContext = this._formatCustomContext(\n\t\t\t\tcustomContext,\n\t\t\t\tformat\n\t\t\t);\n\t\t\tcontextSections.push(formattedCustomContext);\n\n\t\t\t// Calculate tokens for custom context if requested\n\t\t\tif (includeTokenCounts) {\n\t\t\t\ttokenBreakdown.customContext = {\n\t\t\t\t\ttokens: this.countTokens(formattedCustomContext),\n\t\t\t\t\tcharacters: formattedCustomContext.length\n\t\t\t\t};\n\t\t\t\ttokenBreakdown.total += tokenBreakdown.customContext.tokens;\n\t\t\t}\n\t\t}\n\n\t\t// Gather context for the final list of tasks\n\t\tif (finalTaskIds.size > 0) {\n\t\t\tconst taskContextResult = await this._gatherTaskContext(\n\t\t\t\tArray.from(finalTaskIds),\n\t\t\t\tformat,\n\t\t\t\tincludeTokenCounts\n\t\t\t);\n\t\t\tif (taskContextResult.context) 
{\n\t\t\t\tcontextSections.push(taskContextResult.context);\n\n\t\t\t\t// Add task breakdown if token counting is enabled\n\t\t\t\tif (includeTokenCounts && taskContextResult.breakdown) {\n\t\t\t\t\ttokenBreakdown.tasks = taskContextResult.breakdown;\n\t\t\t\t\tconst taskTokens = taskContextResult.breakdown.reduce(\n\t\t\t\t\t\t(sum, task) => sum + task.tokens,\n\t\t\t\t\t\t0\n\t\t\t\t\t);\n\t\t\t\t\ttokenBreakdown.total += taskTokens;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Add file context\n\t\tif (files.length > 0) {\n\t\t\tconst fileContextResult = await this._gatherFileContext(\n\t\t\t\tfiles,\n\t\t\t\tformat,\n\t\t\t\tincludeTokenCounts\n\t\t\t);\n\t\t\tif (fileContextResult.context) {\n\t\t\t\tcontextSections.push(fileContextResult.context);\n\n\t\t\t\t// Add file breakdown if token counting is enabled\n\t\t\t\tif (includeTokenCounts && fileContextResult.breakdown) {\n\t\t\t\t\ttokenBreakdown.files = fileContextResult.breakdown;\n\t\t\t\t\tconst fileTokens = fileContextResult.breakdown.reduce(\n\t\t\t\t\t\t(sum, file) => sum + file.tokens,\n\t\t\t\t\t\t0\n\t\t\t\t\t);\n\t\t\t\t\ttokenBreakdown.total += fileTokens;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Add project tree context\n\t\tif (includeProjectTree) {\n\t\t\tconst treeContextResult = await this._gatherProjectTreeContext(\n\t\t\t\tformat,\n\t\t\t\tincludeTokenCounts\n\t\t\t);\n\t\t\tif (treeContextResult.context) {\n\t\t\t\tcontextSections.push(treeContextResult.context);\n\n\t\t\t\t// Add tree breakdown if token counting is enabled\n\t\t\t\tif (includeTokenCounts && treeContextResult.breakdown) {\n\t\t\t\t\ttokenBreakdown.projectTree = treeContextResult.breakdown;\n\t\t\t\t\ttokenBreakdown.total += treeContextResult.breakdown.tokens;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tconst finalContext = this._joinContextSections(contextSections, format);\n\n\t\tconst result = {\n\t\t\tcontext: finalContext,\n\t\t\tanalysisData: analysisData,\n\t\t\tcontextSections: contextSections.length,\n\t\t\tfinalTaskIds: 
Array.from(finalTaskIds)\n\t\t};\n\n\t\t// Only include tokenBreakdown if it was requested\n\t\tif (includeTokenCounts) {\n\t\t\tresult.tokenBreakdown = tokenBreakdown;\n\t\t}\n\n\t\treturn result;\n\t}\n\n\t_performSemanticSearch(query, maxResults) {\n\t\tconst searchableTasks = this.allTasks.map((task) => {\n\t\t\tconst dependencyTitles =\n\t\t\t\ttask.dependencies?.length > 0\n\t\t\t\t\t? task.dependencies\n\t\t\t\t\t\t\t.map((depId) => this.allTasks.find((t) => t.id === depId)?.title)\n\t\t\t\t\t\t\t.filter(Boolean)\n\t\t\t\t\t\t\t.join(' ')\n\t\t\t\t\t: '';\n\t\t\treturn { ...task, dependencyTitles };\n\t\t});\n\n\t\t// Use the exact same approach as add-task.js\n\t\tconst searchOptions = {\n\t\t\tincludeScore: true, // Return match scores\n\t\t\tthreshold: 0.4, // Lower threshold = stricter matching (range 0-1)\n\t\t\tkeys: [\n\t\t\t\t{ name: 'title', weight: 1.5 }, // Title is most important\n\t\t\t\t{ name: 'description', weight: 2 }, // Description is very important\n\t\t\t\t{ name: 'details', weight: 3 }, // Details is most important\n\t\t\t\t// Search dependencies to find tasks that depend on similar things\n\t\t\t\t{ name: 'dependencyTitles', weight: 0.5 }\n\t\t\t],\n\t\t\t// Sort matches by score (lower is better)\n\t\t\tshouldSort: true,\n\t\t\t// Allow searching in nested properties\n\t\t\tuseExtendedSearch: true,\n\t\t\t// Return up to 50 matches\n\t\t\tlimit: 50\n\t\t};\n\n\t\t// Create search index using Fuse.js\n\t\tconst fuse = new Fuse(searchableTasks, searchOptions);\n\n\t\t// Extract significant words and phrases from the prompt (like add-task.js does)\n\t\tconst promptWords = query\n\t\t\t.toLowerCase()\n\t\t\t.replace(/[^\\w\\s-]/g, ' ') // Replace non-alphanumeric chars with spaces\n\t\t\t.split(/\\s+/)\n\t\t\t.filter((word) => word.length > 3); // Words at least 4 chars\n\n\t\t// Use the user's prompt for fuzzy search\n\t\tconst fuzzyResults = fuse.search(query);\n\n\t\t// Also search for each significant word to catch different 
aspects\n\t\tconst wordResults = [];\n\t\tfor (const word of promptWords) {\n\t\t\tif (word.length > 5) {\n\t\t\t\t// Only use significant words\n\t\t\t\tconst results = fuse.search(word);\n\t\t\t\tif (results.length > 0) {\n\t\t\t\t\twordResults.push(...results);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Merge and deduplicate results\n\t\tconst mergedResults = [...fuzzyResults];\n\n\t\t// Add word results that aren't already in fuzzyResults\n\t\tfor (const wordResult of wordResults) {\n\t\t\tif (!mergedResults.some((r) => r.item.id === wordResult.item.id)) {\n\t\t\t\tmergedResults.push(wordResult);\n\t\t\t}\n\t\t}\n\n\t\t// Group search results by relevance\n\t\tconst highRelevance = mergedResults\n\t\t\t.filter((result) => result.score < 0.25)\n\t\t\t.map((result) => result.item);\n\n\t\tconst mediumRelevance = mergedResults\n\t\t\t.filter((result) => result.score >= 0.25 && result.score < 0.4)\n\t\t\t.map((result) => result.item);\n\n\t\t// Get recent tasks (newest first)\n\t\tconst recentTasks = [...this.allTasks]\n\t\t\t.sort((a, b) => b.id - a.id)\n\t\t\t.slice(0, 5);\n\n\t\t// Combine high relevance, medium relevance, and recent tasks\n\t\t// Prioritize high relevance first\n\t\tconst allRelevantTasks = [...highRelevance];\n\n\t\t// Add medium relevance if not already included\n\t\tfor (const task of mediumRelevance) {\n\t\t\tif (!allRelevantTasks.some((t) => t.id === task.id)) {\n\t\t\t\tallRelevantTasks.push(task);\n\t\t\t}\n\t\t}\n\n\t\t// Add recent tasks if not already included\n\t\tfor (const task of recentTasks) {\n\t\t\tif (!allRelevantTasks.some((t) => t.id === task.id)) {\n\t\t\t\tallRelevantTasks.push(task);\n\t\t\t}\n\t\t}\n\n\t\t// Get top N results for context\n\t\tconst finalResults = allRelevantTasks.slice(0, maxResults);\n\t\treturn {\n\t\t\ttasks: finalResults,\n\t\t\tanalysisData: {\n\t\t\t\thighRelevance: highRelevance,\n\t\t\t\tmediumRelevance: mediumRelevance,\n\t\t\t\trecentTasks: recentTasks,\n\t\t\t\tallRelevantTasks: 
allRelevantTasks\n\t\t\t}\n\t\t};\n\t}\n\n\t_buildDependencyContext(taskIds) {\n\t\tconst { allRelatedTaskIds, graphs, depthMap } =\n\t\t\tthis._buildDependencyGraphs(taskIds);\n\t\tif (allRelatedTaskIds.size === 0) return '';\n\n\t\tconst dependentTasks = Array.from(allRelatedTaskIds)\n\t\t\t.map((id) => this.allTasks.find((t) => t.id === id))\n\t\t\t.filter(Boolean)\n\t\t\t.sort((a, b) => (depthMap.get(a.id) || 0) - (depthMap.get(b.id) || 0));\n\n\t\tconst uniqueDetailedTasks = dependentTasks.slice(0, 8);\n\n\t\tlet context = `\\nThis task relates to a dependency structure with ${dependentTasks.length} related tasks in the chain.`;\n\n\t\tconst directDeps = this.allTasks.filter((t) => taskIds.includes(t.id));\n\t\tif (directDeps.length > 0) {\n\t\t\tcontext += `\\n\\nDirect dependencies:\\n${directDeps\n\t\t\t\t.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)\n\t\t\t\t.join('\\n')}`;\n\t\t}\n\n\t\tconst indirectDeps = dependentTasks.filter((t) => !taskIds.includes(t.id));\n\t\tif (indirectDeps.length > 0) {\n\t\t\tcontext += `\\n\\nIndirect dependencies (dependencies of dependencies):\\n${indirectDeps\n\t\t\t\t.slice(0, 5)\n\t\t\t\t.map((t) => `- Task ${t.id}: ${t.title} - ${t.description}`)\n\t\t\t\t.join('\\n')}`;\n\t\t\tif (indirectDeps.length > 5)\n\t\t\t\tcontext += `\\n- ... and ${\n\t\t\t\t\tindirectDeps.length - 5\n\t\t\t\t} more indirect dependencies`;\n\t\t}\n\n\t\tcontext += `\\n\\nDetailed information about dependencies:`;\n\t\tfor (const depTask of uniqueDetailedTasks) {\n\t\t\tconst isDirect = taskIds.includes(depTask.id)\n\t\t\t\t? 
' [DIRECT DEPENDENCY]'\n\t\t\t\t: '';\n\t\t\tcontext += `\\n\\n------ Task ${depTask.id}${isDirect}: ${depTask.title} ------\\n`;\n\t\t\tcontext += `Description: ${depTask.description}\\n`;\n\t\t\tif (depTask.dependencies?.length) {\n\t\t\t\tcontext += `Dependencies: ${depTask.dependencies.join(', ')}\\n`;\n\t\t\t}\n\t\t\tif (depTask.details) {\n\t\t\t\tcontext += `Implementation Details: ${truncate(\n\t\t\t\t\tdepTask.details,\n\t\t\t\t\t400\n\t\t\t\t)}\\n`;\n\t\t\t}\n\t\t}\n\n\t\tif (graphs.length > 0) {\n\t\t\tcontext += '\\n\\nDependency Chain Visualization:';\n\t\t\tcontext += graphs\n\t\t\t\t.map((graph) => this._formatDependencyChain(graph))\n\t\t\t\t.join('');\n\t\t}\n\n\t\treturn context;\n\t}\n\n\t_buildDependencyGraphs(taskIds) {\n\t\tconst visited = new Set();\n\t\tconst depthMap = new Map();\n\t\tconst graphs = [];\n\n\t\tfor (const id of taskIds) {\n\t\t\tconst graph = this._buildDependencyGraph(id, visited, depthMap);\n\t\t\tif (graph) graphs.push(graph);\n\t\t}\n\n\t\treturn { allRelatedTaskIds: visited, graphs, depthMap };\n\t}\n\n\t_buildDependencyGraph(taskId, visited, depthMap, depth = 0) {\n\t\tif (visited.has(taskId) || depth > 5) return null; // Limit recursion depth\n\t\tconst task = this.allTasks.find((t) => t.id === taskId);\n\t\tif (!task) return null;\n\n\t\tvisited.add(taskId);\n\t\tif (!depthMap.has(taskId) || depth < depthMap.get(taskId)) {\n\t\t\tdepthMap.set(taskId, depth);\n\t\t}\n\n\t\tconst dependencies =\n\t\t\ttask.dependencies\n\t\t\t\t?.map((depId) =>\n\t\t\t\t\tthis._buildDependencyGraph(depId, visited, depthMap, depth + 1)\n\t\t\t\t)\n\t\t\t\t.filter(Boolean) || [];\n\n\t\treturn { ...task, dependencies };\n\t}\n\n\t_formatDependencyChain(node, prefix = '', isLast = true, depth = 0) {\n\t\tif (depth > 3) return '';\n\t\tconst connector = isLast ? '└── ' : '├── ';\n\t\tlet result = `${prefix}${connector}Task ${node.id}: ${node.title}`;\n\t\tif (node.dependencies?.length) {\n\t\t\tconst childPrefix = prefix + (isLast ? 
' ' : '│ ');\n\t\t\tresult += node.dependencies\n\t\t\t\t.map((dep, index) =>\n\t\t\t\t\tthis._formatDependencyChain(\n\t\t\t\t\t\tdep,\n\t\t\t\t\t\tchildPrefix,\n\t\t\t\t\t\tindex === node.dependencies.length - 1,\n\t\t\t\t\t\tdepth + 1\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\t.join('');\n\t\t}\n\t\treturn '\\n' + result;\n\t}\n\n\t/**\n\t * Parse task ID strings into structured format\n\t * Supports formats: \"15\", \"15.2\", \"16,17.1\"\n\t * @param {Array<string>} taskIds - Array of task ID strings\n\t * @returns {Array<Object>} Parsed task identifiers\n\t */\n\t_parseTaskIds(taskIds) {\n\t\tconst parsed = [];\n\n\t\tfor (const idStr of taskIds) {\n\t\t\tif (idStr.includes('.')) {\n\t\t\t\t// Subtask format: \"15.2\"\n\t\t\t\tconst [parentId, subtaskId] = idStr.split('.');\n\t\t\t\tparsed.push({\n\t\t\t\t\ttype: 'subtask',\n\t\t\t\t\tparentId: parseInt(parentId, 10),\n\t\t\t\t\tsubtaskId: parseInt(subtaskId, 10),\n\t\t\t\t\tfullId: idStr\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\t// Task format: \"15\"\n\t\t\t\tparsed.push({\n\t\t\t\t\ttype: 'task',\n\t\t\t\t\ttaskId: parseInt(idStr, 10),\n\t\t\t\t\tfullId: idStr\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\treturn parsed;\n\t}\n\n\t/**\n\t * Gather context from tasks and subtasks\n\t * @param {Array<string>} taskIds - Task/subtask IDs\n\t * @param {string} format - Output format\n\t * @param {boolean} includeTokenCounts - Whether to include token breakdown\n\t * @returns {Promise<Object>} Task context result with breakdown\n\t */\n\tasync _gatherTaskContext(taskIds, format, includeTokenCounts = false) {\n\t\ttry {\n\t\t\tif (!this.allTasks || this.allTasks.length === 0) {\n\t\t\t\treturn { context: null, breakdown: [] };\n\t\t\t}\n\n\t\t\tconst parsedIds = this._parseTaskIds(taskIds);\n\t\t\tconst contextItems = [];\n\t\t\tconst breakdown = [];\n\n\t\t\tfor (const parsed of parsedIds) {\n\t\t\t\tlet formattedItem = null;\n\t\t\t\tlet itemInfo = null;\n\n\t\t\t\tif (parsed.type === 'task') {\n\t\t\t\t\tconst result = 
findTaskById(this.allTasks, parsed.taskId);\n\t\t\t\t\tif (result.task) {\n\t\t\t\t\t\tformattedItem = this._formatTaskForContext(result.task, format);\n\t\t\t\t\t\titemInfo = {\n\t\t\t\t\t\t\tid: parsed.fullId,\n\t\t\t\t\t\t\ttype: 'task',\n\t\t\t\t\t\t\ttitle: result.task.title,\n\t\t\t\t\t\t\ttokens: includeTokenCounts ? this.countTokens(formattedItem) : 0,\n\t\t\t\t\t\t\tcharacters: formattedItem.length\n\t\t\t\t\t\t};\n\t\t\t\t\t}\n\t\t\t\t} else if (parsed.type === 'subtask') {\n\t\t\t\t\tconst parentResult = findTaskById(this.allTasks, parsed.parentId);\n\t\t\t\t\tif (parentResult.task && parentResult.task.subtasks) {\n\t\t\t\t\t\tconst subtask = parentResult.task.subtasks.find(\n\t\t\t\t\t\t\t(st) => st.id === parsed.subtaskId\n\t\t\t\t\t\t);\n\t\t\t\t\t\tif (subtask) {\n\t\t\t\t\t\t\tformattedItem = this._formatSubtaskForContext(\n\t\t\t\t\t\t\t\tsubtask,\n\t\t\t\t\t\t\t\tparentResult.task,\n\t\t\t\t\t\t\t\tformat\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\titemInfo = {\n\t\t\t\t\t\t\t\tid: parsed.fullId,\n\t\t\t\t\t\t\t\ttype: 'subtask',\n\t\t\t\t\t\t\t\ttitle: subtask.title,\n\t\t\t\t\t\t\t\tparentTitle: parentResult.task.title,\n\t\t\t\t\t\t\t\ttokens: includeTokenCounts\n\t\t\t\t\t\t\t\t\t? this.countTokens(formattedItem)\n\t\t\t\t\t\t\t\t\t: 0,\n\t\t\t\t\t\t\t\tcharacters: formattedItem.length\n\t\t\t\t\t\t\t};\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (formattedItem && itemInfo) {\n\t\t\t\t\tcontextItems.push(formattedItem);\n\t\t\t\t\tif (includeTokenCounts) {\n\t\t\t\t\t\tbreakdown.push(itemInfo);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (contextItems.length === 0) {\n\t\t\t\treturn { context: null, breakdown: [] };\n\t\t\t}\n\n\t\t\tconst finalContext = this._formatTaskContextSection(contextItems, format);\n\t\t\treturn {\n\t\t\t\tcontext: finalContext,\n\t\t\t\tbreakdown: includeTokenCounts ? 
breakdown : []\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tconsole.warn(`Warning: Could not gather task context: ${error.message}`);\n\t\t\treturn { context: null, breakdown: [] };\n\t\t}\n\t}\n\n\t/**\n\t * Format a task for context inclusion\n\t * @param {Object} task - Task object\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted task context\n\t */\n\t_formatTaskForContext(task, format) {\n\t\tconst sections = [];\n\n\t\tsections.push(`**Task ${task.id}: ${task.title}**`);\n\t\tsections.push(`Description: ${task.description}`);\n\t\tsections.push(`Status: ${task.status || 'pending'}`);\n\t\tsections.push(`Priority: ${task.priority || 'medium'}`);\n\n\t\tif (task.dependencies && task.dependencies.length > 0) {\n\t\t\tsections.push(`Dependencies: ${task.dependencies.join(', ')}`);\n\t\t}\n\n\t\tif (task.details) {\n\t\t\tconst details = truncate(task.details, 500);\n\t\t\tsections.push(`Implementation Details: ${details}`);\n\t\t}\n\n\t\tif (task.testStrategy) {\n\t\t\tconst testStrategy = truncate(task.testStrategy, 300);\n\t\t\tsections.push(`Test Strategy: ${testStrategy}`);\n\t\t}\n\n\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\tsections.push(`Subtasks: ${task.subtasks.length} subtasks defined`);\n\t\t}\n\n\t\treturn sections.join('\\n');\n\t}\n\n\t/**\n\t * Format a subtask for context inclusion\n\t * @param {Object} subtask - Subtask object\n\t * @param {Object} parentTask - Parent task object\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted subtask context\n\t */\n\t_formatSubtaskForContext(subtask, parentTask, format) {\n\t\tconst sections = [];\n\n\t\tsections.push(\n\t\t\t`**Subtask ${parentTask.id}.${subtask.id}: ${subtask.title}**`\n\t\t);\n\t\tsections.push(`Parent Task: ${parentTask.title}`);\n\t\tsections.push(`Description: ${subtask.description}`);\n\t\tsections.push(`Status: ${subtask.status || 'pending'}`);\n\n\t\tif (subtask.dependencies && subtask.dependencies.length > 0) 
{\n\t\t\tsections.push(`Dependencies: ${subtask.dependencies.join(', ')}`);\n\t\t}\n\n\t\tif (subtask.details) {\n\t\t\tconst details = truncate(subtask.details, 500);\n\t\t\tsections.push(`Implementation Details: ${details}`);\n\t\t}\n\n\t\treturn sections.join('\\n');\n\t}\n\n\t/**\n\t * Gather context from files\n\t * @param {Array<string>} filePaths - File paths to read\n\t * @param {string} format - Output format\n\t * @param {boolean} includeTokenCounts - Whether to include token breakdown\n\t * @returns {Promise<Object>} File context result with breakdown\n\t */\n\tasync _gatherFileContext(filePaths, format, includeTokenCounts = false) {\n\t\tconst fileContents = [];\n\t\tconst breakdown = [];\n\n\t\tfor (const filePath of filePaths) {\n\t\t\ttry {\n\t\t\t\tconst fullPath = path.isAbsolute(filePath)\n\t\t\t\t\t? filePath\n\t\t\t\t\t: path.join(this.projectRoot, filePath);\n\n\t\t\t\tif (!fs.existsSync(fullPath)) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tconst stats = fs.statSync(fullPath);\n\t\t\t\tif (!stats.isFile()) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t// Check file size (limit to 50KB for context)\n\t\t\t\tif (stats.size > 50 * 1024) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tconst content = fs.readFileSync(fullPath, 'utf-8');\n\t\t\t\tconst relativePath = path.relative(this.projectRoot, fullPath);\n\n\t\t\t\tconst fileData = {\n\t\t\t\t\tpath: relativePath,\n\t\t\t\t\tsize: stats.size,\n\t\t\t\t\tcontent: content,\n\t\t\t\t\tlastModified: stats.mtime\n\t\t\t\t};\n\n\t\t\t\tfileContents.push(fileData);\n\n\t\t\t\t// Calculate tokens for this individual file if requested\n\t\t\t\tif (includeTokenCounts) {\n\t\t\t\t\tconst formattedFile = this._formatSingleFileForContext(\n\t\t\t\t\t\tfileData,\n\t\t\t\t\t\tformat\n\t\t\t\t\t);\n\t\t\t\t\tbreakdown.push({\n\t\t\t\t\t\tpath: relativePath,\n\t\t\t\t\t\tsizeKB: Math.round(stats.size / 1024),\n\t\t\t\t\t\ttokens: this.countTokens(formattedFile),\n\t\t\t\t\t\tcharacters: 
formattedFile.length\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\t`Warning: Could not read file ${filePath}: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\tif (fileContents.length === 0) {\n\t\t\treturn { context: null, breakdown: [] };\n\t\t}\n\n\t\tconst finalContext = this._formatFileContextSection(fileContents, format);\n\t\treturn {\n\t\t\tcontext: finalContext,\n\t\t\tbreakdown: includeTokenCounts ? breakdown : []\n\t\t};\n\t}\n\n\t/**\n\t * Generate project file tree context\n\t * @param {string} format - Output format\n\t * @param {boolean} includeTokenCounts - Whether to include token breakdown\n\t * @returns {Promise<Object>} Project tree context result with breakdown\n\t */\n\tasync _gatherProjectTreeContext(format, includeTokenCounts = false) {\n\t\ttry {\n\t\t\tconst tree = this._generateFileTree(this.projectRoot, 5); // Max depth 5\n\t\t\tconst finalContext = this._formatProjectTreeSection(tree, format);\n\n\t\t\tconst breakdown = includeTokenCounts\n\t\t\t\t? 
{\n\t\t\t\t\t\ttokens: this.countTokens(finalContext),\n\t\t\t\t\t\tcharacters: finalContext.length,\n\t\t\t\t\t\tfileCount: tree.fileCount || 0,\n\t\t\t\t\t\tdirCount: tree.dirCount || 0\n\t\t\t\t\t}\n\t\t\t\t: null;\n\n\t\t\treturn {\n\t\t\t\tcontext: finalContext,\n\t\t\t\tbreakdown: breakdown\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tconsole.warn(\n\t\t\t\t`Warning: Could not generate project tree: ${error.message}`\n\t\t\t);\n\t\t\treturn { context: null, breakdown: null };\n\t\t}\n\t}\n\n\t/**\n\t * Format a single file for context (used for token counting)\n\t * @param {Object} fileData - File data object\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted file context\n\t */\n\t_formatSingleFileForContext(fileData, format) {\n\t\tconst header = `**File: ${fileData.path}** (${Math.round(fileData.size / 1024)}KB)`;\n\t\tconst content = `\\`\\`\\`\\n${fileData.content}\\n\\`\\`\\``;\n\t\treturn `${header}\\n\\n${content}`;\n\t}\n\n\t/**\n\t * Generate file tree structure\n\t * @param {string} dirPath - Directory path\n\t * @param {number} maxDepth - Maximum depth to traverse\n\t * @param {number} currentDepth - Current depth\n\t * @returns {Object} File tree structure\n\t */\n\t_generateFileTree(dirPath, maxDepth, currentDepth = 0) {\n\t\tconst ignoreDirs = [\n\t\t\t'.git',\n\t\t\t'node_modules',\n\t\t\t'.env',\n\t\t\t'coverage',\n\t\t\t'dist',\n\t\t\t'build'\n\t\t];\n\t\tconst ignoreFiles = ['.DS_Store', '.env', '.env.local', '.env.production'];\n\n\t\tif (currentDepth >= maxDepth) {\n\t\t\treturn null;\n\t\t}\n\n\t\ttry {\n\t\t\tconst items = fs.readdirSync(dirPath);\n\t\t\tconst tree = {\n\t\t\t\tname: path.basename(dirPath),\n\t\t\t\ttype: 'directory',\n\t\t\t\tchildren: [],\n\t\t\t\tfileCount: 0,\n\t\t\t\tdirCount: 0\n\t\t\t};\n\n\t\t\tfor (const item of items) {\n\t\t\t\tif (ignoreDirs.includes(item) || ignoreFiles.includes(item)) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tconst itemPath = path.join(dirPath, 
item);\n\t\t\t\tconst stats = fs.statSync(itemPath);\n\n\t\t\t\tif (stats.isDirectory()) {\n\t\t\t\t\ttree.dirCount++;\n\t\t\t\t\tif (currentDepth < maxDepth - 1) {\n\t\t\t\t\t\tconst subtree = this._generateFileTree(\n\t\t\t\t\t\t\titemPath,\n\t\t\t\t\t\t\tmaxDepth,\n\t\t\t\t\t\t\tcurrentDepth + 1\n\t\t\t\t\t\t);\n\t\t\t\t\t\tif (subtree) {\n\t\t\t\t\t\t\ttree.children.push(subtree);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ttree.fileCount++;\n\t\t\t\t\ttree.children.push({\n\t\t\t\t\t\tname: item,\n\t\t\t\t\t\ttype: 'file',\n\t\t\t\t\t\tsize: stats.size\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn tree;\n\t\t} catch (error) {\n\t\t\treturn null;\n\t\t}\n\t}\n\n\t/**\n\t * Format custom context section\n\t * @param {string} customContext - Custom context string\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted custom context\n\t */\n\t_formatCustomContext(customContext, format) {\n\t\tswitch (format) {\n\t\t\tcase 'research':\n\t\t\t\treturn `## Additional Context\\n\\n${customContext}`;\n\t\t\tcase 'chat':\n\t\t\t\treturn `**Additional Context:**\\n${customContext}`;\n\t\t\tcase 'system-prompt':\n\t\t\t\treturn `Additional context: ${customContext}`;\n\t\t\tdefault:\n\t\t\t\treturn customContext;\n\t\t}\n\t}\n\n\t/**\n\t * Format task context section\n\t * @param {Array<string>} taskItems - Formatted task items\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted task context section\n\t */\n\t_formatTaskContextSection(taskItems, format) {\n\t\tswitch (format) {\n\t\t\tcase 'research':\n\t\t\t\treturn `## Task Context\\n\\n${taskItems.join('\\n\\n---\\n\\n')}`;\n\t\t\tcase 'chat':\n\t\t\t\treturn `**Task Context:**\\n\\n${taskItems.join('\\n\\n')}`;\n\t\t\tcase 'system-prompt':\n\t\t\t\treturn `Task context: ${taskItems.join(' | ')}`;\n\t\t\tdefault:\n\t\t\t\treturn taskItems.join('\\n\\n');\n\t\t}\n\t}\n\n\t/**\n\t * Format file context section\n\t * @param {Array<Object>} fileContents - 
File content objects\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted file context section\n\t */\n\t_formatFileContextSection(fileContents, format) {\n\t\tconst fileItems = fileContents.map((file) => {\n\t\t\tconst header = `**File: ${file.path}** (${Math.round(file.size / 1024)}KB)`;\n\t\t\tconst content = `\\`\\`\\`\\n${file.content}\\n\\`\\`\\``;\n\t\t\treturn `${header}\\n\\n${content}`;\n\t\t});\n\n\t\tswitch (format) {\n\t\t\tcase 'research':\n\t\t\t\treturn `## File Context\\n\\n${fileItems.join('\\n\\n---\\n\\n')}`;\n\t\t\tcase 'chat':\n\t\t\t\treturn `**File Context:**\\n\\n${fileItems.join('\\n\\n')}`;\n\t\t\tcase 'system-prompt':\n\t\t\t\treturn `File context: ${fileContents.map((f) => `${f.path} (${f.content.substring(0, 200)}...)`).join(' | ')}`;\n\t\t\tdefault:\n\t\t\t\treturn fileItems.join('\\n\\n');\n\t\t}\n\t}\n\n\t/**\n\t * Format project tree section\n\t * @param {Object} tree - File tree structure\n\t * @param {string} format - Output format\n\t * @returns {string} Formatted project tree section\n\t */\n\t_formatProjectTreeSection(tree, format) {\n\t\tconst treeString = this._renderFileTree(tree);\n\n\t\tswitch (format) {\n\t\t\tcase 'research':\n\t\t\t\treturn `## Project Structure\\n\\n\\`\\`\\`\\n${treeString}\\n\\`\\`\\``;\n\t\t\tcase 'chat':\n\t\t\t\treturn `**Project Structure:**\\n\\`\\`\\`\\n${treeString}\\n\\`\\`\\``;\n\t\t\tcase 'system-prompt':\n\t\t\t\treturn `Project structure: ${treeString.replace(/\\n/g, ' | ')}`;\n\t\t\tdefault:\n\t\t\t\treturn treeString;\n\t\t}\n\t}\n\n\t/**\n\t * Render file tree as string\n\t * @param {Object} tree - File tree structure\n\t * @param {string} prefix - Current prefix for indentation\n\t * @returns {string} Rendered tree string\n\t */\n\t_renderFileTree(tree, prefix = '') {\n\t\tlet result = `${prefix}${tree.name}/`;\n\n\t\tif (tree.fileCount > 0 || tree.dirCount > 0) {\n\t\t\tresult += ` (${tree.fileCount} files, ${tree.dirCount} dirs)`;\n\t\t}\n\n\t\tresult += 
'\\n';\n\n\t\tif (tree.children) {\n\t\t\ttree.children.forEach((child, index) => {\n\t\t\t\tconst isLast = index === tree.children.length - 1;\n\t\t\t\tconst childPrefix = prefix + (isLast ? '└── ' : '├── ');\n\t\t\t\tconst nextPrefix = prefix + (isLast ? ' ' : '│ ');\n\n\t\t\t\tif (child.type === 'directory') {\n\t\t\t\t\tresult += this._renderFileTree(child, childPrefix);\n\t\t\t\t} else {\n\t\t\t\t\tresult += `${childPrefix}${child.name}\\n`;\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\n\t\treturn result;\n\t}\n\n\t/**\n\t * Join context sections based on format\n\t * @param {Array<string>} sections - Context sections\n\t * @param {string} format - Output format\n\t * @returns {string} Joined context string\n\t */\n\t_joinContextSections(sections, format) {\n\t\tif (sections.length === 0) {\n\t\t\treturn '';\n\t\t}\n\n\t\tswitch (format) {\n\t\t\tcase 'research':\n\t\t\t\treturn sections.join('\\n\\n---\\n\\n');\n\t\t\tcase 'chat':\n\t\t\t\treturn sections.join('\\n\\n');\n\t\t\tcase 'system-prompt':\n\t\t\t\treturn sections.join(' ');\n\t\t\tdefault:\n\t\t\t\treturn sections.join('\\n\\n');\n\t\t}\n\t}\n}\n\n/**\n * Factory function to create a context gatherer instance\n * @param {string} projectRoot - Project root directory\n * @param {string} tag - Tag for the task\n * @returns {ContextGatherer} Context gatherer instance\n * @throws {Error} If tag is not provided\n */\nexport function createContextGatherer(projectRoot, tag) {\n\tif (!tag) {\n\t\tthrow new Error('Tag is required');\n\t}\n\treturn new ContextGatherer(projectRoot, tag);\n}\n\nexport default ContextGatherer;\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/generate-task-files.js", "/**\n * generate-task-files.js\n * Direct function implementation for generating task files from tasks.json\n */\n\nimport { generateTaskFiles } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct 
function wrapper for generateTaskFiles with error handling.\n *\n * @param {Object} args - Command arguments containing tasksJsonPath and outputDir.\n * @param {string} args.tasksJsonPath - Path to the tasks.json file.\n * @param {string} args.outputDir - Path to the output directory.\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object.\n * @returns {Promise<Object>} - Result object with success status and data/error information.\n */\nexport async function generateTaskFilesDirect(args, log) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, outputDir, projectRoot, tag } = args;\n\ttry {\n\t\tlog.info(`Generating task files with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if paths were provided\n\t\tif (!tasksJsonPath) {\n\t\t\tconst errorMessage = 'tasksJsonPath is required but was not provided.';\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: errorMessage }\n\t\t\t};\n\t\t}\n\t\tif (!outputDir) {\n\t\t\tconst errorMessage = 'outputDir is required but was not provided.';\n\t\t\tlog.error(errorMessage);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: errorMessage }\n\t\t\t};\n\t\t}\n\n\t\t// Use the provided paths\n\t\tconst tasksPath = tasksJsonPath;\n\t\tconst resolvedOutputDir = outputDir;\n\n\t\tlog.info(`Generating task files from ${tasksPath} to ${resolvedOutputDir}`);\n\n\t\t// Execute core generateTaskFiles function in a separate try/catch\n\t\ttry {\n\t\t\t// Enable silent mode to prevent logs from being written to stdout\n\t\t\tenableSilentMode();\n\n\t\t\t// Pass projectRoot and tag so the core respects context\n\t\t\tgenerateTaskFiles(tasksPath, resolvedOutputDir, {\n\t\t\t\tprojectRoot,\n\t\t\t\ttag,\n\t\t\t\tmcpLog: log\n\t\t\t});\n\n\t\t\t// Restore normal logging after task 
generation\n\t\t\tdisableSilentMode();\n\t\t} catch (genError) {\n\t\t\t// Make sure to restore normal logging even if there's an error\n\t\t\tdisableSilentMode();\n\n\t\t\tlog.error(`Error in generateTaskFiles: ${genError.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'GENERATE_FILES_ERROR', message: genError.message }\n\t\t\t};\n\t\t}\n\n\t\t// Return success with file paths\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: `Successfully generated task files`,\n\t\t\t\ttasksPath: tasksPath,\n\t\t\t\toutputDir: resolvedOutputDir,\n\t\t\t\ttaskFiles:\n\t\t\t\t\t'Individual task files have been generated in the output directory'\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging if an outer error occurs\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error generating task files: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'GENERATE_TASKS_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown error generating task files'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/task-manager/research.js", "/**\n * research.js\n * Core research functionality for AI-powered queries with project context\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport inquirer from 'inquirer';\nimport { highlight } from 'cli-highlight';\nimport { ContextGatherer } from '../utils/contextGatherer.js';\nimport { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';\nimport { generateTextService } from '../ai-services-unified.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport {\n\tlog as consoleLog,\n\tfindProjectRoot,\n\treadJSON,\n\tflattenTasksWithSubtasks\n} from '../utils.js';\nimport {\n\tdisplayAiUsageSummary,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator\n} from '../ui.js';\n\n/**\n * Perform AI-powered research with project context\n * @param {string} query - 
Research query/prompt\n * @param {Object} options - Research options\n * @param {Array<string>} [options.taskIds] - Task/subtask IDs for context\n * @param {Array<string>} [options.filePaths] - File paths for context\n * @param {string} [options.customContext] - Additional custom context\n * @param {boolean} [options.includeProjectTree] - Include project file tree\n * @param {string} [options.detailLevel] - Detail level: 'low', 'medium', 'high'\n * @param {string} [options.projectRoot] - Project root directory\n * @param {string} [options.tag] - Tag for the task\n * @param {boolean} [options.saveToFile] - Whether to save results to file (MCP mode)\n * @param {Object} [context] - Execution context\n * @param {Object} [context.session] - MCP session object\n * @param {Object} [context.mcpLog] - MCP logger object\n * @param {string} [context.commandName] - Command name for telemetry\n * @param {string} [context.outputType] - Output type ('cli' or 'mcp')\n * @param {string} [outputFormat] - Output format ('text' or 'json')\n * @param {boolean} [allowFollowUp] - Whether to allow follow-up questions (default: true)\n * @returns {Promise<Object>} Research results with telemetry data\n */\nasync function performResearch(\n\tquery,\n\toptions = {},\n\tcontext = {},\n\toutputFormat = 'text',\n\tallowFollowUp = true\n) {\n\tconst {\n\t\ttaskIds = [],\n\t\tfilePaths = [],\n\t\tcustomContext = '',\n\t\tincludeProjectTree = false,\n\t\tdetailLevel = 'medium',\n\t\tprojectRoot: providedProjectRoot,\n\t\ttag,\n\t\tsaveToFile = false\n\t} = options;\n\n\tconst {\n\t\tsession,\n\t\tmcpLog,\n\t\tcommandName = 'research',\n\t\toutputType = 'cli'\n\t} = context;\n\tconst isMCP = !!mcpLog;\n\n\t// Determine project root\n\tconst projectRoot = providedProjectRoot || findProjectRoot();\n\tif (!projectRoot) {\n\t\tthrow new Error('Could not determine project root directory');\n\t}\n\n\t// Create consistent logger\n\tconst logFn = isMCP\n\t\t? 
mcpLog\n\t\t: {\n\t\t\t\tinfo: (...args) => consoleLog('info', ...args),\n\t\t\t\twarn: (...args) => consoleLog('warn', ...args),\n\t\t\t\terror: (...args) => consoleLog('error', ...args),\n\t\t\t\tdebug: (...args) => consoleLog('debug', ...args),\n\t\t\t\tsuccess: (...args) => consoleLog('success', ...args)\n\t\t\t};\n\n\t// Show UI banner for CLI mode\n\tif (outputFormat === 'text') {\n\t\tconsole.log(\n\t\t\tboxen(chalk.cyan.bold(`🔍 AI Research Query`), {\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'cyan',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t})\n\t\t);\n\t}\n\n\ttry {\n\t\t// Initialize context gatherer\n\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\n\t\t// Auto-discover relevant tasks using fuzzy search to supplement provided tasks\n\t\tlet finalTaskIds = [...taskIds]; // Start with explicitly provided tasks\n\t\tlet autoDiscoveredIds = [];\n\n\t\ttry {\n\t\t\tconst tasksPath = path.join(\n\t\t\t\tprojectRoot,\n\t\t\t\t'.taskmaster',\n\t\t\t\t'tasks',\n\t\t\t\t'tasks.json'\n\t\t\t);\n\t\t\tconst tasksData = await readJSON(tasksPath, projectRoot, tag);\n\n\t\t\tif (tasksData && tasksData.tasks && tasksData.tasks.length > 0) {\n\t\t\t\t// Flatten tasks to include subtasks for fuzzy search\n\t\t\t\tconst flattenedTasks = flattenTasksWithSubtasks(tasksData.tasks);\n\t\t\t\tconst fuzzySearch = new FuzzyTaskSearch(flattenedTasks, 'research');\n\t\t\t\tconst searchResults = fuzzySearch.findRelevantTasks(query, {\n\t\t\t\t\tmaxResults: 8,\n\t\t\t\t\tincludeRecent: true,\n\t\t\t\t\tincludeCategoryMatches: true\n\t\t\t\t});\n\n\t\t\t\tautoDiscoveredIds = fuzzySearch.getTaskIds(searchResults);\n\n\t\t\t\t// Remove any auto-discovered tasks that were already explicitly provided\n\t\t\t\tconst uniqueAutoDiscovered = autoDiscoveredIds.filter(\n\t\t\t\t\t(id) => !finalTaskIds.includes(id)\n\t\t\t\t);\n\n\t\t\t\t// Add unique auto-discovered tasks to the final list\n\t\t\t\tfinalTaskIds = [...finalTaskIds, 
...uniqueAutoDiscovered];\n\n\t\t\t\tif (outputFormat === 'text' && finalTaskIds.length > 0) {\n\t\t\t\t\t// Sort task IDs numerically for better display\n\t\t\t\t\tconst sortedTaskIds = finalTaskIds\n\t\t\t\t\t\t.map((id) => parseInt(id))\n\t\t\t\t\t\t.sort((a, b) => a - b)\n\t\t\t\t\t\t.map((id) => id.toString());\n\n\t\t\t\t\t// Show different messages based on whether tasks were explicitly provided\n\t\t\t\t\tif (taskIds.length > 0) {\n\t\t\t\t\t\tconst sortedProvidedIds = taskIds\n\t\t\t\t\t\t\t.map((id) => parseInt(id))\n\t\t\t\t\t\t\t.sort((a, b) => a - b)\n\t\t\t\t\t\t\t.map((id) => id.toString());\n\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.gray('Provided tasks: ') +\n\t\t\t\t\t\t\t\tchalk.cyan(sortedProvidedIds.join(', '))\n\t\t\t\t\t\t);\n\n\t\t\t\t\t\tif (uniqueAutoDiscovered.length > 0) {\n\t\t\t\t\t\t\tconst sortedAutoIds = uniqueAutoDiscovered\n\t\t\t\t\t\t\t\t.map((id) => parseInt(id))\n\t\t\t\t\t\t\t\t.sort((a, b) => a - b)\n\t\t\t\t\t\t\t\t.map((id) => id.toString());\n\n\t\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\t\tchalk.gray('+ Auto-discovered related tasks: ') +\n\t\t\t\t\t\t\t\t\tchalk.cyan(sortedAutoIds.join(', '))\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.gray('Auto-discovered relevant tasks: ') +\n\t\t\t\t\t\t\t\tchalk.cyan(sortedTaskIds.join(', '))\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Silently continue without auto-discovered tasks if there's an error\n\t\t\tlogFn.debug(`Could not auto-discover tasks: ${error.message}`);\n\t\t}\n\n\t\tconst contextResult = await contextGatherer.gather({\n\t\t\ttasks: finalTaskIds,\n\t\t\tfiles: filePaths,\n\t\t\tcustomContext,\n\t\t\tincludeProjectTree,\n\t\t\tformat: 'research', // Use research format for AI consumption\n\t\t\tincludeTokenCounts: true\n\t\t});\n\n\t\tconst gatheredContext = contextResult.context;\n\t\tconst tokenBreakdown = contextResult.tokenBreakdown;\n\n\t\t// Load prompts using 
PromptManager\n\t\tconst promptManager = getPromptManager();\n\n\t\tconst promptParams = {\n\t\t\tquery: query,\n\t\t\tgatheredContext: gatheredContext || '',\n\t\t\tdetailLevel: detailLevel,\n\t\t\tprojectInfo: {\n\t\t\t\troot: projectRoot,\n\t\t\t\ttaskCount: finalTaskIds.length,\n\t\t\t\tfileCount: filePaths.length\n\t\t\t}\n\t\t};\n\n\t\t// Load prompts - the research template handles detail level internally\n\t\tconst { systemPrompt, userPrompt } = await promptManager.loadPrompt(\n\t\t\t'research',\n\t\t\tpromptParams\n\t\t);\n\n\t\t// Count tokens for system and user prompts\n\t\tconst systemPromptTokens = contextGatherer.countTokens(systemPrompt);\n\t\tconst userPromptTokens = contextGatherer.countTokens(userPrompt);\n\t\tconst totalInputTokens = systemPromptTokens + userPromptTokens;\n\n\t\tif (outputFormat === 'text') {\n\t\t\t// Display detailed token breakdown in a clean box\n\t\t\tdisplayDetailedTokenBreakdown(\n\t\t\t\ttokenBreakdown,\n\t\t\t\tsystemPromptTokens,\n\t\t\t\tuserPromptTokens\n\t\t\t);\n\t\t}\n\n\t\t// Only log detailed info in debug mode or MCP\n\t\tif (outputFormat !== 'text') {\n\t\t\tlogFn.info(\n\t\t\t\t`Calling AI service with research role, context size: ${tokenBreakdown.total} tokens (${gatheredContext.length} characters)`\n\t\t\t);\n\t\t}\n\n\t\t// Start loading indicator for CLI mode\n\t\tlet loadingIndicator = null;\n\t\tif (outputFormat === 'text') {\n\t\t\tloadingIndicator = startLoadingIndicator('Researching with AI...\\n');\n\t\t}\n\n\t\tlet aiResult;\n\t\ttry {\n\t\t\t// Call AI service with research role\n\t\t\taiResult = await generateTextService({\n\t\t\t\trole: 'research', // Always use research role for research command\n\t\t\t\tsession,\n\t\t\t\tprojectRoot,\n\t\t\t\tsystemPrompt,\n\t\t\t\tprompt: userPrompt,\n\t\t\t\tcommandName,\n\t\t\t\toutputType\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tif (loadingIndicator) {\n\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t}\n\t\t\tthrow error;\n\t\t} finally {\n\t\t\tif 
(loadingIndicator) {\n\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t}\n\t\t}\n\n\t\tconst researchResult = aiResult.mainResult;\n\t\tconst telemetryData = aiResult.telemetryData;\n\t\tconst tagInfo = aiResult.tagInfo;\n\n\t\t// Format and display results\n\t\t// Initialize interactive save tracking\n\t\tlet interactiveSaveInfo = { interactiveSaveOccurred: false };\n\n\t\tif (outputFormat === 'text') {\n\t\t\tdisplayResearchResults(\n\t\t\t\tresearchResult,\n\t\t\t\tquery,\n\t\t\t\tdetailLevel,\n\t\t\t\ttokenBreakdown\n\t\t\t);\n\n\t\t\t// Display AI usage telemetry for CLI users\n\t\t\tif (telemetryData) {\n\t\t\t\tdisplayAiUsageSummary(telemetryData, 'cli');\n\t\t\t}\n\n\t\t\t// Offer follow-up question option (only for initial CLI queries, not MCP)\n\t\t\tif (allowFollowUp && !isMCP) {\n\t\t\t\tinteractiveSaveInfo = await handleFollowUpQuestions(\n\t\t\t\t\toptions,\n\t\t\t\t\tcontext,\n\t\t\t\t\toutputFormat,\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tlogFn,\n\t\t\t\t\tquery,\n\t\t\t\t\tresearchResult\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\t// Handle MCP save-to-file request\n\t\tif (saveToFile && isMCP) {\n\t\t\tconst conversationHistory = [\n\t\t\t\t{\n\t\t\t\t\tquestion: query,\n\t\t\t\t\tanswer: researchResult,\n\t\t\t\t\ttype: 'initial',\n\t\t\t\t\ttimestamp: new Date().toISOString()\n\t\t\t\t}\n\t\t\t];\n\n\t\t\tconst savedFilePath = await handleSaveToFile(\n\t\t\t\tconversationHistory,\n\t\t\t\tprojectRoot,\n\t\t\t\tcontext,\n\t\t\t\tlogFn\n\t\t\t);\n\n\t\t\t// Add saved file path to return data\n\t\t\treturn {\n\t\t\t\tquery,\n\t\t\t\tresult: researchResult,\n\t\t\t\tcontextSize: gatheredContext.length,\n\t\t\t\tcontextTokens: tokenBreakdown.total,\n\t\t\t\ttokenBreakdown,\n\t\t\t\tsystemPromptTokens,\n\t\t\t\tuserPromptTokens,\n\t\t\t\ttotalInputTokens,\n\t\t\t\tdetailLevel,\n\t\t\t\ttelemetryData,\n\t\t\t\ttagInfo,\n\t\t\t\tsavedFilePath,\n\t\t\t\tinteractiveSaveOccurred: false // MCP save-to-file doesn't count as interactive 
save\n\t\t\t};\n\t\t}\n\n\t\tlogFn.success('Research query completed successfully');\n\n\t\treturn {\n\t\t\tquery,\n\t\t\tresult: researchResult,\n\t\t\tcontextSize: gatheredContext.length,\n\t\t\tcontextTokens: tokenBreakdown.total,\n\t\t\ttokenBreakdown,\n\t\t\tsystemPromptTokens,\n\t\t\tuserPromptTokens,\n\t\t\ttotalInputTokens,\n\t\t\tdetailLevel,\n\t\t\ttelemetryData,\n\t\t\ttagInfo,\n\t\t\tinteractiveSaveOccurred:\n\t\t\t\tinteractiveSaveInfo?.interactiveSaveOccurred || false\n\t\t};\n\t} catch (error) {\n\t\tlogFn.error(`Research query failed: ${error.message}`);\n\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(chalk.red(`\\n❌ Research failed: ${error.message}`));\n\t\t}\n\n\t\tthrow error;\n\t}\n}\n\n/**\n * Display detailed token breakdown for context and prompts\n * @param {Object} tokenBreakdown - Token breakdown from context gatherer\n * @param {number} systemPromptTokens - System prompt token count\n * @param {number} userPromptTokens - User prompt token count\n */\nfunction displayDetailedTokenBreakdown(\n\ttokenBreakdown,\n\tsystemPromptTokens,\n\tuserPromptTokens\n) {\n\tconst parts = [];\n\n\t// Custom context\n\tif (tokenBreakdown.customContext) {\n\t\tparts.push(\n\t\t\tchalk.cyan('Custom: ') +\n\t\t\t\tchalk.yellow(tokenBreakdown.customContext.tokens.toLocaleString())\n\t\t);\n\t}\n\n\t// Tasks breakdown\n\tif (tokenBreakdown.tasks && tokenBreakdown.tasks.length > 0) {\n\t\tconst totalTaskTokens = tokenBreakdown.tasks.reduce(\n\t\t\t(sum, task) => sum + task.tokens,\n\t\t\t0\n\t\t);\n\t\tconst taskDetails = tokenBreakdown.tasks\n\t\t\t.map((task) => {\n\t\t\t\tconst titleDisplay =\n\t\t\t\t\ttask.title.length > 30\n\t\t\t\t\t\t? 
task.title.substring(0, 30) + '...'\n\t\t\t\t\t\t: task.title;\n\t\t\t\treturn ` ${chalk.gray(task.id)} ${chalk.white(titleDisplay)} ${chalk.yellow(task.tokens.toLocaleString())} tokens`;\n\t\t\t})\n\t\t\t.join('\\n');\n\n\t\tparts.push(\n\t\t\tchalk.cyan('Tasks: ') +\n\t\t\t\tchalk.yellow(totalTaskTokens.toLocaleString()) +\n\t\t\t\tchalk.gray(` (${tokenBreakdown.tasks.length} items)`) +\n\t\t\t\t'\\n' +\n\t\t\t\ttaskDetails\n\t\t);\n\t}\n\n\t// Files breakdown\n\tif (tokenBreakdown.files && tokenBreakdown.files.length > 0) {\n\t\tconst totalFileTokens = tokenBreakdown.files.reduce(\n\t\t\t(sum, file) => sum + file.tokens,\n\t\t\t0\n\t\t);\n\t\tconst fileDetails = tokenBreakdown.files\n\t\t\t.map((file) => {\n\t\t\t\tconst pathDisplay =\n\t\t\t\t\tfile.path.length > 40\n\t\t\t\t\t\t? '...' + file.path.substring(file.path.length - 37)\n\t\t\t\t\t\t: file.path;\n\t\t\t\treturn ` ${chalk.gray(pathDisplay)} ${chalk.yellow(file.tokens.toLocaleString())} tokens ${chalk.gray(`(${file.sizeKB}KB)`)}`;\n\t\t\t})\n\t\t\t.join('\\n');\n\n\t\tparts.push(\n\t\t\tchalk.cyan('Files: ') +\n\t\t\t\tchalk.yellow(totalFileTokens.toLocaleString()) +\n\t\t\t\tchalk.gray(` (${tokenBreakdown.files.length} files)`) +\n\t\t\t\t'\\n' +\n\t\t\t\tfileDetails\n\t\t);\n\t}\n\n\t// Project tree\n\tif (tokenBreakdown.projectTree) {\n\t\tparts.push(\n\t\t\tchalk.cyan('Project Tree: ') +\n\t\t\t\tchalk.yellow(tokenBreakdown.projectTree.tokens.toLocaleString()) +\n\t\t\t\tchalk.gray(\n\t\t\t\t\t` (${tokenBreakdown.projectTree.fileCount} files, ${tokenBreakdown.projectTree.dirCount} dirs)`\n\t\t\t\t)\n\t\t);\n\t}\n\n\t// Prompts breakdown\n\tconst totalPromptTokens = systemPromptTokens + userPromptTokens;\n\tconst promptDetails = [\n\t\t` ${chalk.gray('System:')} ${chalk.yellow(systemPromptTokens.toLocaleString())} tokens`,\n\t\t` ${chalk.gray('User:')} ${chalk.yellow(userPromptTokens.toLocaleString())} tokens`\n\t].join('\\n');\n\n\tparts.push(\n\t\tchalk.cyan('Prompts: ') 
+\n\t\t\tchalk.yellow(totalPromptTokens.toLocaleString()) +\n\t\t\tchalk.gray(' (generated)') +\n\t\t\t'\\n' +\n\t\t\tpromptDetails\n\t);\n\n\t// Display the breakdown in a clean box\n\tif (parts.length > 0) {\n\t\tconst content = parts.join('\\n\\n');\n\t\tconst tokenBox = boxen(content, {\n\t\t\ttitle: chalk.blue.bold('Context Analysis'),\n\t\t\ttitleAlignment: 'left',\n\t\t\tpadding: { top: 1, bottom: 1, left: 2, right: 2 },\n\t\t\tmargin: { top: 0, bottom: 1 },\n\t\t\tborderStyle: 'single',\n\t\t\tborderColor: 'blue'\n\t\t});\n\t\tconsole.log(tokenBox);\n\t}\n}\n\n/**\n * Process research result text to highlight code blocks\n * @param {string} text - Raw research result text\n * @returns {string} Processed text with highlighted code blocks\n */\nfunction processCodeBlocks(text) {\n\t// Regex to match code blocks with optional language specification\n\tconst codeBlockRegex = /```(\\w+)?\\n([\\s\\S]*?)```/g;\n\n\treturn text.replace(codeBlockRegex, (match, language, code) => {\n\t\ttry {\n\t\t\t// Default to javascript if no language specified\n\t\t\tconst lang = language || 'javascript';\n\n\t\t\t// Highlight the code using cli-highlight\n\t\t\tconst highlightedCode = highlight(code.trim(), {\n\t\t\t\tlanguage: lang,\n\t\t\t\tignoreIllegals: true // Don't fail on unrecognized syntax\n\t\t\t});\n\n\t\t\t// Add a subtle border around code blocks\n\t\t\tconst codeBox = boxen(highlightedCode, {\n\t\t\t\tpadding: { top: 0, bottom: 0, left: 1, right: 1 },\n\t\t\t\tmargin: { top: 0, bottom: 0 },\n\t\t\t\tborderStyle: 'single',\n\t\t\t\tborderColor: 'dim'\n\t\t\t});\n\n\t\t\treturn '\\n' + codeBox + '\\n';\n\t\t} catch (error) {\n\t\t\t// If highlighting fails, return the original code block with basic formatting\n\t\t\treturn (\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.gray('```' + (language || '')) +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.white(code.trim()) +\n\t\t\t\t'\\n' +\n\t\t\t\tchalk.gray('```') +\n\t\t\t\t'\\n'\n\t\t\t);\n\t\t}\n\t});\n}\n\n/**\n * Display research results in 
formatted output\n * @param {string} result - AI research result\n * @param {string} query - Original query\n * @param {string} detailLevel - Detail level used\n * @param {Object} tokenBreakdown - Detailed token usage\n */\nfunction displayResearchResults(result, query, detailLevel, tokenBreakdown) {\n\t// Header with query info\n\tconst header = boxen(\n\t\tchalk.green.bold('Research Results') +\n\t\t\t'\\n\\n' +\n\t\t\tchalk.gray('Query: ') +\n\t\t\tchalk.white(query) +\n\t\t\t'\\n' +\n\t\t\tchalk.gray('Detail Level: ') +\n\t\t\tchalk.cyan(detailLevel),\n\t\t{\n\t\t\tpadding: { top: 1, bottom: 1, left: 2, right: 2 },\n\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\tborderStyle: 'round',\n\t\t\tborderColor: 'green'\n\t\t}\n\t);\n\tconsole.log(header);\n\n\t// Process the result to highlight code blocks\n\tconst processedResult = processCodeBlocks(result);\n\n\t// Main research content in a clean box\n\tconst contentBox = boxen(processedResult, {\n\t\tpadding: { top: 1, bottom: 1, left: 2, right: 2 },\n\t\tmargin: { top: 0, bottom: 1 },\n\t\tborderStyle: 'single',\n\t\tborderColor: 'gray'\n\t});\n\tconsole.log(contentBox);\n\n\t// Success footer\n\tconsole.log(chalk.green('✅ Research completed'));\n}\n\n/**\n * Handle follow-up questions and save functionality in interactive mode\n * @param {Object} originalOptions - Original research options\n * @param {Object} context - Execution context\n * @param {string} outputFormat - Output format\n * @param {string} projectRoot - Project root directory\n * @param {Object} logFn - Logger function\n * @param {string} initialQuery - Initial query for context\n * @param {string} initialResult - Initial AI result for context\n */\nasync function handleFollowUpQuestions(\n\toriginalOptions,\n\tcontext,\n\toutputFormat,\n\tprojectRoot,\n\tlogFn,\n\tinitialQuery,\n\tinitialResult\n) {\n\tlet interactiveSaveOccurred = false;\n\n\ttry {\n\t\t// Import required modules for saving\n\t\tconst { readJSON } = await 
/**
 * Handle follow-up questions and save functionality in interactive mode.
 * Loops on an inquirer menu until the user quits, supporting follow-up
 * research queries (with cumulative conversation context), saving the
 * conversation to a file, and saving it to a task/subtask.
 * @param {Object} originalOptions - Original research options
 * @param {Object} context - Execution context
 * @param {string} outputFormat - Output format
 * @param {string} projectRoot - Project root directory
 * @param {Object} logFn - Logger function
 * @param {string} initialQuery - Initial query for context
 * @param {string} initialResult - Initial AI result for context
 * @returns {Promise<{interactiveSaveOccurred: boolean}>} Whether a save to a
 *   task/subtask happened during this interactive session
 */
async function handleFollowUpQuestions(
	originalOptions,
	context,
	outputFormat,
	projectRoot,
	logFn,
	initialQuery,
	initialResult
) {
	let interactiveSaveOccurred = false;

	try {
		// NOTE(fix): removed unused dynamic imports of readJSON,
		// updateTaskById and updateSubtaskById — they were never referenced
		// here; handleSaveToTask performs its own imports.

		// Initialize conversation history with the initial Q&A
		const conversationHistory = [
			{
				question: initialQuery,
				answer: initialResult,
				type: 'initial',
				timestamp: new Date().toISOString()
			}
		];

		while (true) {
			// Get user choice
			const { action } = await inquirer.prompt([
				{
					type: 'list',
					name: 'action',
					message: 'What would you like to do next?',
					choices: [
						{ name: 'Ask a follow-up question', value: 'followup' },
						{ name: 'Save to file', value: 'savefile' },
						{ name: 'Save to task/subtask', value: 'save' },
						{ name: 'Quit', value: 'quit' }
					],
					pageSize: 4
				}
			]);

			if (action === 'quit') {
				break;
			}

			if (action === 'savefile') {
				// Handle save to file functionality
				await handleSaveToFile(
					conversationHistory,
					projectRoot,
					context,
					logFn
				);
				continue;
			}

			if (action === 'save') {
				// Handle save functionality; only a confirmed save flips the flag
				const saveResult = await handleSaveToTask(
					conversationHistory,
					projectRoot,
					context,
					logFn
				);
				if (saveResult) {
					interactiveSaveOccurred = true;
				}
				continue;
			}

			if (action === 'followup') {
				// Get the follow-up question
				const { followUpQuery } = await inquirer.prompt([
					{
						type: 'input',
						name: 'followUpQuery',
						message: 'Enter your follow-up question:',
						validate: (input) => {
							if (!input || input.trim().length === 0) {
								return 'Please enter a valid question.';
							}
							return true;
						}
					}
				]);

				if (!followUpQuery || followUpQuery.trim().length === 0) {
					continue;
				}

				console.log('\n' + chalk.gray('─'.repeat(60)) + '\n');

				// Build cumulative conversation context from all previous exchanges
				const conversationContext =
					buildConversationContext(conversationHistory);

				// Create enhanced options for follow-up with full conversation context
				const followUpOptions = {
					...originalOptions,
					taskIds: [], // Clear task IDs to allow fresh fuzzy search
					customContext:
						conversationContext +
						(originalOptions.customContext
							? `\n\n--- Original Context ---\n${originalOptions.customContext}`
							: '')
				};

				// Perform follow-up research
				const followUpResult = await performResearch(
					followUpQuery.trim(),
					followUpOptions,
					context,
					outputFormat,
					false // allowFollowUp = false for nested calls
				);

				// Add this exchange to the conversation history
				conversationHistory.push({
					question: followUpQuery.trim(),
					answer: followUpResult.result,
					type: 'followup',
					timestamp: new Date().toISOString()
				});
			}
		}
	} catch (error) {
		// If there's an error with inquirer (e.g., non-interactive terminal),
		// silently continue without follow-up functionality
		logFn.debug(`Follow-up questions not available: ${error.message}`);
	}

	return { interactiveSaveOccurred };
}
/**
 * Handle saving conversation to a task or subtask.
 * Prompts the user for an ID ("15" = task, "15.2" = subtask), validates it
 * against tasks.json, then appends the formatted conversation via
 * updateTaskById / updateSubtaskById.
 * @param {Array} conversationHistory - Array of conversation exchanges
 * @param {string} projectRoot - Project root directory
 * @param {Object} context - Execution context
 * @param {Object} logFn - Logger function
 * @returns {Promise<boolean>} True when the conversation was saved, false otherwise
 */
async function handleSaveToTask(
	conversationHistory,
	projectRoot,
	context,
	logFn
) {
	try {
		// Import required modules lazily (avoids circular imports at load time)
		const { readJSON } = await import('../utils.js');
		const updateTaskById = (await import('./update-task-by-id.js')).default;
		const { updateSubtaskById } = await import('./update-subtask-by-id.js');

		// Get task ID from user
		const { taskId } = await inquirer.prompt([
			{
				type: 'input',
				name: 'taskId',
				message: 'Enter task ID (e.g., "15" for task or "15.2" for subtask):',
				validate: (input) => {
					if (!input || input.trim().length === 0) {
						return 'Please enter a task ID.';
					}

					const trimmedInput = input.trim();
					// Validate format: number or number.number
					if (!/^\d+(\.\d+)?$/.test(trimmedInput)) {
						return 'Invalid format. Use "15" for task or "15.2" for subtask.';
					}

					return true;
				}
			}
		]);

		const trimmedTaskId = taskId.trim();

		// Format conversation thread for saving
		const conversationThread = formatConversationForSaving(conversationHistory);

		// A dotted ID ("15.2") addresses a subtask
		const isSubtask = trimmedTaskId.includes('.');

		// Try to save - first validate the ID exists
		const tasksPath = path.join(
			projectRoot,
			'.taskmaster',
			'tasks',
			'tasks.json'
		);

		if (!fs.existsSync(tasksPath)) {
			console.log(
				chalk.red('❌ Tasks file not found. Please run task-master init first.')
			);
			// NOTE(fix): validation failures now return false explicitly instead
			// of undefined, matching the boolean contract of the success/error
			// paths (backward compatible: both are falsy).
			return false;
		}

		const data = readJSON(tasksPath, projectRoot, context.tag);
		if (!data || !data.tasks) {
			console.log(chalk.red('❌ No valid tasks found.'));
			return false;
		}

		if (isSubtask) {
			// Validate subtask exists
			const [parentId, subtaskId] = trimmedTaskId
				.split('.')
				.map((id) => parseInt(id, 10));
			const parentTask = data.tasks.find((t) => t.id === parentId);

			if (!parentTask) {
				console.log(chalk.red(`❌ Parent task ${parentId} not found.`));
				return false;
			}

			if (
				!parentTask.subtasks ||
				!parentTask.subtasks.find((st) => st.id === subtaskId)
			) {
				console.log(chalk.red(`❌ Subtask ${trimmedTaskId} not found.`));
				return false;
			}

			// Save to subtask using updateSubtaskById
			console.log(chalk.blue('💾 Saving research conversation to subtask...'));

			await updateSubtaskById(
				tasksPath,
				trimmedTaskId,
				conversationThread,
				false, // useResearch = false for simple append
				context,
				'text'
			);

			console.log(
				chalk.green(
					`✅ Research conversation saved to subtask ${trimmedTaskId}`
				)
			);
		} else {
			// Validate task exists
			const taskIdNum = parseInt(trimmedTaskId, 10);
			const task = data.tasks.find((t) => t.id === taskIdNum);

			if (!task) {
				console.log(chalk.red(`❌ Task ${trimmedTaskId} not found.`));
				return false;
			}

			// Save to task using updateTaskById with append mode
			console.log(chalk.blue('💾 Saving research conversation to task...'));

			await updateTaskById(
				tasksPath,
				taskIdNum,
				conversationThread,
				false, // useResearch = false for simple append
				context,
				'text',
				true // appendMode = true
			);

			console.log(
				chalk.green(`✅ Research conversation saved to task ${trimmedTaskId}`)
			);
		}

		return true; // Indicate successful save
	} catch (error) {
		console.log(chalk.red(`❌ Error saving conversation: ${error.message}`));
		logFn.error(`Error saving conversation: ${error.message}`);
		return false; // Indicate failed save
	}
}
true\n\t\t\t);\n\n\t\t\tconsole.log(\n\t\t\t\tchalk.green(`✅ Research conversation saved to task ${trimmedTaskId}`)\n\t\t\t);\n\t\t}\n\n\t\treturn true; // Indicate successful save\n\t} catch (error) {\n\t\tconsole.log(chalk.red(`❌ Error saving conversation: ${error.message}`));\n\t\tlogFn.error(`Error saving conversation: ${error.message}`);\n\t\treturn false; // Indicate failed save\n\t}\n}\n\n/**\n * Handle saving conversation to a file in .taskmaster/docs/research/\n * @param {Array} conversationHistory - Array of conversation exchanges\n * @param {string} projectRoot - Project root directory\n * @param {Object} context - Execution context\n * @param {Object} logFn - Logger function\n * @returns {Promise<string>} Path to saved file\n */\nasync function handleSaveToFile(\n\tconversationHistory,\n\tprojectRoot,\n\tcontext,\n\tlogFn\n) {\n\ttry {\n\t\t// Create research directory if it doesn't exist\n\t\tconst researchDir = path.join(\n\t\t\tprojectRoot,\n\t\t\t'.taskmaster',\n\t\t\t'docs',\n\t\t\t'research'\n\t\t);\n\t\tif (!fs.existsSync(researchDir)) {\n\t\t\tfs.mkdirSync(researchDir, { recursive: true });\n\t\t}\n\n\t\t// Generate filename from first query and timestamp\n\t\tconst firstQuery = conversationHistory[0]?.question || 'research-query';\n\t\tconst timestamp = new Date().toISOString().split('T')[0]; // YYYY-MM-DD format\n\n\t\t// Create a slug from the query (remove special chars, limit length)\n\t\tconst querySlug = firstQuery\n\t\t\t.toLowerCase()\n\t\t\t.replace(/[^a-z0-9\\s-]/g, '') // Remove special characters\n\t\t\t.replace(/\\s+/g, '-') // Replace spaces with hyphens\n\t\t\t.replace(/-+/g, '-') // Replace multiple hyphens with single\n\t\t\t.substring(0, 50) // Limit length\n\t\t\t.replace(/^-+|-+$/g, ''); // Remove leading/trailing hyphens\n\n\t\tconst filename = `${timestamp}_${querySlug}.md`;\n\t\tconst filePath = path.join(researchDir, filename);\n\n\t\t// Format conversation for file\n\t\tconst fileContent = 
formatConversationForFile(\n\t\t\tconversationHistory,\n\t\t\tfirstQuery\n\t\t);\n\n\t\t// Write file\n\t\tfs.writeFileSync(filePath, fileContent, 'utf8');\n\n\t\tconst relativePath = path.relative(projectRoot, filePath);\n\t\tconsole.log(\n\t\t\tchalk.green(`✅ Research saved to: ${chalk.cyan(relativePath)}`)\n\t\t);\n\n\t\tlogFn.success(`Research conversation saved to ${relativePath}`);\n\n\t\treturn filePath;\n\t} catch (error) {\n\t\tconsole.log(chalk.red(`❌ Error saving research file: ${error.message}`));\n\t\tlogFn.error(`Error saving research file: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Format conversation history for saving to a file\n * @param {Array} conversationHistory - Array of conversation exchanges\n * @param {string} initialQuery - The initial query for metadata\n * @returns {string} Formatted file content\n */\nfunction formatConversationForFile(conversationHistory, initialQuery) {\n\tconst timestamp = new Date().toISOString();\n\tconst date = new Date().toLocaleDateString();\n\tconst time = new Date().toLocaleTimeString();\n\n\t// Create metadata header\n\tlet content = `---\ntitle: Research Session\nquery: \"${initialQuery}\"\ndate: ${date}\ntime: ${time}\ntimestamp: ${timestamp}\nexchanges: ${conversationHistory.length}\n---\n\n# Research Session\n\n`;\n\n\t// Add each conversation exchange\n\tconversationHistory.forEach((exchange, index) => {\n\t\tif (exchange.type === 'initial') {\n\t\t\tcontent += `## Initial Query\\n\\n**Question:** ${exchange.question}\\n\\n**Response:**\\n\\n${exchange.answer}\\n\\n`;\n\t\t} else {\n\t\t\tcontent += `## Follow-up ${index}\\n\\n**Question:** ${exchange.question}\\n\\n**Response:**\\n\\n${exchange.answer}\\n\\n`;\n\t\t}\n\n\t\tif (index < conversationHistory.length - 1) {\n\t\t\tcontent += '---\\n\\n';\n\t\t}\n\t});\n\n\t// Add footer\n\tcontent += `\\n---\\n\\n*Generated by Task Master Research Command* \\n*Timestamp: ${timestamp}*\\n`;\n\n\treturn content;\n}\n\n/**\n * Format conversation 
history for saving to a task/subtask\n * @param {Array} conversationHistory - Array of conversation exchanges\n * @returns {string} Formatted conversation thread\n */\nfunction formatConversationForSaving(conversationHistory) {\n\tconst timestamp = new Date().toISOString();\n\tlet formatted = `## Research Session - ${new Date().toLocaleDateString()} ${new Date().toLocaleTimeString()}\\n\\n`;\n\n\tconversationHistory.forEach((exchange, index) => {\n\t\tif (exchange.type === 'initial') {\n\t\t\tformatted += `**Initial Query:** ${exchange.question}\\n\\n`;\n\t\t\tformatted += `**Response:** ${exchange.answer}\\n\\n`;\n\t\t} else {\n\t\t\tformatted += `**Follow-up ${index}:** ${exchange.question}\\n\\n`;\n\t\t\tformatted += `**Response:** ${exchange.answer}\\n\\n`;\n\t\t}\n\n\t\tif (index < conversationHistory.length - 1) {\n\t\t\tformatted += '---\\n\\n';\n\t\t}\n\t});\n\n\treturn formatted;\n}\n\n/**\n * Build conversation context string from conversation history\n * @param {Array} conversationHistory - Array of conversation exchanges\n * @returns {string} Formatted conversation context\n */\nfunction buildConversationContext(conversationHistory) {\n\tif (conversationHistory.length === 0) {\n\t\treturn '';\n\t}\n\n\tconst contextParts = ['--- Conversation History ---'];\n\n\tconversationHistory.forEach((exchange, index) => {\n\t\tconst questionLabel =\n\t\t\texchange.type === 'initial' ? 'Initial Question' : `Follow-up ${index}`;\n\t\tconst answerLabel =\n\t\t\texchange.type === 'initial' ? 
'Initial Answer' : `Answer ${index}`;\n\n\t\tcontextParts.push(`\\n${questionLabel}: ${exchange.question}`);\n\t\tcontextParts.push(`${answerLabel}: ${exchange.answer}`);\n\t});\n\n\treturn contextParts.join('\\n');\n}\n\nexport { performResearch };\n"], ["/claude-task-master/scripts/modules/task-manager/add-task.js", "import path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport Table from 'cli-table3';\nimport { z } from 'zod';\nimport Fuse from 'fuse.js'; // Import Fuse.js for advanced fuzzy search\n\nimport {\n\tdisplayBanner,\n\tgetStatusWithColor,\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tsucceedLoadingIndicator,\n\tfailLoadingIndicator,\n\tdisplayAiUsageSummary,\n\tdisplayContextAnalysis\n} from '../ui.js';\nimport {\n\treadJSON,\n\twriteJSON,\n\tlog as consoleLog,\n\ttruncate,\n\tensureTagMetadata,\n\tperformCompleteTagMigration,\n\tmarkMigrationForNotice\n} from '../utils.js';\nimport { generateObjectService } from '../ai-services-unified.js';\nimport { getDefaultPriority } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport ContextGatherer from '../utils/contextGatherer.js';\nimport generateTaskFiles from './generate-task-files.js';\nimport {\n\tTASK_PRIORITY_OPTIONS,\n\tDEFAULT_TASK_PRIORITY,\n\tisValidTaskPriority,\n\tnormalizeTaskPriority\n} from '../../../src/constants/task-priority.js';\n\n// Define Zod schema for the expected AI output object\nconst AiTaskDataSchema = z.object({\n\ttitle: z.string().describe('Clear, concise title for the task'),\n\tdescription: z\n\t\t.string()\n\t\t.describe('A one or two sentence description of the task'),\n\tdetails: z\n\t\t.string()\n\t\t.describe('In-depth implementation details, considerations, and guidance'),\n\ttestStrategy: z\n\t\t.string()\n\t\t.describe('Detailed approach for verifying task completion'),\n\tdependencies: z\n\t\t.array(z.number())\n\t\t.nullable()\n\t\t.describe(\n\t\t\t'Array of task IDs that this task 
depends on (must be completed before this task can start)'\n\t\t)\n});\n\n/**\n * Get all tasks from all tags\n * @param {Object} rawData - The raw tagged data object\n * @returns {Array} A flat array of all task objects\n */\nfunction getAllTasks(rawData) {\n\tlet allTasks = [];\n\tfor (const tagName in rawData) {\n\t\tif (\n\t\t\tObject.prototype.hasOwnProperty.call(rawData, tagName) &&\n\t\t\trawData[tagName] &&\n\t\t\tArray.isArray(rawData[tagName].tasks)\n\t\t) {\n\t\t\tallTasks = allTasks.concat(rawData[tagName].tasks);\n\t\t}\n\t}\n\treturn allTasks;\n}\n\n/**\n * Add a new task using AI\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} prompt - Description of the task to add (required for AI-driven creation)\n * @param {Array} dependencies - Task dependencies\n * @param {string} priority - Task priority\n * @param {function} reportProgress - Function to report progress to MCP server (optional)\n * @param {Object} mcpLog - MCP logger object (optional)\n * @param {Object} session - Session object from MCP server (optional)\n * @param {string} outputFormat - Output format (text or json)\n * @param {Object} customEnv - Custom environment variables (optional) - Note: AI params override deprecated\n * @param {Object} manualTaskData - Manual task data (optional, for direct task creation without AI)\n * @param {boolean} useResearch - Whether to use the research model (passed to unified service)\n * @param {Object} context - Context object containing session and potentially projectRoot\n * @param {string} [context.projectRoot] - Project root path (for MCP/env fallback)\n * @param {string} [context.commandName] - The name of the command being executed (for telemetry)\n * @param {string} [context.outputType] - The output type ('cli' or 'mcp', for telemetry)\n * @param {string} [context.tag] - Tag for the task (optional)\n * @returns {Promise<object>} An object containing newTaskId and telemetryData\n */\nasync function 
addTask(\n\ttasksPath,\n\tprompt,\n\tdependencies = [],\n\tpriority = null,\n\tcontext = {},\n\toutputFormat = 'text', // Default to text for CLI\n\tmanualTaskData = null,\n\tuseResearch = false\n) {\n\tconst { session, mcpLog, projectRoot, commandName, outputType, tag } =\n\t\tcontext;\n\tconst isMCP = !!mcpLog;\n\n\t// Create a consistent logFn object regardless of context\n\tconst logFn = isMCP\n\t\t? mcpLog // Use MCP logger if provided\n\t\t: {\n\t\t\t\t// Create a wrapper around consoleLog for CLI\n\t\t\t\tinfo: (...args) => consoleLog('info', ...args),\n\t\t\t\twarn: (...args) => consoleLog('warn', ...args),\n\t\t\t\terror: (...args) => consoleLog('error', ...args),\n\t\t\t\tdebug: (...args) => consoleLog('debug', ...args),\n\t\t\t\tsuccess: (...args) => consoleLog('success', ...args)\n\t\t\t};\n\n\t// Validate priority - only accept high, medium, or low\n\tlet effectivePriority =\n\t\tpriority || getDefaultPriority(projectRoot) || DEFAULT_TASK_PRIORITY;\n\n\t// If priority is provided, validate and normalize it\n\tif (priority) {\n\t\tconst normalizedPriority = normalizeTaskPriority(priority);\n\t\tif (normalizedPriority) {\n\t\t\teffectivePriority = normalizedPriority;\n\t\t} else {\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsoleLog(\n\t\t\t\t\t'warn',\n\t\t\t\t\t`Invalid priority \"${priority}\". 
Using default priority \"${DEFAULT_TASK_PRIORITY}\".`\n\t\t\t\t);\n\t\t\t}\n\t\t\teffectivePriority = DEFAULT_TASK_PRIORITY;\n\t\t}\n\t}\n\n\tlogFn.info(\n\t\t`Adding new task with prompt: \"${prompt}\", Priority: ${effectivePriority}, Dependencies: ${dependencies.join(', ') || 'None'}, Research: ${useResearch}, ProjectRoot: ${projectRoot}`\n\t);\n\tif (tag) {\n\t\tlogFn.info(`Using tag context: ${tag}`);\n\t}\n\n\tlet loadingIndicator = null;\n\tlet aiServiceResponse = null; // To store the full response from AI service\n\n\t// Create custom reporter that checks for MCP log\n\tconst report = (message, level = 'info') => {\n\t\tif (mcpLog) {\n\t\t\tmcpLog[level](message);\n\t\t} else if (outputFormat === 'text') {\n\t\t\tconsoleLog(level, message);\n\t\t}\n\t};\n\n\t/**\n\t * Recursively builds a dependency graph for a given task\n\t * @param {Array} tasks - All tasks from tasks.json\n\t * @param {number} taskId - ID of the task to analyze\n\t * @param {Set} visited - Set of already visited task IDs\n\t * @param {Map} depthMap - Map of task ID to its depth in the graph\n\t * @param {number} depth - Current depth in the recursion\n\t * @return {Object} Dependency graph data\n\t */\n\tfunction buildDependencyGraph(\n\t\ttasks,\n\t\ttaskId,\n\t\tvisited = new Set(),\n\t\tdepthMap = new Map(),\n\t\tdepth = 0\n\t) {\n\t\t// Skip if we've already visited this task or it doesn't exist\n\t\tif (visited.has(taskId)) {\n\t\t\treturn null;\n\t\t}\n\n\t\t// Find the task\n\t\tconst task = tasks.find((t) => t.id === taskId);\n\t\tif (!task) {\n\t\t\treturn null;\n\t\t}\n\n\t\t// Mark as visited\n\t\tvisited.add(taskId);\n\n\t\t// Update depth if this is a deeper path to this task\n\t\tif (!depthMap.has(taskId) || depth < depthMap.get(taskId)) {\n\t\t\tdepthMap.set(taskId, depth);\n\t\t}\n\n\t\t// Process dependencies\n\t\tconst dependencyData = [];\n\t\tif (task.dependencies && task.dependencies.length > 0) {\n\t\t\tfor (const depId of task.dependencies) {\n\t\t\t\tconst 
depData = buildDependencyGraph(\n\t\t\t\t\ttasks,\n\t\t\t\t\tdepId,\n\t\t\t\t\tvisited,\n\t\t\t\t\tdepthMap,\n\t\t\t\t\tdepth + 1\n\t\t\t\t);\n\t\t\t\tif (depData) {\n\t\t\t\t\tdependencyData.push(depData);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn {\n\t\t\tid: task.id,\n\t\t\ttitle: task.title,\n\t\t\tdescription: task.description,\n\t\t\tstatus: task.status,\n\t\t\tdependencies: dependencyData\n\t\t};\n\t}\n\n\ttry {\n\t\t// Read the existing tasks - IMPORTANT: Read the raw data without tag resolution\n\t\tlet rawData = readJSON(tasksPath, projectRoot, tag); // No tag parameter\n\n\t\t// Handle the case where readJSON returns resolved data with _rawTaggedData\n\t\tif (rawData && rawData._rawTaggedData) {\n\t\t\t// Use the raw tagged data and discard the resolved view\n\t\t\trawData = rawData._rawTaggedData;\n\t\t}\n\n\t\t// If file doesn't exist or is invalid, create a new structure in memory\n\t\tif (!rawData) {\n\t\t\treport(\n\t\t\t\t'tasks.json not found or invalid. Initializing new structure.',\n\t\t\t\t'info'\n\t\t\t);\n\t\t\trawData = {\n\t\t\t\tmaster: {\n\t\t\t\t\ttasks: [],\n\t\t\t\t\tmetadata: {\n\t\t\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\t\t\tdescription: 'Default tasks context'\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\t\t\t// Do not write the file here; it will be written later with the new task.\n\t\t}\n\n\t\t// Handle legacy format migration using utilities\n\t\tif (rawData && Array.isArray(rawData.tasks) && !rawData._rawTaggedData) {\n\t\t\treport('Legacy format detected. 
Migrating to tagged format...', 'info');\n\n\t\t\t// This is legacy format - migrate it to tagged format\n\t\t\trawData = {\n\t\t\t\tmaster: {\n\t\t\t\t\ttasks: rawData.tasks,\n\t\t\t\t\tmetadata: rawData.metadata || {\n\t\t\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\t\t\tdescription: 'Tasks for master context'\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t};\n\t\t\t// Ensure proper metadata using utility\n\t\t\tensureTagMetadata(rawData.master, {\n\t\t\t\tdescription: 'Tasks for master context'\n\t\t\t});\n\t\t\t// Do not write the file here; it will be written later with the new task.\n\n\t\t\t// Perform complete migration (config.json, state.json)\n\t\t\tperformCompleteTagMigration(tasksPath);\n\t\t\tmarkMigrationForNotice(tasksPath);\n\n\t\t\treport('Successfully migrated to tagged format.', 'success');\n\t\t}\n\n\t\t// Use the provided tag, or the current active tag, or default to 'master'\n\t\tconst targetTag = tag;\n\n\t\t// Ensure the target tag exists\n\t\tif (!rawData[targetTag]) {\n\t\t\treport(\n\t\t\t\t`Tag \"${targetTag}\" does not exist. Please create it first using the 'add-tag' command.`,\n\t\t\t\t'error'\n\t\t\t);\n\t\t\tthrow new Error(`Tag \"${targetTag}\" not found.`);\n\t\t}\n\n\t\t// Ensure the target tag has a tasks array and metadata object\n\t\tif (!rawData[targetTag].tasks) {\n\t\t\trawData[targetTag].tasks = [];\n\t\t}\n\t\tif (!rawData[targetTag].metadata) {\n\t\t\trawData[targetTag].metadata = {\n\t\t\t\tcreated: new Date().toISOString(),\n\t\t\t\tupdated: new Date().toISOString(),\n\t\t\t\tdescription: ``\n\t\t\t};\n\t\t}\n\n\t\t// Get a flat list of ALL tasks across ALL tags to validate dependencies\n\t\tconst allTasks = getAllTasks(rawData);\n\n\t\t// Find the highest task ID *within the target tag* to determine the next ID\n\t\tconst tasksInTargetTag = rawData[targetTag].tasks;\n\t\tconst highestId =\n\t\t\ttasksInTargetTag.length > 0\n\t\t\t\t? 
Math.max(...tasksInTargetTag.map((t) => t.id))\n\t\t\t\t: 0;\n\t\tconst newTaskId = highestId + 1;\n\n\t\t// Only show UI box for CLI mode\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.log(\n\t\t\t\tboxen(chalk.white.bold(`Creating New Task #${newTaskId}`), {\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t})\n\t\t\t);\n\t\t}\n\n\t\t// Validate dependencies before proceeding\n\t\tconst invalidDeps = dependencies.filter((depId) => {\n\t\t\t// Ensure depId is parsed as a number for comparison\n\t\t\tconst numDepId = parseInt(depId, 10);\n\t\t\treturn Number.isNaN(numDepId) || !allTasks.some((t) => t.id === numDepId);\n\t\t});\n\n\t\tif (invalidDeps.length > 0) {\n\t\t\treport(\n\t\t\t\t`The following dependencies do not exist or are invalid: ${invalidDeps.join(', ')}`,\n\t\t\t\t'warn'\n\t\t\t);\n\t\t\treport('Removing invalid dependencies...', 'info');\n\t\t\tdependencies = dependencies.filter(\n\t\t\t\t(depId) => !invalidDeps.includes(depId)\n\t\t\t);\n\t\t}\n\t\t// Ensure dependencies are numbers\n\t\tconst numericDependencies = dependencies.map((dep) => parseInt(dep, 10));\n\n\t\t// Build dependency graphs for explicitly specified dependencies\n\t\tconst dependencyGraphs = [];\n\t\tconst allRelatedTaskIds = new Set();\n\t\tconst depthMap = new Map();\n\n\t\t// First pass: build a complete dependency graph for each specified dependency\n\t\tfor (const depId of numericDependencies) {\n\t\t\tconst graph = buildDependencyGraph(allTasks, depId, new Set(), depthMap);\n\t\t\tif (graph) {\n\t\t\t\tdependencyGraphs.push(graph);\n\t\t\t}\n\t\t}\n\n\t\t// Second pass: build a set of all related task IDs for flat analysis\n\t\tfor (const [taskId, depth] of depthMap.entries()) {\n\t\t\tallRelatedTaskIds.add(taskId);\n\t\t}\n\n\t\tlet taskData;\n\n\t\t// Check if manual task data is provided\n\t\tif (manualTaskData) {\n\t\t\treport('Using manually provided task data', 
'info');\n\t\t\ttaskData = manualTaskData;\n\t\t\treport('DEBUG: Taking MANUAL task data path.', 'debug');\n\n\t\t\t// Basic validation for manual data\n\t\t\tif (\n\t\t\t\t!taskData.title ||\n\t\t\t\ttypeof taskData.title !== 'string' ||\n\t\t\t\t!taskData.description ||\n\t\t\t\ttypeof taskData.description !== 'string'\n\t\t\t) {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t'Manual task data must include at least a title and description.'\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\treport('DEBUG: Taking AI task generation path.', 'debug');\n\t\t\t// --- Refactored AI Interaction ---\n\t\t\treport(`Generating task data with AI with prompt:\\n${prompt}`, 'info');\n\n\t\t\t// --- Use the new ContextGatherer ---\n\t\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\t\t\tconst gatherResult = await contextGatherer.gather({\n\t\t\t\tsemanticQuery: prompt,\n\t\t\t\tdependencyTasks: numericDependencies,\n\t\t\t\tformat: 'research'\n\t\t\t});\n\n\t\t\tconst gatheredContext = gatherResult.context;\n\t\t\tconst analysisData = gatherResult.analysisData;\n\n\t\t\t// Display context analysis if not in silent mode\n\t\t\tif (outputFormat === 'text' && analysisData) {\n\t\t\t\tdisplayContextAnalysis(analysisData, prompt, gatheredContext.length);\n\t\t\t}\n\n\t\t\t// Add any manually provided details to the prompt for context\n\t\t\tlet contextFromArgs = '';\n\t\t\tif (manualTaskData?.title)\n\t\t\t\tcontextFromArgs += `\\n- Suggested Title: \"${manualTaskData.title}\"`;\n\t\t\tif (manualTaskData?.description)\n\t\t\t\tcontextFromArgs += `\\n- Suggested Description: \"${manualTaskData.description}\"`;\n\t\t\tif (manualTaskData?.details)\n\t\t\t\tcontextFromArgs += `\\n- Additional Details Context: \"${manualTaskData.details}\"`;\n\t\t\tif (manualTaskData?.testStrategy)\n\t\t\t\tcontextFromArgs += `\\n- Additional Test Strategy Context: \"${manualTaskData.testStrategy}\"`;\n\n\t\t\t// Load prompts using PromptManager\n\t\t\tconst promptManager = 
getPromptManager();\n\t\t\tconst { systemPrompt, userPrompt } = await promptManager.loadPrompt(\n\t\t\t\t'add-task',\n\t\t\t\t{\n\t\t\t\t\tprompt,\n\t\t\t\t\tnewTaskId,\n\t\t\t\t\texistingTasks: allTasks,\n\t\t\t\t\tgatheredContext,\n\t\t\t\t\tcontextFromArgs,\n\t\t\t\t\tuseResearch,\n\t\t\t\t\tpriority: effectivePriority,\n\t\t\t\t\tdependencies: numericDependencies\n\t\t\t\t}\n\t\t\t);\n\n\t\t\t// Start the loading indicator - only for text mode\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tloadingIndicator = startLoadingIndicator(\n\t\t\t\t\t`Generating new task with ${useResearch ? 'Research' : 'Main'} AI... \\n`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\ttry {\n\t\t\t\tconst serviceRole = useResearch ? 'research' : 'main';\n\t\t\t\treport('DEBUG: Calling generateObjectService...', 'debug');\n\n\t\t\t\taiServiceResponse = await generateObjectService({\n\t\t\t\t\t// Capture the full response\n\t\t\t\t\trole: serviceRole,\n\t\t\t\t\tsession: session,\n\t\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\t\tschema: AiTaskDataSchema,\n\t\t\t\t\tobjectName: 'newTaskData',\n\t\t\t\t\tsystemPrompt: systemPrompt,\n\t\t\t\t\tprompt: userPrompt,\n\t\t\t\t\tcommandName: commandName || 'add-task', // Use passed commandName or default\n\t\t\t\t\toutputType: outputType || (isMCP ? 
'mcp' : 'cli') // Use passed outputType or derive\n\t\t\t\t});\n\t\t\t\treport('DEBUG: generateObjectService returned successfully.', 'debug');\n\n\t\t\t\tif (!aiServiceResponse || !aiServiceResponse.mainResult) {\n\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t'AI service did not return the expected object structure.'\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Prefer mainResult if it looks like a valid task object, otherwise try mainResult.object\n\t\t\t\tif (\n\t\t\t\t\taiServiceResponse.mainResult.title &&\n\t\t\t\t\taiServiceResponse.mainResult.description\n\t\t\t\t) {\n\t\t\t\t\ttaskData = aiServiceResponse.mainResult;\n\t\t\t\t} else if (\n\t\t\t\t\taiServiceResponse.mainResult.object &&\n\t\t\t\t\taiServiceResponse.mainResult.object.title &&\n\t\t\t\t\taiServiceResponse.mainResult.object.description\n\t\t\t\t) {\n\t\t\t\t\ttaskData = aiServiceResponse.mainResult.object;\n\t\t\t\t} else {\n\t\t\t\t\tthrow new Error('AI service did not return a valid task object.');\n\t\t\t\t}\n\n\t\t\t\treport('Successfully generated task data from AI.', 'success');\n\n\t\t\t\t// Success! Show checkmark\n\t\t\t\tif (loadingIndicator) {\n\t\t\t\t\tsucceedLoadingIndicator(\n\t\t\t\t\t\tloadingIndicator,\n\t\t\t\t\t\t'Task generated successfully'\n\t\t\t\t\t);\n\t\t\t\t\tloadingIndicator = null; // Clear it\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\t// Failure! 
Show X\n\t\t\t\tif (loadingIndicator) {\n\t\t\t\t\tfailLoadingIndicator(loadingIndicator, 'AI generation failed');\n\t\t\t\t\tloadingIndicator = null;\n\t\t\t\t}\n\t\t\t\treport(\n\t\t\t\t\t`DEBUG: generateObjectService caught error: ${error.message}`,\n\t\t\t\t\t'debug'\n\t\t\t\t);\n\t\t\t\treport(`Error generating task with AI: ${error.message}`, 'error');\n\t\t\t\tthrow error; // Re-throw error after logging\n\t\t\t} finally {\n\t\t\t\treport('DEBUG: generateObjectService finally block reached.', 'debug');\n\t\t\t\t// Clean up if somehow still running\n\t\t\t\tif (loadingIndicator) {\n\t\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t\t}\n\t\t\t}\n\t\t\t// --- End Refactored AI Interaction ---\n\t\t}\n\n\t\t// Create the new task object\n\t\tconst newTask = {\n\t\t\tid: newTaskId,\n\t\t\ttitle: taskData.title,\n\t\t\tdescription: taskData.description,\n\t\t\tdetails: taskData.details || '',\n\t\t\ttestStrategy: taskData.testStrategy || '',\n\t\t\tstatus: 'pending',\n\t\t\tdependencies: taskData.dependencies?.length\n\t\t\t\t? taskData.dependencies\n\t\t\t\t: numericDependencies, // Use AI-suggested dependencies if available, fallback to manually specified\n\t\t\tpriority: effectivePriority,\n\t\t\tsubtasks: [] // Initialize with empty subtasks array\n\t\t};\n\n\t\t// Additional check: validate all dependencies in the AI response\n\t\tif (taskData.dependencies?.length) {\n\t\t\tconst allValidDeps = taskData.dependencies.every((depId) => {\n\t\t\t\tconst numDepId = parseInt(depId, 10);\n\t\t\t\treturn (\n\t\t\t\t\t!Number.isNaN(numDepId) && allTasks.some((t) => t.id === numDepId)\n\t\t\t\t);\n\t\t\t});\n\n\t\t\tif (!allValidDeps) {\n\t\t\t\treport(\n\t\t\t\t\t'AI suggested invalid dependencies. 
Filtering them out...',\n\t\t\t\t\t'warn'\n\t\t\t\t);\n\t\t\t\tnewTask.dependencies = taskData.dependencies.filter((depId) => {\n\t\t\t\t\tconst numDepId = parseInt(depId, 10);\n\t\t\t\t\treturn (\n\t\t\t\t\t\t!Number.isNaN(numDepId) && allTasks.some((t) => t.id === numDepId)\n\t\t\t\t\t);\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\t// Add the task to the tasks array OF THE CORRECT TAG\n\t\trawData[targetTag].tasks.push(newTask);\n\t\t// Update the tag's metadata\n\t\tensureTagMetadata(rawData[targetTag], {\n\t\t\tdescription: `Tasks for ${targetTag} context`\n\t\t});\n\n\t\treport('DEBUG: Writing tasks.json...', 'debug');\n\t\t// Write the updated raw data back to the file\n\t\t// The writeJSON function will automatically filter out _rawTaggedData\n\t\twriteJSON(tasksPath, rawData, projectRoot, targetTag);\n\t\treport('DEBUG: tasks.json written.', 'debug');\n\n\t\t// Show success message - only for text output (CLI)\n\t\tif (outputFormat === 'text') {\n\t\t\tconst table = new Table({\n\t\t\t\thead: [\n\t\t\t\t\tchalk.cyan.bold('ID'),\n\t\t\t\t\tchalk.cyan.bold('Title'),\n\t\t\t\t\tchalk.cyan.bold('Description')\n\t\t\t\t],\n\t\t\t\tcolWidths: [5, 30, 50] // Adjust widths as needed\n\t\t\t});\n\n\t\t\ttable.push([\n\t\t\t\tnewTask.id,\n\t\t\t\ttruncate(newTask.title, 27),\n\t\t\t\ttruncate(newTask.description, 47)\n\t\t\t]);\n\n\t\t\tconsole.log(chalk.green('✓ New task created successfully:'));\n\t\t\tconsole.log(table.toString());\n\n\t\t\t// Helper to get priority color\n\t\t\tconst getPriorityColor = (p) => {\n\t\t\t\tswitch (p?.toLowerCase()) {\n\t\t\t\t\tcase 'high':\n\t\t\t\t\t\treturn 'red';\n\t\t\t\t\tcase 'low':\n\t\t\t\t\t\treturn 'gray';\n\t\t\t\t\tdefault:\n\t\t\t\t\t\treturn 'yellow';\n\t\t\t\t}\n\t\t\t};\n\n\t\t\t// Check if AI added new dependencies that weren't explicitly provided\n\t\t\tconst aiAddedDeps = newTask.dependencies.filter(\n\t\t\t\t(dep) => !numericDependencies.includes(dep)\n\t\t\t);\n\n\t\t\t// Check if AI removed any dependencies that were 
explicitly provided\n\t\t\tconst aiRemovedDeps = numericDependencies.filter(\n\t\t\t\t(dep) => !newTask.dependencies.includes(dep)\n\t\t\t);\n\n\t\t\t// Get task titles for dependencies to display\n\t\t\tconst depTitles = {};\n\t\t\tnewTask.dependencies.forEach((dep) => {\n\t\t\t\tconst depTask = allTasks.find((t) => t.id === dep);\n\t\t\t\tif (depTask) {\n\t\t\t\t\tdepTitles[dep] = truncate(depTask.title, 30);\n\t\t\t\t}\n\t\t\t});\n\n\t\t\t// Prepare dependency display string\n\t\t\tlet dependencyDisplay = '';\n\t\t\tif (newTask.dependencies.length > 0) {\n\t\t\t\tdependencyDisplay = chalk.white('Dependencies:') + '\\n';\n\t\t\t\tnewTask.dependencies.forEach((dep) => {\n\t\t\t\t\tconst isAiAdded = aiAddedDeps.includes(dep);\n\t\t\t\t\tconst depType = isAiAdded ? chalk.yellow(' (AI suggested)') : '';\n\t\t\t\t\tdependencyDisplay +=\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t` - ${dep}: ${depTitles[dep] || 'Unknown task'}${depType}`\n\t\t\t\t\t\t) + '\\n';\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tdependencyDisplay = chalk.white('Dependencies: None') + '\\n';\n\t\t\t}\n\n\t\t\t// Add info about removed dependencies if any\n\t\t\tif (aiRemovedDeps.length > 0) {\n\t\t\t\tdependencyDisplay +=\n\t\t\t\t\tchalk.gray('\\nUser-specified dependencies that were not used:') +\n\t\t\t\t\t'\\n';\n\t\t\t\taiRemovedDeps.forEach((dep) => {\n\t\t\t\t\tconst depTask = allTasks.find((t) => t.id === dep);\n\t\t\t\t\tconst title = depTask ? 
truncate(depTask.title, 30) : 'Unknown task';\n\t\t\t\t\tdependencyDisplay += chalk.gray(` - ${dep}: ${title}`) + '\\n';\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Add dependency analysis summary\n\t\t\tlet dependencyAnalysis = '';\n\t\t\tif (aiAddedDeps.length > 0 || aiRemovedDeps.length > 0) {\n\t\t\t\tdependencyAnalysis =\n\t\t\t\t\t'\\n' + chalk.white.bold('Dependency Analysis:') + '\\n';\n\t\t\t\tif (aiAddedDeps.length > 0) {\n\t\t\t\t\tdependencyAnalysis +=\n\t\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t\t`AI identified ${aiAddedDeps.length} additional dependencies`\n\t\t\t\t\t\t) + '\\n';\n\t\t\t\t}\n\t\t\t\tif (aiRemovedDeps.length > 0) {\n\t\t\t\t\tdependencyAnalysis +=\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`AI excluded ${aiRemovedDeps.length} user-provided dependencies`\n\t\t\t\t\t\t) + '\\n';\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Show success message box\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.white.bold(`Task ${newTaskId} Created Successfully`) +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tchalk.white(`Title: ${newTask.title}`) +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.white(`Status: ${getStatusWithColor(newTask.status)}`) +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.white(\n\t\t\t\t\t\t\t`Priority: ${chalk[getPriorityColor(newTask.priority)](newTask.priority)}`\n\t\t\t\t\t\t) +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\tdependencyDisplay +\n\t\t\t\t\t\tdependencyAnalysis +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.white.bold('Next Steps:') +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t`1. Run ${chalk.yellow(`task-master show ${newTaskId}`)} to see complete task details`\n\t\t\t\t\t\t) +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t`2. Run ${chalk.yellow(`task-master set-status --id=${newTaskId} --status=in-progress`)} to start working on it`\n\t\t\t\t\t\t) +\n\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t\t`3. 
Run ${chalk.yellow(`task-master expand --id=${newTaskId}`)} to break it down into subtasks`\n\t\t\t\t\t\t),\n\t\t\t\t\t{ padding: 1, borderColor: 'green', borderStyle: 'round' }\n\t\t\t\t)\n\t\t\t);\n\n\t\t\t// Display AI Usage Summary if telemetryData is available\n\t\t\tif (\n\t\t\t\taiServiceResponse &&\n\t\t\t\taiServiceResponse.telemetryData &&\n\t\t\t\t(outputType === 'cli' || outputType === 'text')\n\t\t\t) {\n\t\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t\t}\n\t\t}\n\n\t\treport(\n\t\t\t`DEBUG: Returning new task ID: ${newTaskId} and telemetry.`,\n\t\t\t'debug'\n\t\t);\n\t\treturn {\n\t\t\tnewTaskId: newTaskId,\n\t\t\ttelemetryData: aiServiceResponse ? aiServiceResponse.telemetryData : null,\n\t\t\ttagInfo: aiServiceResponse ? aiServiceResponse.tagInfo : null\n\t\t};\n\t} catch (error) {\n\t\t// Stop any loading indicator on error\n\t\tif (loadingIndicator) {\n\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t}\n\n\t\treport(`Error adding task: ${error.message}`, 'error');\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\t\t}\n\t\t// In MCP mode, we let the direct function handler catch and format\n\t\tthrow error;\n\t}\n}\n\nexport default addTask;\n"], ["/claude-task-master/scripts/modules/dependency-manager.js", "/**\n * dependency-manager.js\n * Manages task dependencies and relationships\n */\n\nimport path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\n\nimport {\n\tlog,\n\treadJSON,\n\twriteJSON,\n\ttaskExists,\n\tformatTaskId,\n\tfindCycles,\n\tisSilentMode\n} from './utils.js';\n\nimport { displayBanner } from './ui.js';\n\nimport { generateTaskFiles } from './task-manager.js';\n\n/**\n * Add a dependency to a task\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {number|string} taskId - ID of the task to add dependency to\n * @param {number|string} dependencyId - ID of the task to add as dependency\n * @param {Object} 
context - Context object containing projectRoot and tag information\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n */\nasync function addDependency(tasksPath, taskId, dependencyId, context = {}) {\n\tlog('info', `Adding dependency ${dependencyId} to task ${taskId}...`);\n\n\tconst data = readJSON(tasksPath, context.projectRoot, context.tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found in tasks.json');\n\t\tprocess.exit(1);\n\t}\n\n\t// Format the task and dependency IDs correctly\n\tconst formattedTaskId =\n\t\ttypeof taskId === 'string' && taskId.includes('.')\n\t\t\t? taskId\n\t\t\t: parseInt(taskId, 10);\n\n\tconst formattedDependencyId = formatTaskId(dependencyId);\n\n\t// Check if the dependency task or subtask actually exists\n\tif (!taskExists(data.tasks, formattedDependencyId)) {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`Dependency target ${formattedDependencyId} does not exist in tasks.json`\n\t\t);\n\t\tprocess.exit(1);\n\t}\n\n\t// Find the task to update\n\tlet targetTask = null;\n\tlet isSubtask = false;\n\n\tif (typeof formattedTaskId === 'string' && formattedTaskId.includes('.')) {\n\t\t// Handle dot notation for subtasks (e.g., \"1.2\")\n\t\tconst [parentId, subtaskId] = formattedTaskId\n\t\t\t.split('.')\n\t\t\t.map((id) => parseInt(id, 10));\n\t\tconst parentTask = data.tasks.find((t) => t.id === parentId);\n\n\t\tif (!parentTask) {\n\t\t\tlog('error', `Parent task ${parentId} not found.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\n\t\tif (!parentTask.subtasks) {\n\t\t\tlog('error', `Parent task ${parentId} has no subtasks.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\n\t\ttargetTask = parentTask.subtasks.find((s) => s.id === subtaskId);\n\t\tisSubtask = true;\n\n\t\tif (!targetTask) {\n\t\t\tlog('error', `Subtask ${formattedTaskId} not found.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\t} else {\n\t\t// Regular task (not a subtask)\n\t\ttargetTask = data.tasks.find((t) => t.id === 
formattedTaskId);\n\n\t\tif (!targetTask) {\n\t\t\tlog('error', `Task ${formattedTaskId} not found.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\t}\n\n\t// Initialize dependencies array if it doesn't exist\n\tif (!targetTask.dependencies) {\n\t\ttargetTask.dependencies = [];\n\t}\n\n\t// Check if dependency already exists\n\tif (\n\t\ttargetTask.dependencies.some((d) => {\n\t\t\t// Convert both to strings for comparison to handle both numeric and string IDs\n\t\t\treturn String(d) === String(formattedDependencyId);\n\t\t})\n\t) {\n\t\tlog(\n\t\t\t'warn',\n\t\t\t`Dependency ${formattedDependencyId} already exists in task ${formattedTaskId}.`\n\t\t);\n\t\treturn;\n\t}\n\n\t// Check if the task is trying to depend on itself - compare full IDs (including subtask parts)\n\tif (String(formattedTaskId) === String(formattedDependencyId)) {\n\t\tlog('error', `Task ${formattedTaskId} cannot depend on itself.`);\n\t\tprocess.exit(1);\n\t}\n\n\t// For subtasks of the same parent, we need to make sure we're not treating it as a self-dependency\n\t// Check if we're dealing with subtasks with the same parent task\n\tlet isSelfDependency = false;\n\n\tif (\n\t\ttypeof formattedTaskId === 'string' &&\n\t\ttypeof formattedDependencyId === 'string' &&\n\t\tformattedTaskId.includes('.') &&\n\t\tformattedDependencyId.includes('.')\n\t) {\n\t\tconst [taskParentId] = formattedTaskId.split('.');\n\t\tconst [depParentId] = formattedDependencyId.split('.');\n\n\t\t// Only treat it as a self-dependency if both the parent ID and subtask ID are identical\n\t\tisSelfDependency = formattedTaskId === formattedDependencyId;\n\n\t\t// Log for debugging\n\t\tlog(\n\t\t\t'debug',\n\t\t\t`Adding dependency between subtasks: ${formattedTaskId} depends on ${formattedDependencyId}`\n\t\t);\n\t\tlog(\n\t\t\t'debug',\n\t\t\t`Parent IDs: ${taskParentId} and ${depParentId}, Self-dependency check: ${isSelfDependency}`\n\t\t);\n\t}\n\n\tif (isSelfDependency) {\n\t\tlog('error', `Subtask ${formattedTaskId} cannot depend 
on itself.`);\n\t\tprocess.exit(1);\n\t}\n\n\t// Check for circular dependencies\n\tconst dependencyChain = [formattedTaskId];\n\tif (\n\t\t!isCircularDependency(data.tasks, formattedDependencyId, dependencyChain)\n\t) {\n\t\t// Add the dependency\n\t\ttargetTask.dependencies.push(formattedDependencyId);\n\n\t\t// Sort dependencies numerically or by parent task ID first, then subtask ID\n\t\ttargetTask.dependencies.sort((a, b) => {\n\t\t\tif (typeof a === 'number' && typeof b === 'number') {\n\t\t\t\treturn a - b;\n\t\t\t} else if (typeof a === 'string' && typeof b === 'string') {\n\t\t\t\tconst [aParent, aChild] = a.split('.').map(Number);\n\t\t\t\tconst [bParent, bChild] = b.split('.').map(Number);\n\t\t\t\treturn aParent !== bParent ? aParent - bParent : aChild - bChild;\n\t\t\t} else if (typeof a === 'number') {\n\t\t\t\treturn -1; // Numbers come before strings\n\t\t\t} else {\n\t\t\t\treturn 1; // Strings come after numbers\n\t\t\t}\n\t\t});\n\n\t\t// Save changes\n\t\twriteJSON(tasksPath, data, context.projectRoot, context.tag);\n\t\tlog(\n\t\t\t'success',\n\t\t\t`Added dependency ${formattedDependencyId} to task ${formattedTaskId}`\n\t\t);\n\n\t\t// Display a more visually appealing success message\n\t\tif (!isSilentMode()) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.green(`Successfully added dependency:\\n\\n`) +\n\t\t\t\t\t\t`Task ${chalk.bold(formattedTaskId)} now depends on ${chalk.bold(formattedDependencyId)}`,\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\n\t\t// Generate updated task files\n\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath));\n\n\t\tlog('info', 'Task files regenerated with updated dependencies.');\n\t} else {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`Cannot add dependency ${formattedDependencyId} to task ${formattedTaskId} as it would create a circular 
dependency.`\n\t\t);\n\t\tprocess.exit(1);\n\t}\n}\n\n/**\n * Remove a dependency from a task\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {number|string} taskId - ID of the task to remove dependency from\n * @param {number|string} dependencyId - ID of the task to remove as dependency\n * @param {Object} context - Context object containing projectRoot and tag information\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n */\nasync function removeDependency(tasksPath, taskId, dependencyId, context = {}) {\n\tlog('info', `Removing dependency ${dependencyId} from task ${taskId}...`);\n\n\t// Read tasks file\n\tconst data = readJSON(tasksPath, context.projectRoot, context.tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found.');\n\t\tprocess.exit(1);\n\t}\n\n\t// Format the task and dependency IDs correctly\n\tconst formattedTaskId =\n\t\ttypeof taskId === 'string' && taskId.includes('.')\n\t\t\t? 
taskId\n\t\t\t: parseInt(taskId, 10);\n\n\tconst formattedDependencyId = formatTaskId(dependencyId);\n\n\t// Find the task to update\n\tlet targetTask = null;\n\tlet isSubtask = false;\n\n\tif (typeof formattedTaskId === 'string' && formattedTaskId.includes('.')) {\n\t\t// Handle dot notation for subtasks (e.g., \"1.2\")\n\t\tconst [parentId, subtaskId] = formattedTaskId\n\t\t\t.split('.')\n\t\t\t.map((id) => parseInt(id, 10));\n\t\tconst parentTask = data.tasks.find((t) => t.id === parentId);\n\n\t\tif (!parentTask) {\n\t\t\tlog('error', `Parent task ${parentId} not found.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\n\t\tif (!parentTask.subtasks) {\n\t\t\tlog('error', `Parent task ${parentId} has no subtasks.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\n\t\ttargetTask = parentTask.subtasks.find((s) => s.id === subtaskId);\n\t\tisSubtask = true;\n\n\t\tif (!targetTask) {\n\t\t\tlog('error', `Subtask ${formattedTaskId} not found.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\t} else {\n\t\t// Regular task (not a subtask)\n\t\ttargetTask = data.tasks.find((t) => t.id === formattedTaskId);\n\n\t\tif (!targetTask) {\n\t\t\tlog('error', `Task ${formattedTaskId} not found.`);\n\t\t\tprocess.exit(1);\n\t\t}\n\t}\n\n\t// Check if the task has any dependencies\n\tif (!targetTask.dependencies || targetTask.dependencies.length === 0) {\n\t\tlog(\n\t\t\t'info',\n\t\t\t`Task ${formattedTaskId} has no dependencies, nothing to remove.`\n\t\t);\n\t\treturn;\n\t}\n\n\t// Normalize the dependency ID for comparison to handle different formats\n\tconst normalizedDependencyId = String(formattedDependencyId);\n\n\t// Check if the dependency exists by comparing string representations\n\tconst dependencyIndex = targetTask.dependencies.findIndex((dep) => {\n\t\t// Convert both to strings for comparison\n\t\tlet depStr = String(dep);\n\n\t\t// Special handling for numeric IDs that might be subtask references\n\t\tif (typeof dep === 'number' && dep < 100 && isSubtask) {\n\t\t\t// It's likely a reference to another 
subtask in the same parent task\n\t\t\t// Convert to full format for comparison (e.g., 2 -> \"1.2\" for a subtask in task 1)\n\t\t\tconst [parentId] = formattedTaskId.split('.');\n\t\t\tdepStr = `${parentId}.${dep}`;\n\t\t}\n\n\t\treturn depStr === normalizedDependencyId;\n\t});\n\n\tif (dependencyIndex === -1) {\n\t\tlog(\n\t\t\t'info',\n\t\t\t`Task ${formattedTaskId} does not depend on ${formattedDependencyId}, no changes made.`\n\t\t);\n\t\treturn;\n\t}\n\n\t// Remove the dependency\n\ttargetTask.dependencies.splice(dependencyIndex, 1);\n\n\t// Save the updated tasks\n\twriteJSON(tasksPath, data, context.projectRoot, context.tag);\n\n\t// Success message\n\tlog(\n\t\t'success',\n\t\t`Removed dependency: Task ${formattedTaskId} no longer depends on ${formattedDependencyId}`\n\t);\n\n\tif (!isSilentMode()) {\n\t\t// Display a more visually appealing success message\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\tchalk.green(`Successfully removed dependency:\\n\\n`) +\n\t\t\t\t\t`Task ${chalk.bold(formattedTaskId)} no longer depends on ${chalk.bold(formattedDependencyId)}`,\n\t\t\t\t{\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n\n\t// Regenerate task files\n\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath));\n}\n\n/**\n * Check if adding a dependency would create a circular dependency\n * @param {Array} tasks - Array of all tasks\n * @param {number|string} taskId - ID of task to check\n * @param {Array} chain - Chain of dependencies to check\n * @returns {boolean} True if circular dependency would be created\n */\nfunction isCircularDependency(tasks, taskId, chain = []) {\n\t// Convert taskId to string for comparison\n\tconst taskIdStr = String(taskId);\n\n\t// If we've seen this task before in the chain, we have a circular dependency\n\tif (chain.some((id) => String(id) === taskIdStr)) {\n\t\treturn true;\n\t}\n\n\t// Find the task or subtask\n\tlet 
task = null;\n\tlet parentIdForSubtask = null;\n\n\t// Check if this is a subtask reference (e.g., \"1.2\")\n\tif (taskIdStr.includes('.')) {\n\t\tconst [parentId, subtaskId] = taskIdStr.split('.').map(Number);\n\t\tconst parentTask = tasks.find((t) => t.id === parentId);\n\t\tparentIdForSubtask = parentId; // Store parent ID if it's a subtask\n\n\t\tif (parentTask && parentTask.subtasks) {\n\t\t\ttask = parentTask.subtasks.find((st) => st.id === subtaskId);\n\t\t}\n\t} else {\n\t\t// Regular task\n\t\ttask = tasks.find((t) => String(t.id) === taskIdStr);\n\t}\n\n\tif (!task) {\n\t\treturn false; // Task doesn't exist, can't create circular dependency\n\t}\n\n\t// No dependencies, can't create circular dependency\n\tif (!task.dependencies || task.dependencies.length === 0) {\n\t\treturn false;\n\t}\n\n\t// Check each dependency recursively\n\tconst newChain = [...chain, taskIdStr]; // Use taskIdStr for consistency\n\treturn task.dependencies.some((depId) => {\n\t\tlet normalizedDepId = String(depId);\n\t\t// Normalize relative subtask dependencies\n\t\tif (typeof depId === 'number' && parentIdForSubtask !== null) {\n\t\t\t// If the current task is a subtask AND the dependency is a number,\n\t\t\t// assume it refers to a sibling subtask.\n\t\t\tnormalizedDepId = `${parentIdForSubtask}.${depId}`;\n\t\t}\n\t\t// Pass the normalized ID to the recursive call\n\t\treturn isCircularDependency(tasks, normalizedDepId, newChain);\n\t});\n}\n\n/**\n * Validate task dependencies\n * @param {Array} tasks - Array of all tasks\n * @returns {Object} Validation result with valid flag and issues array\n */\nfunction validateTaskDependencies(tasks) {\n\tconst issues = [];\n\n\t// Check each task's dependencies\n\ttasks.forEach((task) => {\n\t\tif (!task.dependencies) {\n\t\t\treturn; // No dependencies to validate\n\t\t}\n\n\t\ttask.dependencies.forEach((depId) => {\n\t\t\t// Check for self-dependencies\n\t\t\tif (String(depId) === String(task.id)) 
{\n\t\t\t\tissues.push({\n\t\t\t\t\ttype: 'self',\n\t\t\t\t\ttaskId: task.id,\n\t\t\t\t\tmessage: `Task ${task.id} depends on itself`\n\t\t\t\t});\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Check if dependency exists\n\t\t\tif (!taskExists(tasks, depId)) {\n\t\t\t\tissues.push({\n\t\t\t\t\ttype: 'missing',\n\t\t\t\t\ttaskId: task.id,\n\t\t\t\t\tdependencyId: depId,\n\t\t\t\t\tmessage: `Task ${task.id} depends on non-existent task ${depId}`\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Check for circular dependencies\n\t\tif (isCircularDependency(tasks, task.id)) {\n\t\t\tissues.push({\n\t\t\t\ttype: 'circular',\n\t\t\t\ttaskId: task.id,\n\t\t\t\tmessage: `Task ${task.id} is part of a circular dependency chain`\n\t\t\t});\n\t\t}\n\n\t\t// Check subtask dependencies if they exist\n\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\tif (!subtask.dependencies) {\n\t\t\t\t\treturn; // No dependencies to validate\n\t\t\t\t}\n\n\t\t\t\t// Create a full subtask ID for reference\n\t\t\t\tconst fullSubtaskId = `${task.id}.${subtask.id}`;\n\n\t\t\t\tsubtask.dependencies.forEach((depId) => {\n\t\t\t\t\t// Check for self-dependencies in subtasks\n\t\t\t\t\tif (\n\t\t\t\t\t\tString(depId) === String(fullSubtaskId) ||\n\t\t\t\t\t\t(typeof depId === 'number' && depId === subtask.id)\n\t\t\t\t\t) {\n\t\t\t\t\t\tissues.push({\n\t\t\t\t\t\t\ttype: 'self',\n\t\t\t\t\t\t\ttaskId: fullSubtaskId,\n\t\t\t\t\t\t\tmessage: `Subtask ${fullSubtaskId} depends on itself`\n\t\t\t\t\t\t});\n\t\t\t\t\t\treturn;\n\t\t\t\t\t}\n\n\t\t\t\t\t// Check if dependency exists\n\t\t\t\t\tif (!taskExists(tasks, depId)) {\n\t\t\t\t\t\tissues.push({\n\t\t\t\t\t\t\ttype: 'missing',\n\t\t\t\t\t\t\ttaskId: fullSubtaskId,\n\t\t\t\t\t\t\tdependencyId: depId,\n\t\t\t\t\t\t\tmessage: `Subtask ${fullSubtaskId} depends on non-existent task/subtask ${depId}`\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t});\n\n\t\t\t\t// Check for circular dependencies in subtasks\n\t\t\t\tif 
(isCircularDependency(tasks, fullSubtaskId)) {\n\t\t\t\t\tissues.push({\n\t\t\t\t\t\ttype: 'circular',\n\t\t\t\t\t\ttaskId: fullSubtaskId,\n\t\t\t\t\t\tmessage: `Subtask ${fullSubtaskId} is part of a circular dependency chain`\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\n\treturn {\n\t\tvalid: issues.length === 0,\n\t\tissues\n\t};\n}\n\n/**\n * Remove duplicate dependencies from tasks\n * @param {Object} tasksData - Tasks data object with tasks array\n * @returns {Object} Updated tasks data with duplicates removed\n */\nfunction removeDuplicateDependencies(tasksData) {\n\tconst tasks = tasksData.tasks.map((task) => {\n\t\tif (!task.dependencies) {\n\t\t\treturn task;\n\t\t}\n\n\t\t// Convert to Set and back to array to remove duplicates\n\t\tconst uniqueDeps = [...new Set(task.dependencies)];\n\t\treturn {\n\t\t\t...task,\n\t\t\tdependencies: uniqueDeps\n\t\t};\n\t});\n\n\treturn {\n\t\t...tasksData,\n\t\ttasks\n\t};\n}\n\n/**\n * Clean up invalid subtask dependencies\n * @param {Object} tasksData - Tasks data object with tasks array\n * @returns {Object} Updated tasks data with invalid subtask dependencies removed\n */\nfunction cleanupSubtaskDependencies(tasksData) {\n\tconst tasks = tasksData.tasks.map((task) => {\n\t\t// Handle task's own dependencies\n\t\tif (task.dependencies) {\n\t\t\ttask.dependencies = task.dependencies.filter((depId) => {\n\t\t\t\t// Keep only dependencies that exist\n\t\t\t\treturn taskExists(tasksData.tasks, depId);\n\t\t\t});\n\t\t}\n\n\t\t// Handle subtask dependencies\n\t\tif (task.subtasks) {\n\t\t\ttask.subtasks = task.subtasks.map((subtask) => {\n\t\t\t\tif (!subtask.dependencies) {\n\t\t\t\t\treturn subtask;\n\t\t\t\t}\n\n\t\t\t\t// Filter out dependencies to non-existent subtasks\n\t\t\t\tsubtask.dependencies = subtask.dependencies.filter((depId) => {\n\t\t\t\t\treturn taskExists(tasksData.tasks, depId);\n\t\t\t\t});\n\n\t\t\t\treturn subtask;\n\t\t\t});\n\t\t}\n\n\t\treturn task;\n\t});\n\n\treturn 
{\n\t\t...tasksData,\n\t\ttasks\n\t};\n}\n\n/**\n * Validate dependencies in task files\n * @param {string} tasksPath - Path to tasks.json\n * @param {Object} options - Options object, including context\n */\nasync function validateDependenciesCommand(tasksPath, options = {}) {\n\tconst { context = {} } = options;\n\tlog('info', 'Checking for invalid dependencies in task files...');\n\n\t// Read tasks data\n\tconst data = readJSON(tasksPath, context.projectRoot, context.tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found in tasks.json');\n\t\tprocess.exit(1);\n\t}\n\n\t// Count of tasks and subtasks for reporting\n\tconst taskCount = data.tasks.length;\n\tlet subtaskCount = 0;\n\tdata.tasks.forEach((task) => {\n\t\tif (task.subtasks && Array.isArray(task.subtasks)) {\n\t\t\tsubtaskCount += task.subtasks.length;\n\t\t}\n\t});\n\n\tlog(\n\t\t'info',\n\t\t`Analyzing dependencies for ${taskCount} tasks and ${subtaskCount} subtasks...`\n\t);\n\n\ttry {\n\t\t// Directly call the validation function\n\t\tconst validationResult = validateTaskDependencies(data.tasks);\n\n\t\tif (!validationResult.valid) {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`Dependency validation failed. 
Found ${validationResult.issues.length} issue(s):`\n\t\t\t);\n\t\t\tvalidationResult.issues.forEach((issue) => {\n\t\t\t\tlet errorMsg = ` [${issue.type.toUpperCase()}] Task ${issue.taskId}: ${issue.message}`;\n\t\t\t\tif (issue.dependencyId) {\n\t\t\t\t\terrorMsg += ` (Dependency: ${issue.dependencyId})`;\n\t\t\t\t}\n\t\t\t\tlog('error', errorMsg); // Log each issue as an error\n\t\t\t});\n\n\t\t\t// Optionally exit if validation fails, depending on desired behavior\n\t\t\t// process.exit(1); // Uncomment if validation failure should stop the process\n\n\t\t\t// Display summary box even on failure, showing issues found\n\t\t\tif (!isSilentMode()) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.red(`Dependency Validation FAILED\\n\\n`) +\n\t\t\t\t\t\t\t`${chalk.cyan('Tasks checked:')} ${taskCount}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Subtasks checked:')} ${subtaskCount}\\n` +\n\t\t\t\t\t\t\t`${chalk.red('Issues found:')} ${validationResult.issues.length}`, // Display count from result\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\tborderColor: 'red',\n\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\tlog(\n\t\t\t\t'success',\n\t\t\t\t'No invalid dependencies found - all dependencies are valid'\n\t\t\t);\n\n\t\t\t// Show validation summary - only if not in silent mode\n\t\t\tif (!isSilentMode()) {\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.green(`All Dependencies Are Valid\\n\\n`) +\n\t\t\t\t\t\t\t`${chalk.cyan('Tasks checked:')} ${taskCount}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Subtasks checked:')} ${subtaskCount}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Total dependencies verified:')} ${countAllDependencies(data.tasks)}`,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\tmargin: { top: 1, bottom: 1 
}\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tlog('error', 'Error validating dependencies:', error);\n\t\tprocess.exit(1);\n\t}\n}\n\n/**\n * Helper function to count all dependencies across tasks and subtasks\n * @param {Array} tasks - All tasks\n * @returns {number} - Total number of dependencies\n */\nfunction countAllDependencies(tasks) {\n\tlet count = 0;\n\n\ttasks.forEach((task) => {\n\t\t// Count main task dependencies\n\t\tif (task.dependencies && Array.isArray(task.dependencies)) {\n\t\t\tcount += task.dependencies.length;\n\t\t}\n\n\t\t// Count subtask dependencies\n\t\tif (task.subtasks && Array.isArray(task.subtasks)) {\n\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\tif (subtask.dependencies && Array.isArray(subtask.dependencies)) {\n\t\t\t\t\tcount += subtask.dependencies.length;\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\n\treturn count;\n}\n\n/**\n * Fixes invalid dependencies in tasks.json\n * @param {string} tasksPath - Path to tasks.json\n * @param {Object} options - Options object, including context\n */\nasync function fixDependenciesCommand(tasksPath, options = {}) {\n\tconst { context = {} } = options;\n\tlog('info', 'Checking for and fixing invalid dependencies in tasks.json...');\n\n\ttry {\n\t\t// Read tasks data\n\t\tconst data = readJSON(tasksPath, context.projectRoot, context.tag);\n\t\tif (!data || !data.tasks) {\n\t\t\tlog('error', 'No valid tasks found in tasks.json');\n\t\t\tprocess.exit(1);\n\t\t}\n\n\t\t// Create a deep copy of the original data for comparison\n\t\tconst originalData = JSON.parse(JSON.stringify(data));\n\n\t\t// Track fixes for reporting\n\t\tconst stats = {\n\t\t\tnonExistentDependenciesRemoved: 0,\n\t\t\tselfDependenciesRemoved: 0,\n\t\t\tduplicateDependenciesRemoved: 0,\n\t\t\tcircularDependenciesFixed: 0,\n\t\t\ttasksFixed: 0,\n\t\t\tsubtasksFixed: 0\n\t\t};\n\n\t\t// First phase: Remove duplicate dependencies in tasks\n\t\tdata.tasks.forEach((task) => {\n\t\t\tif 
(task.dependencies && Array.isArray(task.dependencies)) {\n\t\t\t\tconst uniqueDeps = new Set();\n\t\t\t\tconst originalLength = task.dependencies.length;\n\t\t\t\ttask.dependencies = task.dependencies.filter((depId) => {\n\t\t\t\t\tconst depIdStr = String(depId);\n\t\t\t\t\tif (uniqueDeps.has(depIdStr)) {\n\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t`Removing duplicate dependency from task ${task.id}: ${depId}`\n\t\t\t\t\t\t);\n\t\t\t\t\t\tstats.duplicateDependenciesRemoved++;\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t\tuniqueDeps.add(depIdStr);\n\t\t\t\t\treturn true;\n\t\t\t\t});\n\t\t\t\tif (task.dependencies.length < originalLength) {\n\t\t\t\t\tstats.tasksFixed++;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Check for duplicates in subtasks\n\t\t\tif (task.subtasks && Array.isArray(task.subtasks)) {\n\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\tif (subtask.dependencies && Array.isArray(subtask.dependencies)) {\n\t\t\t\t\t\tconst uniqueDeps = new Set();\n\t\t\t\t\t\tconst originalLength = subtask.dependencies.length;\n\t\t\t\t\t\tsubtask.dependencies = subtask.dependencies.filter((depId) => {\n\t\t\t\t\t\t\tlet depIdStr = String(depId);\n\t\t\t\t\t\t\tif (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\t\t\tdepIdStr = `${task.id}.${depId}`;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif (uniqueDeps.has(depIdStr)) {\n\t\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t\t`Removing duplicate dependency from subtask ${task.id}.${subtask.id}: ${depId}`\n\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\tstats.duplicateDependenciesRemoved++;\n\t\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tuniqueDeps.add(depIdStr);\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t});\n\t\t\t\t\t\tif (subtask.dependencies.length < originalLength) {\n\t\t\t\t\t\t\tstats.subtasksFixed++;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Create validity maps for tasks and subtasks\n\t\tconst validTaskIds = new Set(data.tasks.map((t) => 
t.id));\n\t\tconst validSubtaskIds = new Set();\n\t\tdata.tasks.forEach((task) => {\n\t\t\tif (task.subtasks && Array.isArray(task.subtasks)) {\n\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\tvalidSubtaskIds.add(`${task.id}.${subtask.id}`);\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Second phase: Remove invalid task dependencies (non-existent tasks)\n\t\tdata.tasks.forEach((task) => {\n\t\t\tif (task.dependencies && Array.isArray(task.dependencies)) {\n\t\t\t\tconst originalLength = task.dependencies.length;\n\t\t\t\ttask.dependencies = task.dependencies.filter((depId) => {\n\t\t\t\t\tconst isSubtask = typeof depId === 'string' && depId.includes('.');\n\n\t\t\t\t\tif (isSubtask) {\n\t\t\t\t\t\t// Check if the subtask exists\n\t\t\t\t\t\tif (!validSubtaskIds.has(depId)) {\n\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t`Removing invalid subtask dependency from task ${task.id}: ${depId} (subtask does not exist)`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tstats.nonExistentDependenciesRemoved++;\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn true;\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Check if the task exists\n\t\t\t\t\t\tconst numericId =\n\t\t\t\t\t\t\ttypeof depId === 'string' ? 
parseInt(depId, 10) : depId;\n\t\t\t\t\t\tif (!validTaskIds.has(numericId)) {\n\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t`Removing invalid task dependency from task ${task.id}: ${depId} (task does not exist)`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tstats.nonExistentDependenciesRemoved++;\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn true;\n\t\t\t\t\t}\n\t\t\t\t});\n\n\t\t\t\tif (task.dependencies.length < originalLength) {\n\t\t\t\t\tstats.tasksFixed++;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Check subtask dependencies for invalid references\n\t\t\tif (task.subtasks && Array.isArray(task.subtasks)) {\n\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\tif (subtask.dependencies && Array.isArray(subtask.dependencies)) {\n\t\t\t\t\t\tconst originalLength = subtask.dependencies.length;\n\t\t\t\t\t\tconst subtaskId = `${task.id}.${subtask.id}`;\n\n\t\t\t\t\t\t// First check for self-dependencies\n\t\t\t\t\t\tconst hasSelfDependency = subtask.dependencies.some((depId) => {\n\t\t\t\t\t\t\tif (typeof depId === 'string' && depId.includes('.')) {\n\t\t\t\t\t\t\t\treturn depId === subtaskId;\n\t\t\t\t\t\t\t} else if (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\t\t\treturn depId === subtask.id;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\tif (hasSelfDependency) {\n\t\t\t\t\t\t\tsubtask.dependencies = subtask.dependencies.filter((depId) => {\n\t\t\t\t\t\t\t\tconst normalizedDepId =\n\t\t\t\t\t\t\t\t\ttypeof depId === 'number' && depId < 100\n\t\t\t\t\t\t\t\t\t\t? 
`${task.id}.${depId}`\n\t\t\t\t\t\t\t\t\t\t: String(depId);\n\n\t\t\t\t\t\t\t\tif (normalizedDepId === subtaskId) {\n\t\t\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t\t\t`Removing self-dependency from subtask ${subtaskId}`\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\tstats.selfDependenciesRemoved++;\n\t\t\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Then check for non-existent dependencies\n\t\t\t\t\t\tsubtask.dependencies = subtask.dependencies.filter((depId) => {\n\t\t\t\t\t\t\tif (typeof depId === 'string' && depId.includes('.')) {\n\t\t\t\t\t\t\t\tif (!validSubtaskIds.has(depId)) {\n\t\t\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t\t\t`Removing invalid subtask dependency from subtask ${subtaskId}: ${depId} (subtask does not exist)`\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\tstats.nonExistentDependenciesRemoved++;\n\t\t\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// Handle numeric dependencies\n\t\t\t\t\t\t\tconst numericId =\n\t\t\t\t\t\t\t\ttypeof depId === 'number' ? 
depId : parseInt(depId, 10);\n\n\t\t\t\t\t\t\t// Small numbers likely refer to subtasks in the same task\n\t\t\t\t\t\t\tif (numericId < 100) {\n\t\t\t\t\t\t\t\tconst fullSubtaskId = `${task.id}.${numericId}`;\n\n\t\t\t\t\t\t\t\tif (!validSubtaskIds.has(fullSubtaskId)) {\n\t\t\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t\t\t`Removing invalid subtask dependency from subtask ${subtaskId}: ${numericId}`\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t\tstats.nonExistentDependenciesRemoved++;\n\t\t\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// Otherwise it's a task reference\n\t\t\t\t\t\t\tif (!validTaskIds.has(numericId)) {\n\t\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t\t`Removing invalid task dependency from subtask ${subtaskId}: ${numericId}`\n\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\tstats.nonExistentDependenciesRemoved++;\n\t\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\tif (subtask.dependencies.length < originalLength) {\n\t\t\t\t\t\t\tstats.subtasksFixed++;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Third phase: Check for circular dependencies\n\t\tlog('info', 'Checking for circular dependencies...');\n\n\t\t// Build the dependency map for subtasks\n\t\tconst subtaskDependencyMap = new Map();\n\t\tdata.tasks.forEach((task) => {\n\t\t\tif (task.subtasks && Array.isArray(task.subtasks)) {\n\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\tconst subtaskId = `${task.id}.${subtask.id}`;\n\n\t\t\t\t\tif (subtask.dependencies && Array.isArray(subtask.dependencies)) {\n\t\t\t\t\t\tconst normalizedDeps = subtask.dependencies.map((depId) => {\n\t\t\t\t\t\t\tif (typeof depId === 'string' && depId.includes('.')) {\n\t\t\t\t\t\t\t\treturn depId;\n\t\t\t\t\t\t\t} else if (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\t\t\treturn 
`${task.id}.${depId}`;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn String(depId);\n\t\t\t\t\t\t});\n\t\t\t\t\t\tsubtaskDependencyMap.set(subtaskId, normalizedDeps);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tsubtaskDependencyMap.set(subtaskId, []);\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\t\t// Check for and fix circular dependencies\n\t\tfor (const [subtaskId, dependencies] of subtaskDependencyMap.entries()) {\n\t\t\tconst visited = new Set();\n\t\t\tconst recursionStack = new Set();\n\n\t\t\t// Detect cycles\n\t\t\tconst cycleEdges = findCycles(\n\t\t\t\tsubtaskId,\n\t\t\t\tsubtaskDependencyMap,\n\t\t\t\tvisited,\n\t\t\t\trecursionStack\n\t\t\t);\n\n\t\t\tif (cycleEdges.length > 0) {\n\t\t\t\tconst [taskId, subtaskNum] = subtaskId\n\t\t\t\t\t.split('.')\n\t\t\t\t\t.map((part) => Number(part));\n\t\t\t\tconst task = data.tasks.find((t) => t.id === taskId);\n\n\t\t\t\tif (task && task.subtasks) {\n\t\t\t\t\tconst subtask = task.subtasks.find((st) => st.id === subtaskNum);\n\n\t\t\t\t\tif (subtask && subtask.dependencies) {\n\t\t\t\t\t\tconst originalLength = subtask.dependencies.length;\n\n\t\t\t\t\t\tconst edgesToRemove = cycleEdges.map((edge) => {\n\t\t\t\t\t\t\tif (edge.includes('.')) {\n\t\t\t\t\t\t\t\tconst [depTaskId, depSubtaskId] = edge\n\t\t\t\t\t\t\t\t\t.split('.')\n\t\t\t\t\t\t\t\t\t.map((part) => Number(part));\n\n\t\t\t\t\t\t\t\tif (depTaskId === taskId) {\n\t\t\t\t\t\t\t\t\treturn depSubtaskId;\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\treturn edge;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\treturn Number(edge);\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\tsubtask.dependencies = subtask.dependencies.filter((depId) => {\n\t\t\t\t\t\t\tconst normalizedDepId =\n\t\t\t\t\t\t\t\ttypeof depId === 'number' && depId < 100\n\t\t\t\t\t\t\t\t\t? 
`${taskId}.${depId}`\n\t\t\t\t\t\t\t\t\t: String(depId);\n\n\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\tedgesToRemove.includes(depId) ||\n\t\t\t\t\t\t\t\tedgesToRemove.includes(normalizedDepId)\n\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t\t'info',\n\t\t\t\t\t\t\t\t\t`Breaking circular dependency: Removing ${normalizedDepId} from subtask ${subtaskId}`\n\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\tstats.circularDependenciesFixed++;\n\t\t\t\t\t\t\t\treturn false;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\tif (subtask.dependencies.length < originalLength) {\n\t\t\t\t\t\t\tstats.subtasksFixed++;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Check if any changes were made by comparing with original data\n\t\tconst dataChanged = JSON.stringify(data) !== JSON.stringify(originalData);\n\n\t\tif (dataChanged) {\n\t\t\t// Save the changes\n\t\t\twriteJSON(tasksPath, data, context.projectRoot, context.tag);\n\t\t\tlog('success', 'Fixed dependency issues in tasks.json');\n\n\t\t\t// Regenerate task files\n\t\t\tlog('info', 'Regenerating task files to reflect dependency changes...');\n\t\t\t// await generateTaskFiles(tasksPath, path.dirname(tasksPath));\n\t\t} else {\n\t\t\tlog('info', 'No changes needed to fix dependencies');\n\t\t}\n\n\t\t// Show detailed statistics report\n\t\tconst totalFixedAll =\n\t\t\tstats.nonExistentDependenciesRemoved +\n\t\t\tstats.selfDependenciesRemoved +\n\t\t\tstats.duplicateDependenciesRemoved +\n\t\t\tstats.circularDependenciesFixed;\n\n\t\tif (!isSilentMode()) {\n\t\t\tif (totalFixedAll > 0) {\n\t\t\t\tlog('success', `Fixed ${totalFixedAll} dependency issues in total!`);\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.green(`Dependency Fixes Summary:\\n\\n`) +\n\t\t\t\t\t\t\t`${chalk.cyan('Invalid dependencies removed:')} ${stats.nonExistentDependenciesRemoved}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Self-dependencies removed:')} ${stats.selfDependenciesRemoved}\\n` 
+\n\t\t\t\t\t\t\t`${chalk.cyan('Duplicate dependencies removed:')} ${stats.duplicateDependenciesRemoved}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Circular dependencies fixed:')} ${stats.circularDependenciesFixed}\\n\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Tasks fixed:')} ${stats.tasksFixed}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Subtasks fixed:')} ${stats.subtasksFixed}\\n`,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'success',\n\t\t\t\t\t'No dependency issues found - all dependencies are valid'\n\t\t\t\t);\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.green(`All Dependencies Are Valid\\n\\n`) +\n\t\t\t\t\t\t\t`${chalk.cyan('Tasks checked:')} ${data.tasks.length}\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('Total dependencies verified:')} ${countAllDependencies(data.tasks)}`,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tlog('error', 'Error in fix-dependencies command:', error);\n\t\tprocess.exit(1);\n\t}\n}\n\n/**\n * Ensure at least one subtask in each task has no dependencies\n * @param {Object} tasksData - The tasks data object with tasks array\n * @returns {boolean} - True if any changes were made\n */\nfunction ensureAtLeastOneIndependentSubtask(tasksData) {\n\tif (!tasksData || !tasksData.tasks || !Array.isArray(tasksData.tasks)) {\n\t\treturn false;\n\t}\n\n\tlet changesDetected = false;\n\n\ttasksData.tasks.forEach((task) => {\n\t\tif (\n\t\t\t!task.subtasks ||\n\t\t\t!Array.isArray(task.subtasks) ||\n\t\t\ttask.subtasks.length === 0\n\t\t) {\n\t\t\treturn;\n\t\t}\n\n\t\t// Check if any subtask has no dependencies\n\t\tconst hasIndependentSubtask = 
task.subtasks.some(\n\t\t\t(st) =>\n\t\t\t\t!st.dependencies ||\n\t\t\t\t!Array.isArray(st.dependencies) ||\n\t\t\t\tst.dependencies.length === 0\n\t\t);\n\n\t\tif (!hasIndependentSubtask) {\n\t\t\t// Find the first subtask and clear its dependencies\n\t\t\tif (task.subtasks.length > 0) {\n\t\t\t\tconst firstSubtask = task.subtasks[0];\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`Ensuring at least one independent subtask: Clearing dependencies for subtask ${task.id}.${firstSubtask.id}`\n\t\t\t\t);\n\t\t\t\tfirstSubtask.dependencies = [];\n\t\t\t\tchangesDetected = true;\n\t\t\t}\n\t\t}\n\t});\n\n\treturn changesDetected;\n}\n\n/**\n * Validate and fix dependencies across all tasks and subtasks\n * This function is designed to be called after any task modification\n * @param {Object} tasksData - The tasks data object with tasks array\n * @param {string} tasksPath - Optional path to save the changes\n * @param {string} projectRoot - Optional project root for tag context\n * @param {string} tag - Optional tag for tag context\n * @returns {boolean} - True if any changes were made\n */\nfunction validateAndFixDependencies(\n\ttasksData,\n\ttasksPath = null,\n\tprojectRoot = null,\n\ttag = null\n) {\n\tif (!tasksData || !tasksData.tasks || !Array.isArray(tasksData.tasks)) {\n\t\tlog('error', 'Invalid tasks data');\n\t\treturn false;\n\t}\n\n\tlog('debug', 'Validating and fixing dependencies...');\n\n\t// Create a deep copy for comparison\n\tconst originalData = JSON.parse(JSON.stringify(tasksData));\n\n\t// 1. 
Remove duplicate dependencies from tasks and subtasks\n\ttasksData.tasks = tasksData.tasks.map((task) => {\n\t\t// Handle task dependencies\n\t\tif (task.dependencies) {\n\t\t\tconst uniqueDeps = [...new Set(task.dependencies)];\n\t\t\ttask.dependencies = uniqueDeps;\n\t\t}\n\n\t\t// Handle subtask dependencies\n\t\tif (task.subtasks) {\n\t\t\ttask.subtasks = task.subtasks.map((subtask) => {\n\t\t\t\tif (subtask.dependencies) {\n\t\t\t\t\tconst uniqueDeps = [...new Set(subtask.dependencies)];\n\t\t\t\t\tsubtask.dependencies = uniqueDeps;\n\t\t\t\t}\n\t\t\t\treturn subtask;\n\t\t\t});\n\t\t}\n\t\treturn task;\n\t});\n\n\t// 2. Remove invalid task dependencies (non-existent tasks)\n\ttasksData.tasks.forEach((task) => {\n\t\t// Clean up task dependencies\n\t\tif (task.dependencies) {\n\t\t\ttask.dependencies = task.dependencies.filter((depId) => {\n\t\t\t\t// Remove self-dependencies\n\t\t\t\tif (String(depId) === String(task.id)) {\n\t\t\t\t\treturn false;\n\t\t\t\t}\n\t\t\t\t// Remove non-existent dependencies\n\t\t\t\treturn taskExists(tasksData.tasks, depId);\n\t\t\t});\n\t\t}\n\n\t\t// Clean up subtask dependencies\n\t\tif (task.subtasks) {\n\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\tif (subtask.dependencies) {\n\t\t\t\t\tsubtask.dependencies = subtask.dependencies.filter((depId) => {\n\t\t\t\t\t\t// Handle numeric subtask references\n\t\t\t\t\t\tif (typeof depId === 'number' && depId < 100) {\n\t\t\t\t\t\t\tconst fullSubtaskId = `${task.id}.${depId}`;\n\t\t\t\t\t\t\treturn taskExists(tasksData.tasks, fullSubtaskId);\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// Handle full task/subtask references\n\t\t\t\t\t\treturn taskExists(tasksData.tasks, depId);\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\n\t// 3. 
Ensure at least one subtask has no dependencies in each task\n\ttasksData.tasks.forEach((task) => {\n\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\tconst hasIndependentSubtask = task.subtasks.some(\n\t\t\t\t(st) =>\n\t\t\t\t\t!st.dependencies ||\n\t\t\t\t\t!Array.isArray(st.dependencies) ||\n\t\t\t\t\tst.dependencies.length === 0\n\t\t\t);\n\n\t\t\tif (!hasIndependentSubtask) {\n\t\t\t\ttask.subtasks[0].dependencies = [];\n\t\t\t}\n\t\t}\n\t});\n\n\t// Check if any changes were made by comparing with original data\n\tconst changesDetected =\n\t\tJSON.stringify(tasksData) !== JSON.stringify(originalData);\n\n\t// Save changes if needed\n\tif (tasksPath && changesDetected) {\n\t\ttry {\n\t\t\twriteJSON(tasksPath, tasksData, projectRoot, tag);\n\t\t\tlog('debug', 'Saved dependency fixes to tasks.json');\n\t\t} catch (error) {\n\t\t\tlog('error', 'Failed to save dependency fixes to tasks.json', error);\n\t\t}\n\t}\n\n\treturn changesDetected;\n}\n\nexport {\n\taddDependency,\n\tremoveDependency,\n\tisCircularDependency,\n\tvalidateTaskDependencies,\n\tvalidateDependenciesCommand,\n\tfixDependenciesCommand,\n\tremoveDuplicateDependencies,\n\tcleanupSubtaskDependencies,\n\tensureAtLeastOneIndependentSubtask,\n\tvalidateAndFixDependencies\n};\n"], ["/claude-task-master/scripts/modules/task-manager/set-task-status.js", "import path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\n\nimport {\n\tlog,\n\treadJSON,\n\twriteJSON,\n\tfindTaskById,\n\tensureTagMetadata\n} from '../utils.js';\nimport { displayBanner } from '../ui.js';\nimport { validateTaskDependencies } from '../dependency-manager.js';\nimport { getDebugFlag } from '../config-manager.js';\nimport updateSingleTaskStatus from './update-single-task-status.js';\nimport generateTaskFiles from './generate-task-files.js';\nimport {\n\tisValidTaskStatus,\n\tTASK_STATUS_OPTIONS\n} from '../../../src/constants/task-status.js';\n\n/**\n * Set the status of a task\n * @param {string} 
tasksPath - Path to the tasks.json file\n * @param {string} taskIdInput - Task ID(s) to update\n * @param {string} newStatus - New status\n * @param {Object} options - Additional options (mcpLog for MCP mode, projectRoot for tag resolution)\n * @param {string} [options.projectRoot] - Project root path\n * @param {string} [options.tag] - Optional tag to override current tag resolution\n * @param {string} [options.mcpLog] - MCP logger object\n * @returns {Object|undefined} Result object in MCP mode, undefined in CLI mode\n */\nasync function setTaskStatus(tasksPath, taskIdInput, newStatus, options = {}) {\n\tconst { projectRoot, tag } = options;\n\ttry {\n\t\tif (!isValidTaskStatus(newStatus)) {\n\t\t\tthrow new Error(\n\t\t\t\t`Error: Invalid status value: ${newStatus}. Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`\n\t\t\t);\n\t\t}\n\t\t// Determine if we're in MCP mode by checking for mcpLog\n\t\tconst isMcpMode = !!options?.mcpLog;\n\n\t\t// Only display UI elements if not in MCP mode\n\t\tif (!isMcpMode) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(chalk.white.bold(`Updating Task Status to: ${newStatus}`), {\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 'round'\n\t\t\t\t})\n\t\t\t);\n\t\t}\n\n\t\tlog('info', `Reading tasks from ${tasksPath}...`);\n\n\t\t// Read the raw data without tag resolution to preserve tagged structure\n\t\tlet rawData = readJSON(tasksPath, projectRoot, tag); // No tag parameter\n\n\t\t// Handle the case where readJSON returns resolved data with _rawTaggedData\n\t\tif (rawData && rawData._rawTaggedData) {\n\t\t\t// Use the raw tagged data and discard the resolved view\n\t\t\trawData = rawData._rawTaggedData;\n\t\t}\n\n\t\t// Ensure the tag exists in the raw data\n\t\tif (!rawData || !rawData[tag] || !Array.isArray(rawData[tag].tasks)) {\n\t\t\tthrow new Error(\n\t\t\t\t`Invalid tasks file or tag \"${tag}\" not found at ${tasksPath}`\n\t\t\t);\n\t\t}\n\n\t\t// Get the tasks for the current tag\n\t\tconst data = 
{\n\t\t\ttasks: rawData[tag].tasks,\n\t\t\ttag,\n\t\t\t_rawTaggedData: rawData\n\t\t};\n\n\t\tif (!data || !data.tasks) {\n\t\t\tthrow new Error(`No valid tasks found in ${tasksPath}`);\n\t\t}\n\n\t\t// Handle multiple task IDs (comma-separated)\n\t\tconst taskIds = taskIdInput.split(',').map((id) => id.trim());\n\t\tconst updatedTasks = [];\n\n\t\t// Update each task and capture old status for display\n\t\tfor (const id of taskIds) {\n\t\t\t// Capture old status before updating\n\t\t\tlet oldStatus = 'unknown';\n\n\t\t\tif (id.includes('.')) {\n\t\t\t\t// Handle subtask\n\t\t\t\tconst [parentId, subtaskId] = id\n\t\t\t\t\t.split('.')\n\t\t\t\t\t.map((id) => parseInt(id, 10));\n\t\t\t\tconst parentTask = data.tasks.find((t) => t.id === parentId);\n\t\t\t\tif (parentTask?.subtasks) {\n\t\t\t\t\tconst subtask = parentTask.subtasks.find((st) => st.id === subtaskId);\n\t\t\t\t\toldStatus = subtask?.status || 'pending';\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Handle regular task\n\t\t\t\tconst taskId = parseInt(id, 10);\n\t\t\t\tconst task = data.tasks.find((t) => t.id === taskId);\n\t\t\t\toldStatus = task?.status || 'pending';\n\t\t\t}\n\n\t\t\tawait updateSingleTaskStatus(tasksPath, id, newStatus, data, !isMcpMode);\n\t\t\tupdatedTasks.push({ id, oldStatus, newStatus });\n\t\t}\n\n\t\t// Update the raw data structure with the modified tasks\n\t\trawData[tag].tasks = data.tasks;\n\n\t\t// Ensure the tag has proper metadata\n\t\tensureTagMetadata(rawData[tag], {\n\t\t\tdescription: `Tasks for ${tag} context`\n\t\t});\n\n\t\t// Write the updated raw data back to the file\n\t\t// The writeJSON function will automatically filter out _rawTaggedData\n\t\twriteJSON(tasksPath, rawData, projectRoot, tag);\n\n\t\t// Validate dependencies after status update\n\t\tlog('info', 'Validating dependencies after status update...');\n\t\tvalidateTaskDependencies(data.tasks);\n\n\t\t// Generate individual task files\n\t\t// log('info', 'Regenerating task files...');\n\t\t// await 
generateTaskFiles(tasksPath, path.dirname(tasksPath), {\n\t\t// \tmcpLog: options.mcpLog\n\t\t// });\n\n\t\t// Display success message - only in CLI mode\n\t\tif (!isMcpMode) {\n\t\t\tfor (const updateInfo of updatedTasks) {\n\t\t\t\tconst { id, oldStatus, newStatus: updatedStatus } = updateInfo;\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.white.bold(`Successfully updated task ${id} status:`) +\n\t\t\t\t\t\t\t'\\n' +\n\t\t\t\t\t\t\t`From: ${chalk.yellow(oldStatus)}\\n` +\n\t\t\t\t\t\t\t`To: ${chalk.green(updatedStatus)}`,\n\t\t\t\t\t\t{ padding: 1, borderColor: 'green', borderStyle: 'round' }\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\t// Return success value for programmatic use\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tupdatedTasks: updatedTasks.map(({ id, oldStatus, newStatus }) => ({\n\t\t\t\tid,\n\t\t\t\toldStatus,\n\t\t\t\tnewStatus\n\t\t\t}))\n\t\t};\n\t} catch (error) {\n\t\tlog('error', `Error setting task status: ${error.message}`);\n\n\t\t// Only show error UI in CLI mode\n\t\tif (!options?.mcpLog) {\n\t\t\tconsole.error(chalk.red(`Error: ${error.message}`));\n\n\t\t\t// Pass session to getDebugFlag\n\t\t\tif (getDebugFlag(options?.session)) {\n\t\t\t\t// Use getter\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\n\t\t\tprocess.exit(1);\n\t\t} else {\n\t\t\t// In MCP mode, throw the error for the caller to handle\n\t\t\tthrow error;\n\t\t}\n\t}\n}\n\nexport default setTaskStatus;\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/move-task.js", "/**\n * Direct function wrapper for moveTask\n */\n\nimport { moveTask } from '../../../../scripts/modules/task-manager.js';\nimport { findTasksPath } from '../utils/path-utils.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Move a task or subtask to a new position\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file\n * @param {string} 
args.sourceId - ID of the task/subtask to move (e.g., '5' or '5.2' or '5,6,7')\n * @param {string} args.destinationId - ID of the destination (e.g., '7' or '7.3' or '7,8,9')\n * @param {string} args.file - Alternative path to the tasks.json file\n * @param {string} args.projectRoot - Project root directory\n * @param {string} args.tag - Tag for the task (optional)\n * @param {boolean} args.generateFiles - Whether to regenerate task files after moving (default: true)\n * @param {Object} log - Logger object\n * @returns {Promise<{success: boolean, data?: Object, error?: Object}>}\n */\nexport async function moveTaskDirect(args, log, context = {}) {\n\tconst { session } = context;\n\tconst { projectRoot, tag } = args;\n\n\t// Validate required parameters\n\tif (!args.sourceId) {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tmessage: 'Source ID is required',\n\t\t\t\tcode: 'MISSING_SOURCE_ID'\n\t\t\t}\n\t\t};\n\t}\n\n\tif (!args.destinationId) {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tmessage: 'Destination ID is required',\n\t\t\t\tcode: 'MISSING_DESTINATION_ID'\n\t\t\t}\n\t\t};\n\t}\n\n\ttry {\n\t\t// Find tasks.json path if not provided\n\t\tlet tasksPath = args.tasksJsonPath || args.file;\n\t\tif (!tasksPath) {\n\t\t\tif (!args.projectRoot) {\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tmessage:\n\t\t\t\t\t\t\t'Project root is required if tasksJsonPath is not provided',\n\t\t\t\t\t\tcode: 'MISSING_PROJECT_ROOT'\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\t\t\ttasksPath = findTasksPath(args, log);\n\t\t}\n\n\t\t// Enable silent mode to prevent console output during MCP operation\n\t\tenableSilentMode();\n\n\t\t// Call the core moveTask function with file generation control\n\t\tconst generateFiles = args.generateFiles !== false; // Default to true\n\t\tconst result = await 
moveTask(\n\t\t\ttasksPath,\n\t\t\targs.sourceId,\n\t\t\targs.destinationId,\n\t\t\tgenerateFiles,\n\t\t\t{\n\t\t\t\tprojectRoot,\n\t\t\t\ttag\n\t\t\t}\n\t\t);\n\n\t\t// Restore console output\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\t...result,\n\t\t\t\tmessage: `Successfully moved task/subtask ${args.sourceId} to ${args.destinationId}`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Restore console output in case of error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Failed to move task: ${error.message}`);\n\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tmessage: error.message,\n\t\t\t\tcode: 'MOVE_TASK_ERROR'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/rules.js", "/**\n * rules.js\n * Direct function implementation for adding or removing rules\n */\n\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport {\n\tconvertAllRulesToProfileRules,\n\tremoveProfileRules,\n\tgetRulesProfile,\n\tisValidProfile\n} from '../../../../src/utils/rule-transformer.js';\nimport { RULE_PROFILES } from '../../../../src/constants/profiles.js';\nimport { RULES_ACTIONS } from '../../../../src/constants/rules-actions.js';\nimport {\n\twouldRemovalLeaveNoProfiles,\n\tgetInstalledProfiles\n} from '../../../../src/utils/profiles.js';\nimport path from 'path';\nimport fs from 'fs';\n\n/**\n * Direct function wrapper for adding or removing rules.\n * @param {Object} args - Command arguments\n * @param {\"add\"|\"remove\"} args.action - Action to perform: add or remove rules\n * @param {string[]} args.profiles - List of profiles to add or remove\n * @param {string} args.projectRoot - Absolute path to the project root\n * @param {boolean} [args.yes=true] - Run non-interactively\n * @param {Object} log - Logger object\n * @param {Object} context - Additional context (session)\n * @returns {Promise<Object>} - Result object { success: boolean, 
data?: any, error?: { code: string, message: string } }\n */\nexport async function rulesDirect(args, log, context = {}) {\n\tenableSilentMode();\n\ttry {\n\t\tconst { action, profiles, projectRoot, yes, force } = args;\n\t\tif (\n\t\t\t!action ||\n\t\t\t!Array.isArray(profiles) ||\n\t\t\tprofiles.length === 0 ||\n\t\t\t!projectRoot\n\t\t) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'action, profiles, and projectRoot are required.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tconst removalResults = [];\n\t\tconst addResults = [];\n\n\t\tif (action === RULES_ACTIONS.REMOVE) {\n\t\t\t// Safety check: Ensure this won't remove all rule profiles (unless forced)\n\t\t\tif (!force && wouldRemovalLeaveNoProfiles(projectRoot, profiles)) {\n\t\t\t\tconst installedProfiles = getInstalledProfiles(projectRoot);\n\t\t\t\tconst remainingProfiles = installedProfiles.filter(\n\t\t\t\t\t(profile) => !profiles.includes(profile)\n\t\t\t\t);\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'CRITICAL_REMOVAL_BLOCKED',\n\t\t\t\t\t\tmessage: `CRITICAL: This operation would remove ALL remaining rule profiles (${profiles.join(', ')}), leaving your project with no rules configurations. This could significantly impact functionality. Currently installed profiles: ${installedProfiles.join(', ')}. If you're certain you want to proceed, set force: true or use the CLI with --force flag.`\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\n\t\t\tfor (const profile of profiles) {\n\t\t\t\tif (!isValidProfile(profile)) {\n\t\t\t\t\tremovalResults.push({\n\t\t\t\t\t\tprofileName: profile,\n\t\t\t\t\t\tsuccess: false,\n\t\t\t\t\t\terror: `The requested rule profile for '${profile}' is unavailable. 
Supported profiles are: ${RULE_PROFILES.join(', ')}.`\n\t\t\t\t\t});\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tconst profileConfig = getRulesProfile(profile);\n\t\t\t\tconst result = removeProfileRules(projectRoot, profileConfig);\n\t\t\t\tremovalResults.push(result);\n\t\t\t}\n\t\t\tconst successes = removalResults\n\t\t\t\t.filter((r) => r.success)\n\t\t\t\t.map((r) => r.profileName);\n\t\t\tconst skipped = removalResults\n\t\t\t\t.filter((r) => r.skipped)\n\t\t\t\t.map((r) => r.profileName);\n\t\t\tconst errors = removalResults.filter(\n\t\t\t\t(r) => r.error && !r.success && !r.skipped\n\t\t\t);\n\t\t\tconst withNotices = removalResults.filter((r) => r.notice);\n\n\t\t\tlet summary = '';\n\t\t\tif (successes.length > 0) {\n\t\t\t\tsummary += `Successfully removed Task Master rules: ${successes.join(', ')}.`;\n\t\t\t}\n\t\t\tif (skipped.length > 0) {\n\t\t\t\tsummary += `Skipped (default or protected): ${skipped.join(', ')}.`;\n\t\t\t}\n\t\t\tif (errors.length > 0) {\n\t\t\t\tsummary += errors\n\t\t\t\t\t.map((r) => `Error removing ${r.profileName}: ${r.error}`)\n\t\t\t\t\t.join(' ');\n\t\t\t}\n\t\t\tif (withNotices.length > 0) {\n\t\t\t\tsummary += ` Notices: ${withNotices.map((r) => `${r.profileName} - ${r.notice}`).join('; ')}.`;\n\t\t\t}\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: errors.length === 0,\n\t\t\t\tdata: { summary, results: removalResults }\n\t\t\t};\n\t\t} else if (action === RULES_ACTIONS.ADD) {\n\t\t\tfor (const profile of profiles) {\n\t\t\t\tif (!isValidProfile(profile)) {\n\t\t\t\t\taddResults.push({\n\t\t\t\t\t\tprofileName: profile,\n\t\t\t\t\t\tsuccess: false,\n\t\t\t\t\t\terror: `Profile not found: static import missing for '${profile}'. 
Valid profiles: ${RULE_PROFILES.join(', ')}`\n\t\t\t\t\t});\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tconst profileConfig = getRulesProfile(profile);\n\t\t\t\tconst { success, failed } = convertAllRulesToProfileRules(\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tprofileConfig\n\t\t\t\t);\n\n\t\t\t\t// Determine paths\n\t\t\t\tconst rulesDir = profileConfig.rulesDir;\n\t\t\t\tconst profileRulesDir = path.join(projectRoot, rulesDir);\n\t\t\t\tconst profileDir = profileConfig.profileDir;\n\t\t\t\tconst mcpConfig = profileConfig.mcpConfig !== false;\n\t\t\t\tconst mcpPath =\n\t\t\t\t\tmcpConfig && profileConfig.mcpConfigPath\n\t\t\t\t\t\t? path.join(projectRoot, profileConfig.mcpConfigPath)\n\t\t\t\t\t\t: null;\n\n\t\t\t\t// Check what was created\n\t\t\t\tconst mcpConfigCreated =\n\t\t\t\t\tmcpConfig && mcpPath ? fs.existsSync(mcpPath) : undefined;\n\t\t\t\tconst rulesDirCreated = fs.existsSync(profileRulesDir);\n\t\t\t\tconst profileFolderCreated = fs.existsSync(\n\t\t\t\t\tpath.join(projectRoot, profileDir)\n\t\t\t\t);\n\n\t\t\t\tconst error =\n\t\t\t\t\tfailed > 0 ? `${failed} rule files failed to convert.` : null;\n\t\t\t\tconst resultObj = {\n\t\t\t\t\tprofileName: profile,\n\t\t\t\t\tmcpConfigCreated,\n\t\t\t\t\trulesDirCreated,\n\t\t\t\t\tprofileFolderCreated,\n\t\t\t\t\tskipped: false,\n\t\t\t\t\terror,\n\t\t\t\t\tsuccess:\n\t\t\t\t\t\t(mcpConfig ? 
mcpConfigCreated : true) &&\n\t\t\t\t\t\trulesDirCreated &&\n\t\t\t\t\t\tsuccess > 0 &&\n\t\t\t\t\t\t!error\n\t\t\t\t};\n\t\t\t\taddResults.push(resultObj);\n\t\t\t}\n\n\t\t\tconst successes = addResults\n\t\t\t\t.filter((r) => r.success)\n\t\t\t\t.map((r) => r.profileName);\n\t\t\tconst errors = addResults.filter((r) => r.error && !r.success);\n\n\t\t\tlet summary = '';\n\t\t\tif (successes.length > 0) {\n\t\t\t\tsummary += `Successfully added rules: ${successes.join(', ')}.`;\n\t\t\t}\n\t\t\tif (errors.length > 0) {\n\t\t\t\tsummary += errors\n\t\t\t\t\t.map((r) => ` Error adding ${r.profileName}: ${r.error}`)\n\t\t\t\t\t.join(' ');\n\t\t\t}\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: errors.length === 0,\n\t\t\t\tdata: { summary, results: addResults }\n\t\t\t};\n\t\t} else {\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INVALID_ACTION',\n\t\t\t\t\tmessage: `Unknown action. Use \"${RULES_ACTIONS.ADD}\" or \"${RULES_ACTIONS.REMOVE}\".`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\tdisableSilentMode();\n\t\tlog.error(`[rulesDirect] Error: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'RULES_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/add-dependency.js", "/**\n * add-dependency.js\n * Direct function implementation for adding a dependency to a task\n */\n\nimport { addDependency } from '../../../../scripts/modules/dependency-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for addDependency with error handling.\n *\n * @param {Object} args - Command arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string|number} args.id - Task ID to add dependency to\n * @param {string|number} args.dependsOn - 
Task ID that will become a dependency\n * @param {string} args.tag - Tag for the task (optional)\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {Object} log - Logger object\n * @returns {Promise<Object>} - Result object with success status and data/error information\n */\nexport async function addDependencyDirect(args, log) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, id, dependsOn, tag, projectRoot } = args;\n\ttry {\n\t\tlog.info(`Adding dependency with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('addDependencyDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Validate required parameters\n\t\tif (!id) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: 'Task ID (id) is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (!dependsOn) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: 'Dependency ID (dependsOn) is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Format IDs for the core function\n\t\tconst taskId =\n\t\t\tid && id.includes && id.includes('.') ? id : parseInt(id, 10);\n\t\tconst dependencyId =\n\t\t\tdependsOn && dependsOn.includes && dependsOn.includes('.')\n\t\t\t\t? 
dependsOn\n\t\t\t\t: parseInt(dependsOn, 10);\n\n\t\tlog.info(\n\t\t\t`Adding dependency: task ${taskId} will depend on ${dependencyId}`\n\t\t);\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\t// Create context object\n\t\tconst context = { projectRoot, tag };\n\n\t\t// Call the core function using the provided path\n\t\tawait addDependency(tasksPath, taskId, dependencyId, context);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: `Successfully added dependency: Task ${taskId} now depends on ${dependencyId}`,\n\t\t\t\ttaskId: taskId,\n\t\t\t\tdependencyId: dependencyId\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in addDependencyDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/task-manager/analyze-task-complexity.js", "import chalk from 'chalk';\nimport boxen from 'boxen';\nimport readline from 'readline';\nimport fs from 'fs';\n\nimport { log, readJSON, writeJSON, isSilentMode } from '../utils.js';\n\nimport {\n\tstartLoadingIndicator,\n\tstopLoadingIndicator,\n\tdisplayAiUsageSummary\n} from '../ui.js';\n\nimport { generateTextService } from '../ai-services-unified.js';\n\nimport { getDebugFlag, getProjectName } from '../config-manager.js';\nimport { getPromptManager } from '../prompt-manager.js';\nimport {\n\tCOMPLEXITY_REPORT_FILE,\n\tLEGACY_TASKS_FILE\n} from '../../../src/constants/paths.js';\nimport { resolveComplexityReportOutputPath } from '../../../src/utils/path-utils.js';\nimport { ContextGatherer } from '../utils/contextGatherer.js';\nimport { FuzzyTaskSearch } from '../utils/fuzzyTaskSearch.js';\nimport { 
flattenTasksWithSubtasks } from '../utils.js';\n\n/**\n * Generates the prompt for complexity analysis.\n * (Moved from ai-services.js and simplified)\n * @param {Object} tasksData - The tasks data object.\n * @param {string} [gatheredContext] - The gathered context for the analysis.\n * @returns {string} The generated prompt.\n */\nfunction generateInternalComplexityAnalysisPrompt(\n\ttasksData,\n\tgatheredContext = ''\n) {\n\tconst tasksString = JSON.stringify(tasksData.tasks, null, 2);\n\tlet prompt = `Analyze the following tasks to determine their complexity (1-10 scale) and recommend the number of subtasks for expansion. Provide a brief reasoning and an initial expansion prompt for each.\n\nTasks:\n${tasksString}`;\n\n\tif (gatheredContext) {\n\t\tprompt += `\\n\\n# Project Context\\n\\n${gatheredContext}`;\n\t}\n\n\tprompt += `\n\nRespond ONLY with a valid JSON array matching the schema:\n[\n {\n \"taskId\": <number>,\n \"taskTitle\": \"<string>\",\n \"complexityScore\": <number 1-10>,\n \"recommendedSubtasks\": <number>,\n \"expansionPrompt\": \"<string>\",\n \"reasoning\": \"<string>\"\n },\n ...\n]\n\nDo not include any explanatory text, markdown formatting, or code block markers before or after the JSON array.`;\n\treturn prompt;\n}\n\n/**\n * Analyzes task complexity and generates expansion recommendations\n * @param {Object} options Command options\n * @param {string} options.file - Path to tasks file\n * @param {string} options.output - Path to report output file\n * @param {string|number} [options.threshold] - Complexity threshold\n * @param {boolean} [options.research] - Use research role\n * @param {string} [options.projectRoot] - Project root path (for MCP/env fallback).\n * @param {string} [options.tag] - Tag for the task\n * @param {string} [options.id] - Comma-separated list of task IDs to analyze specifically\n * @param {number} [options.from] - Starting task ID in a range to analyze\n * @param {number} [options.to] - Ending task ID in a range 
to analyze\n * @param {Object} [options._filteredTasksData] - Pre-filtered task data (internal use)\n * @param {number} [options._originalTaskCount] - Original task count (internal use)\n * @param {Object} context - Context object, potentially containing session and mcpLog\n * @param {Object} [context.session] - Session object from MCP server (optional)\n * @param {Object} [context.mcpLog] - MCP logger object (optional)\n * @param {function} [context.reportProgress] - Deprecated: Function to report progress (ignored)\n */\nasync function analyzeTaskComplexity(options, context = {}) {\n\tconst { session, mcpLog } = context;\n\tconst tasksPath = options.file || LEGACY_TASKS_FILE;\n\tconst thresholdScore = parseFloat(options.threshold || '5');\n\tconst useResearch = options.research || false;\n\tconst projectRoot = options.projectRoot;\n\tconst tag = options.tag;\n\t// New parameters for task ID filtering\n\tconst specificIds = options.id\n\t\t? options.id\n\t\t\t\t.split(',')\n\t\t\t\t.map((id) => parseInt(id.trim(), 10))\n\t\t\t\t.filter((id) => !Number.isNaN(id))\n\t\t: null;\n\tconst fromId = options.from !== undefined ? parseInt(options.from, 10) : null;\n\tconst toId = options.to !== undefined ? parseInt(options.to, 10) : null;\n\n\tconst outputFormat = mcpLog ? 
'json' : 'text';\n\n\tconst reportLog = (message, level = 'info') => {\n\t\tif (mcpLog) {\n\t\t\tmcpLog[level](message);\n\t\t} else if (!isSilentMode() && outputFormat === 'text') {\n\t\t\tlog(level, message);\n\t\t}\n\t};\n\n\t// Resolve output path using tag-aware resolution\n\tconst outputPath = resolveComplexityReportOutputPath(\n\t\toptions.output,\n\t\t{ projectRoot, tag },\n\t\treportLog\n\t);\n\n\tif (outputFormat === 'text') {\n\t\tconsole.log(\n\t\t\tchalk.blue(\n\t\t\t\t'Analyzing task complexity and generating expansion recommendations...'\n\t\t\t)\n\t\t);\n\t}\n\n\ttry {\n\t\treportLog(`Reading tasks from ${tasksPath}...`, 'info');\n\t\tlet tasksData;\n\t\tlet originalTaskCount = 0;\n\t\tlet originalData = null;\n\n\t\tif (options._filteredTasksData) {\n\t\t\ttasksData = options._filteredTasksData;\n\t\t\toriginalTaskCount = options._originalTaskCount || tasksData.tasks.length;\n\t\t\tif (!options._originalTaskCount) {\n\t\t\t\ttry {\n\t\t\t\t\toriginalData = readJSON(tasksPath, projectRoot, tag);\n\t\t\t\t\tif (originalData && originalData.tasks) {\n\t\t\t\t\t\toriginalTaskCount = originalData.tasks.length;\n\t\t\t\t\t}\n\t\t\t\t} catch (e) {\n\t\t\t\t\tlog('warn', `Could not read original tasks file: ${e.message}`);\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\toriginalData = readJSON(tasksPath, projectRoot, tag);\n\t\t\tif (\n\t\t\t\t!originalData ||\n\t\t\t\t!originalData.tasks ||\n\t\t\t\t!Array.isArray(originalData.tasks) ||\n\t\t\t\toriginalData.tasks.length === 0\n\t\t\t) {\n\t\t\t\tthrow new Error('No tasks found in the tasks file');\n\t\t\t}\n\t\t\toriginalTaskCount = originalData.tasks.length;\n\n\t\t\t// Filter tasks based on active status\n\t\t\tconst activeStatuses = ['pending', 'blocked', 'in-progress'];\n\t\t\tlet filteredTasks = originalData.tasks.filter((task) =>\n\t\t\t\tactiveStatuses.includes(task.status?.toLowerCase() || 'pending')\n\t\t\t);\n\n\t\t\t// Apply ID filtering if specified\n\t\t\tif (specificIds && specificIds.length > 0) 
{\n\t\t\t\treportLog(\n\t\t\t\t\t`Filtering tasks by specific IDs: ${specificIds.join(', ')}`,\n\t\t\t\t\t'info'\n\t\t\t\t);\n\t\t\t\tfilteredTasks = filteredTasks.filter((task) =>\n\t\t\t\t\tspecificIds.includes(task.id)\n\t\t\t\t);\n\n\t\t\t\tif (outputFormat === 'text') {\n\t\t\t\t\tif (filteredTasks.length === 0 && specificIds.length > 0) {\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t`Warning: No active tasks found with IDs: ${specificIds.join(', ')}`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t} else if (filteredTasks.length < specificIds.length) {\n\t\t\t\t\t\tconst foundIds = filteredTasks.map((t) => t.id);\n\t\t\t\t\t\tconst missingIds = specificIds.filter(\n\t\t\t\t\t\t\t(id) => !foundIds.includes(id)\n\t\t\t\t\t\t);\n\t\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t\t`Warning: Some requested task IDs were not found or are not active: ${missingIds.join(', ')}`\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Apply range filtering if specified\n\t\t\telse if (fromId !== null || toId !== null) {\n\t\t\t\tconst effectiveFromId = fromId !== null ? fromId : 1;\n\t\t\t\tconst effectiveToId =\n\t\t\t\t\ttoId !== null\n\t\t\t\t\t\t? 
toId\n\t\t\t\t\t\t: Math.max(...originalData.tasks.map((t) => t.id));\n\n\t\t\t\treportLog(\n\t\t\t\t\t`Filtering tasks by ID range: ${effectiveFromId} to ${effectiveToId}`,\n\t\t\t\t\t'info'\n\t\t\t\t);\n\t\t\t\tfilteredTasks = filteredTasks.filter(\n\t\t\t\t\t(task) => task.id >= effectiveFromId && task.id <= effectiveToId\n\t\t\t\t);\n\n\t\t\t\tif (outputFormat === 'text' && filteredTasks.length === 0) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`Warning: No active tasks found in range: ${effectiveFromId}-${effectiveToId}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttasksData = {\n\t\t\t\t...originalData,\n\t\t\t\ttasks: filteredTasks,\n\t\t\t\t_originalTaskCount: originalTaskCount\n\t\t\t};\n\t\t}\n\n\t\t// --- Context Gathering ---\n\t\tlet gatheredContext = '';\n\t\tif (originalData && originalData.tasks.length > 0) {\n\t\t\ttry {\n\t\t\t\tconst contextGatherer = new ContextGatherer(projectRoot, tag);\n\t\t\t\tconst allTasksFlat = flattenTasksWithSubtasks(originalData.tasks);\n\t\t\t\tconst fuzzySearch = new FuzzyTaskSearch(\n\t\t\t\t\tallTasksFlat,\n\t\t\t\t\t'analyze-complexity'\n\t\t\t\t);\n\t\t\t\t// Create a query from the tasks being analyzed\n\t\t\t\tconst searchQuery = tasksData.tasks\n\t\t\t\t\t.map((t) => `${t.title} ${t.description}`)\n\t\t\t\t\t.join(' ');\n\t\t\t\tconst searchResults = fuzzySearch.findRelevantTasks(searchQuery, {\n\t\t\t\t\tmaxResults: 10\n\t\t\t\t});\n\t\t\t\tconst relevantTaskIds = fuzzySearch.getTaskIds(searchResults);\n\n\t\t\t\tif (relevantTaskIds.length > 0) {\n\t\t\t\t\tconst contextResult = await contextGatherer.gather({\n\t\t\t\t\t\ttasks: relevantTaskIds,\n\t\t\t\t\t\tformat: 'research'\n\t\t\t\t\t});\n\t\t\t\t\tgatheredContext = contextResult.context || '';\n\t\t\t\t}\n\t\t\t} catch (contextError) {\n\t\t\t\treportLog(\n\t\t\t\t\t`Could not gather additional context: ${contextError.message}`,\n\t\t\t\t\t'warn'\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t\t// --- End Context Gathering 
---\n\n\t\tconst skippedCount = originalTaskCount - tasksData.tasks.length;\n\t\treportLog(\n\t\t\t`Found ${originalTaskCount} total tasks in the task file.`,\n\t\t\t'info'\n\t\t);\n\n\t\t// Updated messaging to reflect filtering logic\n\t\tif (specificIds || fromId !== null || toId !== null) {\n\t\t\tconst filterMsg = specificIds\n\t\t\t\t? `Analyzing ${tasksData.tasks.length} tasks with specific IDs: ${specificIds.join(', ')}`\n\t\t\t\t: `Analyzing ${tasksData.tasks.length} tasks in range: ${fromId || 1} to ${toId || 'end'}`;\n\n\t\t\treportLog(filterMsg, 'info');\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsole.log(chalk.blue(filterMsg));\n\t\t\t}\n\t\t} else if (skippedCount > 0) {\n\t\t\tconst skipMessage = `Skipping ${skippedCount} tasks marked as done/cancelled/deferred. Analyzing ${tasksData.tasks.length} active tasks.`;\n\t\t\treportLog(skipMessage, 'info');\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsole.log(chalk.yellow(skipMessage));\n\t\t\t}\n\t\t}\n\n\t\t// Check for existing report before doing analysis\n\t\tlet existingReport = null;\n\t\tconst existingAnalysisMap = new Map(); // For quick lookups by task ID\n\t\ttry {\n\t\t\tif (fs.existsSync(outputPath)) {\n\t\t\t\texistingReport = JSON.parse(fs.readFileSync(outputPath, 'utf8'));\n\t\t\t\treportLog(`Found existing complexity report at ${outputPath}`, 'info');\n\n\t\t\t\tif (\n\t\t\t\t\texistingReport &&\n\t\t\t\t\texistingReport.complexityAnalysis &&\n\t\t\t\t\tArray.isArray(existingReport.complexityAnalysis)\n\t\t\t\t) {\n\t\t\t\t\t// Create lookup map of existing analysis entries\n\t\t\t\t\texistingReport.complexityAnalysis.forEach((item) => {\n\t\t\t\t\t\texistingAnalysisMap.set(item.taskId, item);\n\t\t\t\t\t});\n\t\t\t\t\treportLog(\n\t\t\t\t\t\t`Existing report contains ${existingReport.complexityAnalysis.length} task analyses`,\n\t\t\t\t\t\t'info'\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (readError) {\n\t\t\treportLog(\n\t\t\t\t`Warning: Could not read existing report: 
${readError.message}`,\n\t\t\t\t'warn'\n\t\t\t);\n\t\t\texistingReport = null;\n\t\t\texistingAnalysisMap.clear();\n\t\t}\n\n\t\tif (tasksData.tasks.length === 0) {\n\t\t\t// If using ID filtering but no matching tasks, return existing report or empty\n\t\t\tif (existingReport && (specificIds || fromId !== null || toId !== null)) {\n\t\t\t\treportLog(\n\t\t\t\t\t'No matching tasks found for analysis. Keeping existing report.',\n\t\t\t\t\t'info'\n\t\t\t\t);\n\t\t\t\tif (outputFormat === 'text') {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'No matching tasks found for analysis. Keeping existing report.'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\treturn {\n\t\t\t\t\treport: existingReport,\n\t\t\t\t\ttelemetryData: null\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Otherwise create empty report\n\t\t\tconst emptyReport = {\n\t\t\t\tmeta: {\n\t\t\t\t\tgeneratedAt: new Date().toISOString(),\n\t\t\t\t\ttasksAnalyzed: 0,\n\t\t\t\t\tthresholdScore: thresholdScore,\n\t\t\t\t\tprojectName: getProjectName(session),\n\t\t\t\t\tusedResearch: useResearch\n\t\t\t\t},\n\t\t\t\tcomplexityAnalysis: existingReport?.complexityAnalysis || []\n\t\t\t};\n\t\t\treportLog(`Writing complexity report to ${outputPath}...`, 'info');\n\t\t\tfs.writeFileSync(\n\t\t\t\toutputPath,\n\t\t\t\tJSON.stringify(emptyReport, null, '\\t'),\n\t\t\t\t'utf8'\n\t\t\t);\n\t\t\treportLog(\n\t\t\t\t`Task complexity analysis complete. Report written to ${outputPath}`,\n\t\t\t\t'success'\n\t\t\t);\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t`Task complexity analysis complete. 
Report written to ${outputPath}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconst highComplexity = 0;\n\t\t\t\tconst mediumComplexity = 0;\n\t\t\t\tconst lowComplexity = 0;\n\t\t\t\tconst totalAnalyzed = 0;\n\n\t\t\t\tconsole.log('\\nComplexity Analysis Summary:');\n\t\t\t\tconsole.log('----------------------------');\n\t\t\t\tconsole.log(`Tasks in input file: ${originalTaskCount}`);\n\t\t\t\tconsole.log(`Tasks successfully analyzed: ${totalAnalyzed}`);\n\t\t\t\tconsole.log(`High complexity tasks: ${highComplexity}`);\n\t\t\t\tconsole.log(`Medium complexity tasks: ${mediumComplexity}`);\n\t\t\t\tconsole.log(`Low complexity tasks: ${lowComplexity}`);\n\t\t\t\tconsole.log(\n\t\t\t\t\t`Sum verification: ${highComplexity + mediumComplexity + lowComplexity} (should equal ${totalAnalyzed})`\n\t\t\t\t);\n\t\t\t\tconsole.log(`Research-backed analysis: ${useResearch ? 'Yes' : 'No'}`);\n\t\t\t\tconsole.log(\n\t\t\t\t\t`\\nSee ${outputPath} for the full report and expansion commands.`\n\t\t\t\t);\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.white.bold('Suggested Next Steps:') +\n\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t`${chalk.cyan('1.')} Run ${chalk.yellow('task-master complexity-report')} to review detailed findings\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down complex tasks\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('3.')} Run ${chalk.yellow('task-master expand --all')} to expand all pending tasks based on complexity`,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t\treturn {\n\t\t\t\treport: emptyReport,\n\t\t\t\ttelemetryData: null\n\t\t\t};\n\t\t}\n\n\t\t// Continue with regular analysis path\n\t\t// Load prompts using PromptManager\n\t\tconst promptManager = getPromptManager();\n\n\t\tconst promptParams = {\n\t\t\ttasks: 
tasksData.tasks,\n\t\t\tgatheredContext: gatheredContext || '',\n\t\t\tuseResearch: useResearch\n\t\t};\n\n\t\tconst { systemPrompt, userPrompt: prompt } = await promptManager.loadPrompt(\n\t\t\t'analyze-complexity',\n\t\t\tpromptParams,\n\t\t\t'default'\n\t\t);\n\n\t\tlet loadingIndicator = null;\n\t\tif (outputFormat === 'text') {\n\t\t\tloadingIndicator = startLoadingIndicator(\n\t\t\t\t`${useResearch ? 'Researching' : 'Analyzing'} the complexity of your tasks with AI...\\n`\n\t\t\t);\n\t\t}\n\n\t\tlet aiServiceResponse = null;\n\t\tlet complexityAnalysis = null;\n\n\t\ttry {\n\t\t\tconst role = useResearch ? 'research' : 'main';\n\n\t\t\taiServiceResponse = await generateTextService({\n\t\t\t\tprompt,\n\t\t\t\tsystemPrompt,\n\t\t\t\trole,\n\t\t\t\tsession,\n\t\t\t\tprojectRoot,\n\t\t\t\tcommandName: 'analyze-complexity',\n\t\t\t\toutputType: mcpLog ? 'mcp' : 'cli'\n\t\t\t});\n\n\t\t\tif (loadingIndicator) {\n\t\t\t\tstopLoadingIndicator(loadingIndicator);\n\t\t\t\tloadingIndicator = null;\n\t\t\t}\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\treadline.clearLine(process.stdout, 0);\n\t\t\t\treadline.cursorTo(process.stdout, 0);\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.green('AI service call complete. 
Parsing response...')\n\t\t\t\t);\n\t\t\t}\n\n\t\t\treportLog('Parsing complexity analysis from text response...', 'info');\n\t\t\ttry {\n\t\t\t\tlet cleanedResponse = aiServiceResponse.mainResult;\n\t\t\t\tcleanedResponse = cleanedResponse.trim();\n\n\t\t\t\tconst codeBlockMatch = cleanedResponse.match(\n\t\t\t\t\t/```(?:json)?\\s*([\\s\\S]*?)\\s*```/\n\t\t\t\t);\n\t\t\t\tif (codeBlockMatch) {\n\t\t\t\t\tcleanedResponse = codeBlockMatch[1].trim();\n\t\t\t\t} else {\n\t\t\t\t\tconst firstBracket = cleanedResponse.indexOf('[');\n\t\t\t\t\tconst lastBracket = cleanedResponse.lastIndexOf(']');\n\t\t\t\t\tif (firstBracket !== -1 && lastBracket > firstBracket) {\n\t\t\t\t\t\tcleanedResponse = cleanedResponse.substring(\n\t\t\t\t\t\t\tfirstBracket,\n\t\t\t\t\t\t\tlastBracket + 1\n\t\t\t\t\t\t);\n\t\t\t\t\t} else {\n\t\t\t\t\t\treportLog(\n\t\t\t\t\t\t\t'Warning: Response does not appear to be a JSON array.',\n\t\t\t\t\t\t\t'warn'\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (outputFormat === 'text' && getDebugFlag(session)) {\n\t\t\t\t\tconsole.log(chalk.gray('Attempting to parse cleaned JSON...'));\n\t\t\t\t\tconsole.log(chalk.gray('Cleaned response (first 100 chars):'));\n\t\t\t\t\tconsole.log(chalk.gray(cleanedResponse.substring(0, 100)));\n\t\t\t\t\tconsole.log(chalk.gray('Last 100 chars:'));\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.gray(cleanedResponse.substring(cleanedResponse.length - 100))\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tcomplexityAnalysis = JSON.parse(cleanedResponse);\n\t\t\t} catch (parseError) {\n\t\t\t\tif (loadingIndicator) stopLoadingIndicator(loadingIndicator);\n\t\t\t\treportLog(\n\t\t\t\t\t`Error parsing complexity analysis JSON: ${parseError.message}`,\n\t\t\t\t\t'error'\n\t\t\t\t);\n\t\t\t\tif (outputFormat === 'text') {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\tchalk.red(\n\t\t\t\t\t\t\t`Error parsing complexity analysis JSON: ${parseError.message}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tthrow parseError;\n\t\t\t}\n\n\t\t\tconst 
taskIds = tasksData.tasks.map((t) => t.id);\n\t\t\tconst analysisTaskIds = complexityAnalysis.map((a) => a.taskId);\n\t\t\tconst missingTaskIds = taskIds.filter(\n\t\t\t\t(id) => !analysisTaskIds.includes(id)\n\t\t\t);\n\n\t\t\tif (missingTaskIds.length > 0) {\n\t\t\t\treportLog(\n\t\t\t\t\t`Missing analysis for ${missingTaskIds.length} tasks: ${missingTaskIds.join(', ')}`,\n\t\t\t\t\t'warn'\n\t\t\t\t);\n\t\t\t\tif (outputFormat === 'text') {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`Missing analysis for ${missingTaskIds.length} tasks: ${missingTaskIds.join(', ')}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tfor (const missingId of missingTaskIds) {\n\t\t\t\t\tconst missingTask = tasksData.tasks.find((t) => t.id === missingId);\n\t\t\t\t\tif (missingTask) {\n\t\t\t\t\t\treportLog(`Adding default analysis for task ${missingId}`, 'info');\n\t\t\t\t\t\tcomplexityAnalysis.push({\n\t\t\t\t\t\t\ttaskId: missingId,\n\t\t\t\t\t\t\ttaskTitle: missingTask.title,\n\t\t\t\t\t\t\tcomplexityScore: 5,\n\t\t\t\t\t\t\trecommendedSubtasks: 3,\n\t\t\t\t\t\t\texpansionPrompt: `Break down this task with a focus on ${missingTask.title.toLowerCase()}.`,\n\t\t\t\t\t\t\treasoning:\n\t\t\t\t\t\t\t\t'Automatically added due to missing analysis in AI response.'\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Merge with existing report - only keep entries from the current tag\n\t\t\tlet finalComplexityAnalysis = [];\n\n\t\t\tif (existingReport && Array.isArray(existingReport.complexityAnalysis)) {\n\t\t\t\t// Create a map of task IDs that we just analyzed\n\t\t\t\tconst analyzedTaskIds = new Set(\n\t\t\t\t\tcomplexityAnalysis.map((item) => item.taskId)\n\t\t\t\t);\n\n\t\t\t\t// Keep existing entries that weren't in this analysis run AND belong to the current tag\n\t\t\t\t// We determine tag membership by checking if the task ID exists in the current tag's tasks\n\t\t\t\tconst currentTagTaskIds = new Set(tasksData.tasks.map((t) => 
t.id));\n\t\t\t\tconst existingEntriesNotAnalyzed =\n\t\t\t\t\texistingReport.complexityAnalysis.filter(\n\t\t\t\t\t\t(item) =>\n\t\t\t\t\t\t\t!analyzedTaskIds.has(item.taskId) &&\n\t\t\t\t\t\t\tcurrentTagTaskIds.has(item.taskId) // Only keep entries for tasks in current tag\n\t\t\t\t\t);\n\n\t\t\t\t// Combine with new analysis\n\t\t\t\tfinalComplexityAnalysis = [\n\t\t\t\t\t...existingEntriesNotAnalyzed,\n\t\t\t\t\t...complexityAnalysis\n\t\t\t\t];\n\n\t\t\t\treportLog(\n\t\t\t\t\t`Merged ${complexityAnalysis.length} new analyses with ${existingEntriesNotAnalyzed.length} existing entries from current tag`,\n\t\t\t\t\t'info'\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// No existing report or invalid format, just use the new analysis\n\t\t\t\tfinalComplexityAnalysis = complexityAnalysis;\n\t\t\t}\n\n\t\t\tconst report = {\n\t\t\t\tmeta: {\n\t\t\t\t\tgeneratedAt: new Date().toISOString(),\n\t\t\t\t\ttasksAnalyzed: tasksData.tasks.length,\n\t\t\t\t\ttotalTasks: originalTaskCount,\n\t\t\t\t\tanalysisCount: finalComplexityAnalysis.length,\n\t\t\t\t\tthresholdScore: thresholdScore,\n\t\t\t\t\tprojectName: getProjectName(session),\n\t\t\t\t\tusedResearch: useResearch\n\t\t\t\t},\n\t\t\t\tcomplexityAnalysis: finalComplexityAnalysis\n\t\t\t};\n\t\t\treportLog(`Writing complexity report to ${outputPath}...`, 'info');\n\t\t\tfs.writeFileSync(outputPath, JSON.stringify(report, null, '\\t'), 'utf8');\n\n\t\t\treportLog(\n\t\t\t\t`Task complexity analysis complete. Report written to ${outputPath}`,\n\t\t\t\t'success'\n\t\t\t);\n\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsole.log(\n\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t`Task complexity analysis complete. 
Report written to ${outputPath}`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\t// Calculate statistics specifically for this analysis run\n\t\t\t\tconst highComplexity = complexityAnalysis.filter(\n\t\t\t\t\t(t) => t.complexityScore >= 8\n\t\t\t\t).length;\n\t\t\t\tconst mediumComplexity = complexityAnalysis.filter(\n\t\t\t\t\t(t) => t.complexityScore >= 5 && t.complexityScore < 8\n\t\t\t\t).length;\n\t\t\t\tconst lowComplexity = complexityAnalysis.filter(\n\t\t\t\t\t(t) => t.complexityScore < 5\n\t\t\t\t).length;\n\t\t\t\tconst totalAnalyzed = complexityAnalysis.length;\n\n\t\t\t\tconsole.log('\\nCurrent Analysis Summary:');\n\t\t\t\tconsole.log('----------------------------');\n\t\t\t\tconsole.log(`Tasks analyzed in this run: ${totalAnalyzed}`);\n\t\t\t\tconsole.log(`High complexity tasks: ${highComplexity}`);\n\t\t\t\tconsole.log(`Medium complexity tasks: ${mediumComplexity}`);\n\t\t\t\tconsole.log(`Low complexity tasks: ${lowComplexity}`);\n\n\t\t\t\tif (existingReport) {\n\t\t\t\t\tconsole.log('\\nUpdated Report Summary:');\n\t\t\t\t\tconsole.log('----------------------------');\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t`Total analyses in report: ${finalComplexityAnalysis.length}`\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\t`Analyses from previous runs: ${finalComplexityAnalysis.length - totalAnalyzed}`\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(`New/updated analyses: ${totalAnalyzed}`);\n\t\t\t\t}\n\n\t\t\t\tconsole.log(`Research-backed analysis: ${useResearch ? 
'Yes' : 'No'}`);\n\t\t\t\tconsole.log(\n\t\t\t\t\t`\\nSee ${outputPath} for the full report and expansion commands.`\n\t\t\t\t);\n\n\t\t\t\tconsole.log(\n\t\t\t\t\tboxen(\n\t\t\t\t\t\tchalk.white.bold('Suggested Next Steps:') +\n\t\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t\t`${chalk.cyan('1.')} Run ${chalk.yellow('task-master complexity-report')} to review detailed findings\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('2.')} Run ${chalk.yellow('task-master expand --id=<id>')} to break down complex tasks\\n` +\n\t\t\t\t\t\t\t`${chalk.cyan('3.')} Run ${chalk.yellow('task-master expand --all')} to expand all pending tasks based on complexity`,\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t\t}\n\t\t\t\t\t)\n\t\t\t\t);\n\n\t\t\t\tif (getDebugFlag(session)) {\n\t\t\t\t\tconsole.debug(\n\t\t\t\t\t\tchalk.gray(\n\t\t\t\t\t\t\t`Final analysis object: ${JSON.stringify(report, null, 2)}`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tif (aiServiceResponse.telemetryData) {\n\t\t\t\t\tdisplayAiUsageSummary(aiServiceResponse.telemetryData, 'cli');\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn {\n\t\t\t\treport: report,\n\t\t\t\ttelemetryData: aiServiceResponse?.telemetryData,\n\t\t\t\ttagInfo: aiServiceResponse?.tagInfo\n\t\t\t};\n\t\t} catch (aiError) {\n\t\t\tif (loadingIndicator) stopLoadingIndicator(loadingIndicator);\n\t\t\treportLog(`Error during AI service call: ${aiError.message}`, 'error');\n\t\t\tif (outputFormat === 'text') {\n\t\t\t\tconsole.error(\n\t\t\t\t\tchalk.red(`Error during AI service call: ${aiError.message}`)\n\t\t\t\t);\n\t\t\t\tif (aiError.message.includes('API key')) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t'\\nPlease ensure your API keys are correctly configured in .env or ~/.taskmaster/.env'\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\"Run 'task-master models --setup' if 
needed.\")\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t\tthrow aiError;\n\t\t}\n\t} catch (error) {\n\t\treportLog(`Error analyzing task complexity: ${error.message}`, 'error');\n\t\tif (outputFormat === 'text') {\n\t\t\tconsole.error(\n\t\t\t\tchalk.red(`Error analyzing task complexity: ${error.message}`)\n\t\t\t);\n\t\t\tif (getDebugFlag(session)) {\n\t\t\t\tconsole.error(error);\n\t\t\t}\n\t\t\tprocess.exit(1);\n\t\t} else {\n\t\t\tthrow error;\n\t\t}\n\t}\n}\n\nexport default analyzeTaskComplexity;\n"], ["/claude-task-master/scripts/modules/task-manager/remove-task.js", "import path from 'path';\nimport * as fs from 'fs';\nimport { readJSON, writeJSON, log, findTaskById } from '../utils.js';\nimport generateTaskFiles from './generate-task-files.js';\nimport taskExists from './task-exists.js';\n\n/**\n * Removes one or more tasks or subtasks from the tasks file\n * @param {string} tasksPath - Path to the tasks file\n * @param {string} taskIds - Comma-separated string of task/subtask IDs to remove (e.g., '5,6.1,7')\n * @param {Object} context - Context object containing projectRoot and tag information\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n * @returns {Object} Result object with success status, messages, and removed task info\n */\nasync function removeTask(tasksPath, taskIds, context = {}) {\n\tconst { projectRoot, tag } = context;\n\tconst results = {\n\t\tsuccess: true,\n\t\tmessages: [],\n\t\terrors: [],\n\t\tremovedTasks: []\n\t};\n\tconst taskIdsToRemove = taskIds\n\t\t.split(',')\n\t\t.map((id) => id.trim())\n\t\t.filter(Boolean); // Remove empty strings if any\n\n\tif (taskIdsToRemove.length === 0) {\n\t\tresults.success = false;\n\t\tresults.errors.push('No valid task IDs provided.');\n\t\treturn results;\n\t}\n\n\ttry {\n\t\t// Read the tasks file ONCE before the loop, preserving the full tagged structure\n\t\tconst rawData = readJSON(tasksPath, projectRoot, tag); // Read raw 
data\n\t\tif (!rawData) {\n\t\t\tthrow new Error(`Could not read tasks file at ${tasksPath}`);\n\t\t}\n\n\t\t// Use the full tagged data if available, otherwise use the data as is\n\t\tconst fullTaggedData = rawData._rawTaggedData || rawData;\n\n\t\tif (!fullTaggedData[tag] || !fullTaggedData[tag].tasks) {\n\t\t\tthrow new Error(`Tag '${tag}' not found or has no tasks.`);\n\t\t}\n\n\t\tconst tasks = fullTaggedData[tag].tasks; // Work with tasks from the correct tag\n\n\t\tconst tasksToDeleteFiles = []; // Collect IDs of main tasks whose files should be deleted\n\n\t\tfor (const taskId of taskIdsToRemove) {\n\t\t\t// Check if the task ID exists *before* attempting removal\n\t\t\tif (!taskExists(tasks, taskId)) {\n\t\t\t\tconst errorMsg = `Task with ID ${taskId} in tag '${tag}' not found or already removed.`;\n\t\t\t\tresults.errors.push(errorMsg);\n\t\t\t\tresults.success = false; // Mark overall success as false if any error occurs\n\t\t\t\tcontinue; // Skip to the next ID\n\t\t\t}\n\n\t\t\ttry {\n\t\t\t\t// Handle subtask removal (e.g., '5.2')\n\t\t\t\tif (typeof taskId === 'string' && taskId.includes('.')) {\n\t\t\t\t\tconst [parentTaskId, subtaskId] = taskId\n\t\t\t\t\t\t.split('.')\n\t\t\t\t\t\t.map((id) => parseInt(id, 10));\n\n\t\t\t\t\t// Find the parent task\n\t\t\t\t\tconst parentTask = tasks.find((t) => t.id === parentTaskId);\n\t\t\t\t\tif (!parentTask || !parentTask.subtasks) {\n\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t`Parent task ${parentTaskId} or its subtasks not found for subtask ${taskId}`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Find the subtask to remove\n\t\t\t\t\tconst subtaskIndex = parentTask.subtasks.findIndex(\n\t\t\t\t\t\t(st) => st.id === subtaskId\n\t\t\t\t\t);\n\t\t\t\t\tif (subtaskIndex === -1) {\n\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t`Subtask ${subtaskId} not found in parent task ${parentTaskId}`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Store the subtask info before removal\n\t\t\t\t\tconst removedSubtask = 
{\n\t\t\t\t\t\t...parentTask.subtasks[subtaskIndex],\n\t\t\t\t\t\tparentTaskId: parentTaskId\n\t\t\t\t\t};\n\t\t\t\t\tresults.removedTasks.push(removedSubtask);\n\n\t\t\t\t\t// Remove the subtask from the parent\n\t\t\t\t\tparentTask.subtasks.splice(subtaskIndex, 1);\n\n\t\t\t\t\tresults.messages.push(\n\t\t\t\t\t\t`Successfully removed subtask ${taskId} from tag '${tag}'`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\t// Handle main task removal\n\t\t\t\telse {\n\t\t\t\t\tconst taskIdNum = parseInt(taskId, 10);\n\t\t\t\t\tconst taskIndex = tasks.findIndex((t) => t.id === taskIdNum);\n\t\t\t\t\tif (taskIndex === -1) {\n\t\t\t\t\t\tthrow new Error(`Task with ID ${taskId} not found in tag '${tag}'`);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Store the task info before removal\n\t\t\t\t\tconst removedTask = tasks[taskIndex];\n\t\t\t\t\tresults.removedTasks.push(removedTask);\n\t\t\t\t\ttasksToDeleteFiles.push(taskIdNum); // Add to list for file deletion\n\n\t\t\t\t\t// Remove the task from the main array\n\t\t\t\t\ttasks.splice(taskIndex, 1);\n\n\t\t\t\t\tresults.messages.push(\n\t\t\t\t\t\t`Successfully removed task ${taskId} from tag '${tag}'`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} catch (innerError) {\n\t\t\t\t// Catch errors specific to processing *this* ID\n\t\t\t\tconst errorMsg = `Error processing ID ${taskId}: ${innerError.message}`;\n\t\t\t\tresults.errors.push(errorMsg);\n\t\t\t\tresults.success = false;\n\t\t\t\tlog('warn', errorMsg); // Log as warning and continue with next ID\n\t\t\t}\n\t\t} // End of loop through taskIdsToRemove\n\n\t\t// --- Post-Loop Operations ---\n\n\t\t// Only proceed with cleanup and saving if at least one task was potentially removed\n\t\tif (results.removedTasks.length > 0) {\n\t\t\tconst allRemovedIds = new Set(\n\t\t\t\ttaskIdsToRemove.map((id) =>\n\t\t\t\t\ttypeof id === 'string' && id.includes('.') ? 
id : parseInt(id, 10)\n\t\t\t\t)\n\t\t\t);\n\n\t\t\t// Update the tasks in the current tag of the full data structure\n\t\t\tfullTaggedData[tag].tasks = tasks;\n\n\t\t\t// Remove dependencies from all tags\n\t\t\tfor (const tagName in fullTaggedData) {\n\t\t\t\tif (\n\t\t\t\t\tObject.prototype.hasOwnProperty.call(fullTaggedData, tagName) &&\n\t\t\t\t\tfullTaggedData[tagName] &&\n\t\t\t\t\tfullTaggedData[tagName].tasks\n\t\t\t\t) {\n\t\t\t\t\tconst currentTagTasks = fullTaggedData[tagName].tasks;\n\t\t\t\t\tcurrentTagTasks.forEach((task) => {\n\t\t\t\t\t\tif (task.dependencies) {\n\t\t\t\t\t\t\ttask.dependencies = task.dependencies.filter(\n\t\t\t\t\t\t\t\t(depId) => !allRemovedIds.has(depId)\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif (task.subtasks) {\n\t\t\t\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\t\t\t\tif (subtask.dependencies) {\n\t\t\t\t\t\t\t\t\tsubtask.dependencies = subtask.dependencies.filter(\n\t\t\t\t\t\t\t\t\t\t(depId) =>\n\t\t\t\t\t\t\t\t\t\t\t!allRemovedIds.has(`${task.id}.${depId}`) &&\n\t\t\t\t\t\t\t\t\t\t\t!allRemovedIds.has(depId)\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Save the updated raw data structure\n\t\t\twriteJSON(tasksPath, fullTaggedData, projectRoot, tag);\n\n\t\t\t// Delete task files AFTER saving tasks.json\n\t\t\tfor (const taskIdNum of tasksToDeleteFiles) {\n\t\t\t\tconst taskFileName = path.join(\n\t\t\t\t\tpath.dirname(tasksPath),\n\t\t\t\t\t`task_${taskIdNum.toString().padStart(3, '0')}.txt`\n\t\t\t\t);\n\t\t\t\tif (fs.existsSync(taskFileName)) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tfs.unlinkSync(taskFileName);\n\t\t\t\t\t\tresults.messages.push(`Deleted task file: ${taskFileName}`);\n\t\t\t\t\t} catch (unlinkError) {\n\t\t\t\t\t\tconst unlinkMsg = `Failed to delete task file ${taskFileName}: ${unlinkError.message}`;\n\t\t\t\t\t\tresults.errors.push(unlinkMsg);\n\t\t\t\t\t\tresults.success = false;\n\t\t\t\t\t\tlog('warn', 
unlinkMsg);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Generate updated task files ONCE, with context\n\t\t\t// try {\n\t\t\t// \tawait generateTaskFiles(tasksPath, path.dirname(tasksPath), {\n\t\t\t// \t\tprojectRoot,\n\t\t\t// \t\ttag\n\t\t\t// \t});\n\t\t\t// \tresults.messages.push('Task files regenerated successfully.');\n\t\t\t// } catch (genError) {\n\t\t\t// \tconst genErrMsg = `Failed to regenerate task files: ${genError.message}`;\n\t\t\t// \tresults.errors.push(genErrMsg);\n\t\t\t// \tresults.success = false;\n\t\t\t// \tlog('warn', genErrMsg);\n\t\t\t// }\n\t\t} else if (results.errors.length === 0) {\n\t\t\tresults.messages.push('No tasks found matching the provided IDs.');\n\t\t}\n\n\t\t// Consolidate messages for final output\n\t\tconst finalMessage = results.messages.join('\\n');\n\t\tconst finalError = results.errors.join('\\n');\n\n\t\treturn {\n\t\t\tsuccess: results.success,\n\t\t\tmessage: finalMessage || 'No tasks were removed.',\n\t\t\terror: finalError || null,\n\t\t\tremovedTasks: results.removedTasks\n\t\t};\n\t} catch (error) {\n\t\t// Catch errors from reading file or other initial setup\n\t\tlog('error', `Error removing tasks: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\tmessage: '',\n\t\t\terror: `Operation failed: ${error.message}`,\n\t\t\tremovedTasks: []\n\t\t};\n\t}\n}\n\nexport default removeTask;\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/remove-dependency.js", "/**\n * Direct function wrapper for removeDependency\n */\n\nimport { removeDependency } from '../../../../scripts/modules/dependency-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Remove a dependency from a task\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string|number} args.id - Task ID to remove dependency from\n * @param {string|number} args.dependsOn - Task 
ID to remove as a dependency\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function removeDependencyDirect(args, log) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, id, dependsOn, projectRoot, tag } = args;\n\ttry {\n\t\tlog.info(`Removing dependency with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('removeDependencyDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Validate required parameters\n\t\tif (!id) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: 'Task ID (id) is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tif (!dependsOn) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'INPUT_VALIDATION_ERROR',\n\t\t\t\t\tmessage: 'Dependency ID (dependsOn) is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Format IDs for the core function\n\t\tconst taskId =\n\t\t\tid && id.includes && id.includes('.') ? id : parseInt(id, 10);\n\t\tconst dependencyId =\n\t\t\tdependsOn && dependsOn.includes && dependsOn.includes('.')\n\t\t\t\t? 
dependsOn\n\t\t\t\t: parseInt(dependsOn, 10);\n\n\t\tlog.info(\n\t\t\t`Removing dependency: task ${taskId} no longer depends on ${dependencyId}`\n\t\t);\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\t// Call the core function using the provided tasksPath\n\t\tawait removeDependency(tasksPath, taskId, dependencyId, {\n\t\t\tprojectRoot,\n\t\t\ttag\n\t\t});\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: `Successfully removed dependency: Task ${taskId} no longer depends on ${dependencyId}`,\n\t\t\t\ttaskId: taskId,\n\t\t\t\tdependencyId: dependencyId\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in removeDependencyDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CORE_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/fix-dependencies.js", "/**\n * Direct function wrapper for fixDependenciesCommand\n */\n\nimport { fixDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport fs from 'fs';\n\n/**\n * Fix invalid dependencies in tasks.json automatically\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.projectRoot - Project root directory\n * @param {string} args.tag - Tag for the project\n * @param {Object} log - Logger object\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function fixDependenciesDirect(args, log) {\n\t// Destructure expected args\n\tconst { tasksJsonPath, 
projectRoot, tag } = args;\n\ttry {\n\t\tlog.info(`Fixing invalid dependencies in tasks: ${tasksJsonPath}`);\n\n\t\t// Check if tasksJsonPath was provided\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('fixDependenciesDirect called without tasksJsonPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Use provided path\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Verify the file exists\n\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'FILE_NOT_FOUND',\n\t\t\t\t\tmessage: `Tasks file not found at ${tasksPath}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\tconst options = { projectRoot, tag };\n\t\t// Call the original command function using the provided path and proper context\n\t\tawait fixDependenciesCommand(tasksPath, options);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: 'Dependencies fixed successfully',\n\t\t\t\ttasksPath,\n\t\t\t\ttag: tag || 'master'\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error fixing dependencies: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'FIX_DEPENDENCIES_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/validate-dependencies.js", "/**\n * Direct function wrapper for validateDependenciesCommand\n */\n\nimport { validateDependenciesCommand } from '../../../../scripts/modules/dependency-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport fs from 'fs';\n\n/**\n 
* Validate dependencies in tasks.json\n * @param {Object} args - Function arguments\n * @param {string} args.tasksJsonPath - Explicit path to the tasks.json file.\n * @param {string} args.projectRoot - Project root path (for MCP/env fallback)\n * @param {string} args.tag - Tag for the task (optional)\n * @param {Object} log - Logger object\n * @returns {Promise<{success: boolean, data?: Object, error?: {code: string, message: string}}>}\n */\nexport async function validateDependenciesDirect(args, log) {\n\t// Destructure the explicit tasksJsonPath\n\tconst { tasksJsonPath, projectRoot, tag } = args;\n\n\tif (!tasksJsonPath) {\n\t\tlog.error('validateDependenciesDirect called without tasksJsonPath');\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t}\n\t\t};\n\t}\n\n\ttry {\n\t\tlog.info(`Validating dependencies in tasks: ${tasksJsonPath}`);\n\n\t\t// Use the provided tasksJsonPath\n\t\tconst tasksPath = tasksJsonPath;\n\n\t\t// Verify the file exists\n\t\tif (!fs.existsSync(tasksPath)) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'FILE_NOT_FOUND',\n\t\t\t\t\tmessage: `Tasks file not found at ${tasksPath}`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\tenableSilentMode();\n\n\t\tconst options = { projectRoot, tag };\n\t\t// Call the original command function using the provided tasksPath\n\t\tawait validateDependenciesCommand(tasksPath, options);\n\n\t\t// Restore normal logging\n\t\tdisableSilentMode();\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmessage: 'Dependencies validated successfully',\n\t\t\t\ttasksPath\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\t// Make sure to restore normal logging even if there's an error\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error validating dependencies: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: 
false,\n\t\t\terror: {\n\t\t\t\tcode: 'VALIDATION_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/models.js", "/**\n * models.js\n * Direct function for managing AI model configurations via MCP\n */\n\nimport {\n\tgetModelConfiguration,\n\tgetAvailableModelsList,\n\tsetModel\n} from '../../../../scripts/modules/task-manager/models.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\nimport { CUSTOM_PROVIDERS_ARRAY } from '../../../../src/constants/providers.js';\n\n// Define supported roles for model setting\nconst MODEL_ROLES = ['main', 'research', 'fallback'];\n\n/**\n * Determine provider hint from custom provider flags\n * @param {Object} args - Arguments containing provider flags\n * @returns {string|undefined} Provider hint or undefined if no custom provider flag is set\n */\nfunction getProviderHint(args) {\n\treturn CUSTOM_PROVIDERS_ARRAY.find((provider) => args[provider]);\n}\n\n/**\n * Handle setting models for different roles\n * @param {Object} args - Arguments containing role-specific model IDs\n * @param {Object} context - Context object with session, mcpLog, projectRoot\n * @returns {Object|null} Result if a model was set, null if no model setting was requested\n */\nasync function handleModelSetting(args, context) {\n\tfor (const role of MODEL_ROLES) {\n\t\tconst roleKey = `set${role.charAt(0).toUpperCase() + role.slice(1)}`; // setMain, setResearch, setFallback\n\n\t\tif (args[roleKey]) {\n\t\t\tconst providerHint = getProviderHint(args);\n\n\t\t\treturn await setModel(role, args[roleKey], {\n\t\t\t\t...context,\n\t\t\t\tproviderHint\n\t\t\t});\n\t\t}\n\t}\n\treturn null; // No model setting was requested\n}\n\n/**\n * Get or update model configuration\n * @param {Object} args - Arguments passed by the MCP tool\n * @param {Object} log - MCP logger\n * @param 
{Object} context - MCP context (contains session)\n * @returns {Object} Result object with success, data/error fields\n */\nexport async function modelsDirect(args, log, context = {}) {\n\tconst { session } = context;\n\tconst { projectRoot } = args; // Extract projectRoot from args\n\n\t// Create a logger wrapper that the core functions can use\n\tconst mcpLog = createLogWrapper(log);\n\n\tlog.info(`Executing models_direct with args: ${JSON.stringify(args)}`);\n\tlog.info(`Using project root: ${projectRoot}`);\n\n\t// Validate flags: only one custom provider flag can be used simultaneously\n\tconst customProviderFlags = CUSTOM_PROVIDERS_ARRAY.filter(\n\t\t(provider) => args[provider]\n\t);\n\n\tif (customProviderFlags.length > 1) {\n\t\tlog.error(\n\t\t\t'Error: Cannot use multiple custom provider flags simultaneously.'\n\t\t);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'INVALID_ARGS',\n\t\t\t\tmessage:\n\t\t\t\t\t'Cannot use multiple custom provider flags simultaneously. 
Choose only one: openrouter, ollama, bedrock, azure, or vertex.'\n\t\t\t}\n\t\t};\n\t}\n\n\ttry {\n\t\tenableSilentMode();\n\n\t\ttry {\n\t\t\t// Check for the listAvailableModels flag\n\t\t\tif (args.listAvailableModels === true) {\n\t\t\t\treturn await getAvailableModelsList({\n\t\t\t\t\tsession,\n\t\t\t\t\tmcpLog,\n\t\t\t\t\tprojectRoot\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// Handle setting any model role using unified function\n\t\t\tconst modelContext = { session, mcpLog, projectRoot };\n\t\t\tconst modelSetResult = await handleModelSetting(args, modelContext);\n\t\t\tif (modelSetResult) {\n\t\t\t\treturn modelSetResult;\n\t\t\t}\n\n\t\t\t// Default action: get current configuration\n\t\t\treturn await getModelConfiguration({\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t});\n\t\t} finally {\n\t\t\tdisableSilentMode();\n\t\t}\n\t} catch (error) {\n\t\tlog.error(`Error in models_direct: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'DIRECT_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message,\n\t\t\t\tdetails: error.stack\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/task-manager/move-task.js", "import path from 'path';\nimport { log, readJSON, writeJSON, setTasksForTag } from '../utils.js';\nimport { isTaskDependentOn } from '../task-manager.js';\nimport generateTaskFiles from './generate-task-files.js';\n\n/**\n * Move one or more tasks/subtasks to new positions\n * @param {string} tasksPath - Path to tasks.json file\n * @param {string} sourceId - ID(s) of the task/subtask to move (e.g., '5' or '5.2' or '5,6,7')\n * @param {string} destinationId - ID(s) of the destination (e.g., '7' or '7.3' or '7,8,9')\n * @param {boolean} generateFiles - Whether to regenerate task files after moving\n * @param {Object} options - Additional options\n * @param {string} options.projectRoot - Project root directory for tag resolution\n * @param {string} options.tag - Explicit tag to use (optional)\n * 
@returns {Object} Result object with moved task details\n */\nasync function moveTask(\n\ttasksPath,\n\tsourceId,\n\tdestinationId,\n\tgenerateFiles = false,\n\toptions = {}\n) {\n\tconst { projectRoot, tag } = options;\n\t// Check if we have comma-separated IDs (batch move)\n\tconst sourceIds = sourceId.split(',').map((id) => id.trim());\n\tconst destinationIds = destinationId.split(',').map((id) => id.trim());\n\n\tif (sourceIds.length !== destinationIds.length) {\n\t\tthrow new Error(\n\t\t\t`Number of source IDs (${sourceIds.length}) must match number of destination IDs (${destinationIds.length})`\n\t\t);\n\t}\n\n\t// For batch moves, process each pair sequentially\n\tif (sourceIds.length > 1) {\n\t\tconst results = [];\n\t\tfor (let i = 0; i < sourceIds.length; i++) {\n\t\t\tconst result = await moveTask(\n\t\t\t\ttasksPath,\n\t\t\t\tsourceIds[i],\n\t\t\t\tdestinationIds[i],\n\t\t\t\tfalse, // Don't generate files for each individual move\n\t\t\t\toptions\n\t\t\t);\n\t\t\tresults.push(result);\n\t\t}\n\n\t\t// Generate files once at the end if requested\n\t\tif (generateFiles) {\n\t\t\tawait generateTaskFiles(tasksPath, path.dirname(tasksPath), {\n\t\t\t\ttag: tag,\n\t\t\t\tprojectRoot: projectRoot\n\t\t\t});\n\t\t}\n\n\t\treturn {\n\t\t\tmessage: `Successfully moved ${sourceIds.length} tasks/subtasks`,\n\t\t\tmoves: results\n\t\t};\n\t}\n\n\t// Single move logic\n\t// Read the raw data without tag resolution to preserve tagged structure\n\tlet rawData = readJSON(tasksPath, projectRoot, tag);\n\n\t// Handle the case where readJSON returns resolved data with _rawTaggedData\n\tif (rawData && rawData._rawTaggedData) {\n\t\t// Use the raw tagged data and discard the resolved view\n\t\trawData = rawData._rawTaggedData;\n\t}\n\n\t// Ensure the tag exists in the raw data\n\tif (!rawData || !rawData[tag] || !Array.isArray(rawData[tag].tasks)) {\n\t\tthrow new Error(\n\t\t\t`Invalid tasks file or tag \"${tag}\" not found at ${tasksPath}`\n\t\t);\n\t}\n\n\t// Get the 
tasks for the current tag\n\tconst tasks = rawData[tag].tasks;\n\n\tlog(\n\t\t'info',\n\t\t`Moving task/subtask ${sourceId} to ${destinationId} (tag: ${tag})`\n\t);\n\n\t// Parse source and destination IDs\n\tconst isSourceSubtask = sourceId.includes('.');\n\tconst isDestSubtask = destinationId.includes('.');\n\n\tlet result;\n\n\tif (isSourceSubtask && isDestSubtask) {\n\t\t// Subtask to subtask\n\t\tresult = moveSubtaskToSubtask(tasks, sourceId, destinationId);\n\t} else if (isSourceSubtask && !isDestSubtask) {\n\t\t// Subtask to task\n\t\tresult = moveSubtaskToTask(tasks, sourceId, destinationId);\n\t} else if (!isSourceSubtask && isDestSubtask) {\n\t\t// Task to subtask\n\t\tresult = moveTaskToSubtask(tasks, sourceId, destinationId);\n\t} else {\n\t\t// Task to task\n\t\tresult = moveTaskToTask(tasks, sourceId, destinationId);\n\t}\n\n\t// Update the data structure with the modified tasks\n\trawData[tag].tasks = tasks;\n\n\t// Always write the data object, never the _rawTaggedData directly\n\t// The writeJSON function will filter out _rawTaggedData automatically\n\twriteJSON(tasksPath, rawData, options.projectRoot, tag);\n\n\tif (generateFiles) {\n\t\tawait generateTaskFiles(tasksPath, path.dirname(tasksPath), {\n\t\t\ttag: tag,\n\t\t\tprojectRoot: projectRoot\n\t\t});\n\t}\n\n\treturn result;\n}\n\n// Helper functions for different move scenarios\nfunction moveSubtaskToSubtask(tasks, sourceId, destinationId) {\n\t// Parse IDs\n\tconst [sourceParentId, sourceSubtaskId] = sourceId\n\t\t.split('.')\n\t\t.map((id) => parseInt(id, 10));\n\tconst [destParentId, destSubtaskId] = destinationId\n\t\t.split('.')\n\t\t.map((id) => parseInt(id, 10));\n\n\t// Find source and destination parent tasks\n\tconst sourceParentTask = tasks.find((t) => t.id === sourceParentId);\n\tconst destParentTask = tasks.find((t) => t.id === destParentId);\n\n\tif (!sourceParentTask) {\n\t\tthrow new Error(`Source parent task with ID ${sourceParentId} not found`);\n\t}\n\tif (!destParentTask) 
{\n\t\tthrow new Error(\n\t\t\t`Destination parent task with ID ${destParentId} not found`\n\t\t);\n\t}\n\n\t// Initialize subtasks arrays if they don't exist (based on commit fixes)\n\tif (!sourceParentTask.subtasks) {\n\t\tsourceParentTask.subtasks = [];\n\t}\n\tif (!destParentTask.subtasks) {\n\t\tdestParentTask.subtasks = [];\n\t}\n\n\t// Find source subtask\n\tconst sourceSubtaskIndex = sourceParentTask.subtasks.findIndex(\n\t\t(st) => st.id === sourceSubtaskId\n\t);\n\tif (sourceSubtaskIndex === -1) {\n\t\tthrow new Error(`Source subtask ${sourceId} not found`);\n\t}\n\n\tconst sourceSubtask = sourceParentTask.subtasks[sourceSubtaskIndex];\n\n\tif (sourceParentId === destParentId) {\n\t\t// Moving within the same parent\n\t\tif (destParentTask.subtasks.length > 0) {\n\t\t\tconst destSubtaskIndex = destParentTask.subtasks.findIndex(\n\t\t\t\t(st) => st.id === destSubtaskId\n\t\t\t);\n\t\t\tif (destSubtaskIndex !== -1) {\n\t\t\t\t// Remove from old position\n\t\t\t\tsourceParentTask.subtasks.splice(sourceSubtaskIndex, 1);\n\t\t\t\t// Insert at new position (adjust index if moving within same array)\n\t\t\t\tconst adjustedIndex =\n\t\t\t\t\tsourceSubtaskIndex < destSubtaskIndex\n\t\t\t\t\t\t? 
destSubtaskIndex - 1\n\t\t\t\t\t\t: destSubtaskIndex;\n\t\t\t\tdestParentTask.subtasks.splice(adjustedIndex + 1, 0, sourceSubtask);\n\t\t\t} else {\n\t\t\t\t// Destination subtask doesn't exist, insert at end\n\t\t\t\tsourceParentTask.subtasks.splice(sourceSubtaskIndex, 1);\n\t\t\t\tdestParentTask.subtasks.push(sourceSubtask);\n\t\t\t}\n\t\t} else {\n\t\t\t// No existing subtasks, this will be the first one\n\t\t\tsourceParentTask.subtasks.splice(sourceSubtaskIndex, 1);\n\t\t\tdestParentTask.subtasks.push(sourceSubtask);\n\t\t}\n\t} else {\n\t\t// Moving between different parents\n\t\tmoveSubtaskToAnotherParent(\n\t\t\tsourceSubtask,\n\t\t\tsourceParentTask,\n\t\t\tsourceSubtaskIndex,\n\t\t\tdestParentTask,\n\t\t\tdestSubtaskId\n\t\t);\n\t}\n\n\treturn {\n\t\tmessage: `Moved subtask ${sourceId} to ${destinationId}`,\n\t\tmovedItem: sourceSubtask\n\t};\n}\n\nfunction moveSubtaskToTask(tasks, sourceId, destinationId) {\n\t// Parse source ID\n\tconst [sourceParentId, sourceSubtaskId] = sourceId\n\t\t.split('.')\n\t\t.map((id) => parseInt(id, 10));\n\tconst destTaskId = parseInt(destinationId, 10);\n\n\t// Find source parent and destination task\n\tconst sourceParentTask = tasks.find((t) => t.id === sourceParentId);\n\n\tif (!sourceParentTask) {\n\t\tthrow new Error(`Source parent task with ID ${sourceParentId} not found`);\n\t}\n\tif (!sourceParentTask.subtasks) {\n\t\tthrow new Error(`Source parent task ${sourceParentId} has no subtasks`);\n\t}\n\n\t// Find source subtask\n\tconst sourceSubtaskIndex = sourceParentTask.subtasks.findIndex(\n\t\t(st) => st.id === sourceSubtaskId\n\t);\n\tif (sourceSubtaskIndex === -1) {\n\t\tthrow new Error(`Source subtask ${sourceId} not found`);\n\t}\n\n\tconst sourceSubtask = sourceParentTask.subtasks[sourceSubtaskIndex];\n\n\t// Check if destination task exists\n\tconst existingDestTask = tasks.find((t) => t.id === destTaskId);\n\tif (existingDestTask) {\n\t\tthrow new Error(\n\t\t\t`Cannot move to existing task ID ${destTaskId}. 
Choose a different ID or use subtask destination.`\n\t\t);\n\t}\n\n\t// Create new task from subtask\n\tconst newTask = {\n\t\tid: destTaskId,\n\t\ttitle: sourceSubtask.title,\n\t\tdescription: sourceSubtask.description,\n\t\tstatus: sourceSubtask.status || 'pending',\n\t\tdependencies: sourceSubtask.dependencies || [],\n\t\tpriority: sourceSubtask.priority || 'medium',\n\t\tdetails: sourceSubtask.details || '',\n\t\ttestStrategy: sourceSubtask.testStrategy || '',\n\t\tsubtasks: []\n\t};\n\n\t// Remove subtask from source parent\n\tsourceParentTask.subtasks.splice(sourceSubtaskIndex, 1);\n\n\t// Insert new task in correct position\n\tconst insertIndex = tasks.findIndex((t) => t.id > destTaskId);\n\tif (insertIndex === -1) {\n\t\ttasks.push(newTask);\n\t} else {\n\t\ttasks.splice(insertIndex, 0, newTask);\n\t}\n\n\treturn {\n\t\tmessage: `Converted subtask ${sourceId} to task ${destinationId}`,\n\t\tmovedItem: newTask\n\t};\n}\n\nfunction moveTaskToSubtask(tasks, sourceId, destinationId) {\n\t// Parse IDs\n\tconst sourceTaskId = parseInt(sourceId, 10);\n\tconst [destParentId, destSubtaskId] = destinationId\n\t\t.split('.')\n\t\t.map((id) => parseInt(id, 10));\n\n\t// Find source task and destination parent\n\tconst sourceTaskIndex = tasks.findIndex((t) => t.id === sourceTaskId);\n\tconst destParentTask = tasks.find((t) => t.id === destParentId);\n\n\tif (sourceTaskIndex === -1) {\n\t\tthrow new Error(`Source task with ID ${sourceTaskId} not found`);\n\t}\n\tif (!destParentTask) {\n\t\tthrow new Error(\n\t\t\t`Destination parent task with ID ${destParentId} not found`\n\t\t);\n\t}\n\n\tconst sourceTask = tasks[sourceTaskIndex];\n\n\t// Initialize subtasks array if it doesn't exist (based on commit fixes)\n\tif (!destParentTask.subtasks) {\n\t\tdestParentTask.subtasks = [];\n\t}\n\n\t// Create new subtask from task\n\tconst newSubtask = {\n\t\tid: destSubtaskId,\n\t\ttitle: sourceTask.title,\n\t\tdescription: sourceTask.description,\n\t\tstatus: sourceTask.status || 
'pending',\n\t\tdependencies: sourceTask.dependencies || [],\n\t\tdetails: sourceTask.details || '',\n\t\ttestStrategy: sourceTask.testStrategy || ''\n\t};\n\n\t// Find insertion position (based on commit fixes)\n\tlet destSubtaskIndex = -1;\n\tif (destParentTask.subtasks.length > 0) {\n\t\tdestSubtaskIndex = destParentTask.subtasks.findIndex(\n\t\t\t(st) => st.id === destSubtaskId\n\t\t);\n\t\tif (destSubtaskIndex === -1) {\n\t\t\t// Subtask doesn't exist, we'll insert at the end\n\t\t\tdestSubtaskIndex = destParentTask.subtasks.length - 1;\n\t\t}\n\t}\n\n\t// Insert at specific position (based on commit fixes)\n\tconst insertPosition = destSubtaskIndex === -1 ? 0 : destSubtaskIndex + 1;\n\tdestParentTask.subtasks.splice(insertPosition, 0, newSubtask);\n\n\t// Remove the original task from the tasks array\n\ttasks.splice(sourceTaskIndex, 1);\n\n\treturn {\n\t\tmessage: `Converted task ${sourceId} to subtask ${destinationId}`,\n\t\tmovedItem: newSubtask\n\t};\n}\n\nfunction moveTaskToTask(tasks, sourceId, destinationId) {\n\tconst sourceTaskId = parseInt(sourceId, 10);\n\tconst destTaskId = parseInt(destinationId, 10);\n\n\t// Find source task\n\tconst sourceTaskIndex = tasks.findIndex((t) => t.id === sourceTaskId);\n\tif (sourceTaskIndex === -1) {\n\t\tthrow new Error(`Source task with ID ${sourceTaskId} not found`);\n\t}\n\n\tconst sourceTask = tasks[sourceTaskIndex];\n\n\t// Check if destination exists\n\tconst destTaskIndex = tasks.findIndex((t) => t.id === destTaskId);\n\n\tif (destTaskIndex !== -1) {\n\t\t// Destination exists - this could be overwriting or swapping\n\t\tconst destTask = tasks[destTaskIndex];\n\n\t\t// For now, throw an error to avoid accidental overwrites\n\t\tthrow new Error(\n\t\t\t`Task with ID ${destTaskId} already exists. 
Use a different destination ID.`\n\t\t);\n\t} else {\n\t\t// Destination doesn't exist - create new task ID\n\t\treturn moveTaskToNewId(tasks, sourceTaskIndex, sourceTask, destTaskId);\n\t}\n}\n\nfunction moveSubtaskToAnotherParent(\n\tsourceSubtask,\n\tsourceParentTask,\n\tsourceSubtaskIndex,\n\tdestParentTask,\n\tdestSubtaskId\n) {\n\tconst destSubtaskId_num = parseInt(destSubtaskId, 10);\n\n\t// Create new subtask with destination ID\n\tconst newSubtask = {\n\t\t...sourceSubtask,\n\t\tid: destSubtaskId_num\n\t};\n\n\t// Initialize subtasks array if it doesn't exist (based on commit fixes)\n\tif (!destParentTask.subtasks) {\n\t\tdestParentTask.subtasks = [];\n\t}\n\n\t// Find insertion position\n\tlet destSubtaskIndex = -1;\n\tif (destParentTask.subtasks.length > 0) {\n\t\tdestSubtaskIndex = destParentTask.subtasks.findIndex(\n\t\t\t(st) => st.id === destSubtaskId_num\n\t\t);\n\t\tif (destSubtaskIndex === -1) {\n\t\t\t// Subtask doesn't exist, we'll insert at the end\n\t\t\tdestSubtaskIndex = destParentTask.subtasks.length - 1;\n\t\t}\n\t}\n\n\t// Insert at the destination position (based on commit fixes)\n\tconst insertPosition = destSubtaskIndex === -1 ? 
0 : destSubtaskIndex + 1;\n\tdestParentTask.subtasks.splice(insertPosition, 0, newSubtask);\n\n\t// Remove the subtask from the original parent\n\tsourceParentTask.subtasks.splice(sourceSubtaskIndex, 1);\n\n\treturn newSubtask;\n}\n\nfunction moveTaskToNewId(tasks, sourceTaskIndex, sourceTask, destTaskId) {\n\tconst destTaskIndex = tasks.findIndex((t) => t.id === destTaskId);\n\n\t// Create moved task with new ID\n\tconst movedTask = {\n\t\t...sourceTask,\n\t\tid: destTaskId\n\t};\n\n\t// Update any dependencies that reference the old task ID\n\ttasks.forEach((task) => {\n\t\tif (task.dependencies && task.dependencies.includes(sourceTask.id)) {\n\t\t\tconst depIndex = task.dependencies.indexOf(sourceTask.id);\n\t\t\ttask.dependencies[depIndex] = destTaskId;\n\t\t}\n\t\tif (task.subtasks) {\n\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\tif (\n\t\t\t\t\tsubtask.dependencies &&\n\t\t\t\t\tsubtask.dependencies.includes(sourceTask.id)\n\t\t\t\t) {\n\t\t\t\t\tconst depIndex = subtask.dependencies.indexOf(sourceTask.id);\n\t\t\t\t\tsubtask.dependencies[depIndex] = destTaskId;\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\n\t// Update dependencies within movedTask's subtasks that reference sibling subtasks\n\tif (Array.isArray(movedTask.subtasks)) {\n\t\tmovedTask.subtasks.forEach((subtask) => {\n\t\t\tif (Array.isArray(subtask.dependencies)) {\n\t\t\t\tsubtask.dependencies = subtask.dependencies.map((dep) => {\n\t\t\t\t\t// If dependency is a string like \"oldParent.subId\", update to \"newParent.subId\"\n\t\t\t\t\tif (typeof dep === 'string' && dep.includes('.')) {\n\t\t\t\t\t\tconst [depParent, depSub] = dep.split('.');\n\t\t\t\t\t\tif (parseInt(depParent, 10) === sourceTask.id) {\n\t\t\t\t\t\t\treturn `${destTaskId}.${depSub}`;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t// If dependency is a number, and matches a subtask ID in the moved task, leave as is (context is implied)\n\t\t\t\t\treturn dep;\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\t}\n\n\t// Strategy based on commit fixes: 
remove source first, then replace destination\n\t// This avoids index shifting problems\n\n\t// Remove the source task first\n\ttasks.splice(sourceTaskIndex, 1);\n\n\t// Adjust the destination index if the source was before the destination\n\t// Since we removed the source, indices after it shift down by 1\n\tconst adjustedDestIndex =\n\t\tsourceTaskIndex < destTaskIndex ? destTaskIndex - 1 : destTaskIndex;\n\n\t// Replace the placeholder destination task with the moved task (based on commit fixes)\n\tif (adjustedDestIndex >= 0 && adjustedDestIndex < tasks.length) {\n\t\ttasks[adjustedDestIndex] = movedTask;\n\t} else {\n\t\t// Insert at the end if index is out of bounds\n\t\ttasks.push(movedTask);\n\t}\n\n\tlog('info', `Moved task ${sourceTask.id} to new ID ${destTaskId}`);\n\n\treturn {\n\t\tmessage: `Moved task ${sourceTask.id} to new ID ${destTaskId}`,\n\t\tmovedItem: movedTask\n\t};\n}\n\nexport default moveTask;\n"], ["/claude-task-master/scripts/modules/task-manager/clear-subtasks.js", "import path from 'path';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport Table from 'cli-table3';\n\nimport { log, readJSON, writeJSON, truncate, isSilentMode } from '../utils.js';\nimport { displayBanner } from '../ui.js';\n\n/**\n * Clear subtasks from specified tasks\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} taskIds - Task IDs to clear subtasks from\n * @param {Object} context - Context object containing projectRoot and tag\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n */\nfunction clearSubtasks(tasksPath, taskIds, context = {}) {\n\tconst { projectRoot, tag } = context;\n\tlog('info', `Reading tasks from ${tasksPath}...`);\n\tconst data = readJSON(tasksPath, projectRoot, tag);\n\tif (!data || !data.tasks) {\n\t\tlog('error', 'No valid tasks found.');\n\t\tprocess.exit(1);\n\t}\n\n\tif (!isSilentMode()) 
{\n\t\tconsole.log(\n\t\t\tboxen(chalk.white.bold('Clearing Subtasks'), {\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'blue',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t})\n\t\t);\n\t}\n\n\t// Handle multiple task IDs (comma-separated)\n\tconst taskIdArray = taskIds.split(',').map((id) => id.trim());\n\tlet clearedCount = 0;\n\n\t// Create a summary table for the cleared subtasks\n\tconst summaryTable = new Table({\n\t\thead: [\n\t\t\tchalk.cyan.bold('Task ID'),\n\t\t\tchalk.cyan.bold('Task Title'),\n\t\t\tchalk.cyan.bold('Subtasks Cleared')\n\t\t],\n\t\tcolWidths: [10, 50, 20],\n\t\tstyle: { head: [], border: [] }\n\t});\n\n\ttaskIdArray.forEach((taskId) => {\n\t\tconst id = parseInt(taskId, 10);\n\t\tif (Number.isNaN(id)) {\n\t\t\tlog('error', `Invalid task ID: ${taskId}`);\n\t\t\treturn;\n\t\t}\n\n\t\tconst task = data.tasks.find((t) => t.id === id);\n\t\tif (!task) {\n\t\t\tlog('error', `Task ${id} not found`);\n\t\t\treturn;\n\t\t}\n\n\t\tif (!task.subtasks || task.subtasks.length === 0) {\n\t\t\tlog('info', `Task ${id} has no subtasks to clear`);\n\t\t\tsummaryTable.push([\n\t\t\t\tid.toString(),\n\t\t\t\ttruncate(task.title, 47),\n\t\t\t\tchalk.yellow('No subtasks')\n\t\t\t]);\n\t\t\treturn;\n\t\t}\n\n\t\tconst subtaskCount = task.subtasks.length;\n\t\ttask.subtasks = [];\n\t\tclearedCount++;\n\t\tlog('info', `Cleared ${subtaskCount} subtasks from task ${id}`);\n\n\t\tsummaryTable.push([\n\t\t\tid.toString(),\n\t\t\ttruncate(task.title, 47),\n\t\t\tchalk.green(`${subtaskCount} subtasks cleared`)\n\t\t]);\n\t});\n\n\tif (clearedCount > 0) {\n\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\n\t\t// Show summary table\n\t\tif (!isSilentMode()) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(chalk.white.bold('Subtask Clearing Summary:'), {\n\t\t\t\t\tpadding: { left: 2, right: 2, top: 0, bottom: 0 },\n\t\t\t\t\tmargin: { top: 1, bottom: 0 },\n\t\t\t\t\tborderColor: 'blue',\n\t\t\t\t\tborderStyle: 
'round'\n\t\t\t\t})\n\t\t\t);\n\t\t\tconsole.log(summaryTable.toString());\n\t\t}\n\n\t\t// Success message\n\t\tif (!isSilentMode()) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.green(\n\t\t\t\t\t\t`Successfully cleared subtasks from ${chalk.bold(clearedCount)} task(s)`\n\t\t\t\t\t),\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'green',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\n\t\t\t// Next steps suggestion\n\t\t\tconsole.log(\n\t\t\t\tboxen(\n\t\t\t\t\tchalk.white.bold('Next Steps:') +\n\t\t\t\t\t\t'\\n\\n' +\n\t\t\t\t\t\t`${chalk.cyan('1.')} Run ${chalk.yellow('task-master expand --id=<id>')} to generate new subtasks\\n` +\n\t\t\t\t\t\t`${chalk.cyan('2.')} Run ${chalk.yellow('task-master list --with-subtasks')} to verify changes`,\n\t\t\t\t\t{\n\t\t\t\t\t\tpadding: 1,\n\t\t\t\t\t\tborderColor: 'cyan',\n\t\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t);\n\t\t}\n\t} else {\n\t\tif (!isSilentMode()) {\n\t\t\tconsole.log(\n\t\t\t\tboxen(chalk.yellow('No subtasks were cleared'), {\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tmargin: { top: 1 }\n\t\t\t\t})\n\t\t\t);\n\t\t}\n\t}\n}\n\nexport default clearSubtasks;\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/initialize-project.js", "import { initializeProject } from '../../../../scripts/init.js'; // Import core function and its logger if needed separately\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n\t// isSilentMode // Not used directly here\n} from '../../../../scripts/modules/utils.js';\nimport os from 'os'; // Import os module for home directory check\nimport { RULE_PROFILES } from '../../../../src/constants/profiles.js';\nimport { convertAllRulesToProfileRules } from '../../../../src/utils/rule-transformer.js';\n\n/**\n * Direct function wrapper for initializing a project.\n * Derives target 
directory from session, sets CWD, and calls core init logic.\n * @param {object} args - Arguments containing initialization options (addAliases, initGit, storeTasksInGit, skipInstall, yes, projectRoot, rules)\n * @param {object} log - The FastMCP logger instance.\n * @param {object} context - The context object, must contain { session }.\n * @returns {Promise<{success: boolean, data?: any, error?: {code: string, message: string}}>} - Standard result object.\n */\nexport async function initializeProjectDirect(args, log, context = {}) {\n\tconst { session } = context; // Keep session if core logic needs it\n\tconst homeDir = os.homedir();\n\n\tlog.info(`Args received in direct function: ${JSON.stringify(args)}`);\n\n\t// --- Determine Target Directory ---\n\t// TRUST the projectRoot passed from the tool layer via args\n\t// The HOF in the tool layer already normalized and validated it came from a reliable source (args or session)\n\tconst targetDirectory = args.projectRoot;\n\n\t// --- Validate the targetDirectory (basic sanity checks) ---\n\tif (\n\t\t!targetDirectory ||\n\t\ttypeof targetDirectory !== 'string' || // Ensure it's a string\n\t\ttargetDirectory === '/' ||\n\t\ttargetDirectory === homeDir\n\t) {\n\t\tlog.error(\n\t\t\t`Invalid target directory received from tool layer: '${targetDirectory}'`\n\t\t);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'INVALID_TARGET_DIRECTORY',\n\t\t\t\tmessage: `Cannot initialize project: Invalid target directory '${targetDirectory}' received. 
Please ensure a valid workspace/folder is open or specified.`,\n\t\t\t\tdetails: `Received args.projectRoot: ${args.projectRoot}` // Show what was received\n\t\t\t}\n\t\t};\n\t}\n\n\t// --- Proceed with validated targetDirectory ---\n\tlog.info(`Validated target directory for initialization: ${targetDirectory}`);\n\n\tconst originalCwd = process.cwd();\n\tlet resultData;\n\tlet success = false;\n\tlet errorResult = null;\n\n\tlog.info(\n\t\t`Temporarily changing CWD to ${targetDirectory} for initialization.`\n\t);\n\tprocess.chdir(targetDirectory); // Change CWD to the HOF-provided root\n\n\tenableSilentMode();\n\ttry {\n\t\t// Construct options ONLY from the relevant flags in args\n\t\t// The core initializeProject operates in the current CWD, which we just set\n\t\tconst options = {\n\t\t\taddAliases: args.addAliases,\n\t\t\tinitGit: args.initGit,\n\t\t\tstoreTasksInGit: args.storeTasksInGit,\n\t\t\tskipInstall: args.skipInstall,\n\t\t\tyes: true // Force yes mode\n\t\t};\n\n\t\t// Handle rules option with MCP-specific defaults\n\t\tif (Array.isArray(args.rules) && args.rules.length > 0) {\n\t\t\toptions.rules = args.rules;\n\t\t\toptions.rulesExplicitlyProvided = true;\n\t\t\tlog.info(`Including rules: ${args.rules.join(', ')}`);\n\t\t} else {\n\t\t\t// For MCP initialization, default to Cursor profile only\n\t\t\toptions.rules = ['cursor'];\n\t\t\toptions.rulesExplicitlyProvided = true;\n\t\t\tlog.info(`No rule profiles specified, defaulting to: Cursor`);\n\t\t}\n\n\t\tlog.info(`Initializing project with options: ${JSON.stringify(options)}`);\n\t\tconst result = await initializeProject(options); // Call core logic\n\n\t\tresultData = {\n\t\t\tmessage: 'Project initialized successfully.',\n\t\t\tnext_step:\n\t\t\t\t'Now that the project is initialized, the next step is to create the tasks by parsing a PRD. This will create the tasks folder and the initial task files (tasks folder will be created when parse-prd is run). 
The parse-prd tool will require a prd.txt file as input (typically found in .taskmaster/docs/ directory). You can create a prd.txt file by asking the user about their idea, and then using the .taskmaster/templates/example_prd.txt file as a template to generate a prd.txt file in .taskmaster/docs/. You may skip all of this if the user already has a prd.txt file. You can THEN use the parse-prd tool to create the tasks. So: step 1 after initialization is to create a prd.txt file in .taskmaster/docs/prd.txt or confirm the user already has one. Step 2 is to use the parse-prd tool to create the tasks. Do not bother looking for tasks after initialization, just use the parse-prd tool to create the tasks after creating a prd.txt from which to parse the tasks. You do NOT need to reinitialize the project to parse-prd.',\n\t\t\t...result\n\t\t};\n\t\tsuccess = true;\n\t\tlog.info(\n\t\t\t`Project initialization completed successfully in ${targetDirectory}.`\n\t\t);\n\t} catch (error) {\n\t\tlog.error(`Core initializeProject failed: ${error.message}`);\n\t\terrorResult = {\n\t\t\tcode: 'INITIALIZATION_FAILED',\n\t\t\tmessage: `Core project initialization failed: ${error.message}`,\n\t\t\tdetails: error.stack\n\t\t};\n\t\tsuccess = false;\n\t} finally {\n\t\tdisableSilentMode();\n\t\tlog.info(`Restoring original CWD: ${originalCwd}`);\n\t\tprocess.chdir(originalCwd);\n\t}\n\n\tif (success) {\n\t\treturn { success: true, data: resultData };\n\t} else {\n\t\treturn { success: false, error: errorResult };\n\t}\n}\n"], ["/claude-task-master/scripts/modules/utils/fuzzyTaskSearch.js", "/**\n * fuzzyTaskSearch.js\n * Reusable fuzzy search utility for finding relevant tasks based on semantic similarity\n */\n\nimport Fuse from 'fuse.js';\n\n/**\n * Configuration for different search contexts\n */\nconst SEARCH_CONFIGS = {\n\tresearch: {\n\t\tthreshold: 0.5, // More lenient for research (broader context)\n\t\tlimit: 20,\n\t\tkeys: [\n\t\t\t{ name: 'title', weight: 2.0 },\n\t\t\t{ name: 
'description', weight: 1.0 },\n\t\t\t{ name: 'details', weight: 0.5 },\n\t\t\t{ name: 'dependencyTitles', weight: 0.5 }\n\t\t]\n\t},\n\taddTask: {\n\t\tthreshold: 0.4, // Stricter for add-task (more precise context)\n\t\tlimit: 15,\n\t\tkeys: [\n\t\t\t{ name: 'title', weight: 2.0 },\n\t\t\t{ name: 'description', weight: 1.5 },\n\t\t\t{ name: 'details', weight: 0.8 },\n\t\t\t{ name: 'dependencyTitles', weight: 0.5 }\n\t\t]\n\t},\n\tdefault: {\n\t\tthreshold: 0.4,\n\t\tlimit: 15,\n\t\tkeys: [\n\t\t\t{ name: 'title', weight: 2.0 },\n\t\t\t{ name: 'description', weight: 1.5 },\n\t\t\t{ name: 'details', weight: 1.0 },\n\t\t\t{ name: 'dependencyTitles', weight: 0.5 }\n\t\t]\n\t}\n};\n\n/**\n * Purpose categories for pattern-based task matching\n */\nconst PURPOSE_CATEGORIES = [\n\t{ pattern: /(command|cli|flag)/i, label: 'CLI commands' },\n\t{ pattern: /(task|subtask|add)/i, label: 'Task management' },\n\t{ pattern: /(dependency|depend)/i, label: 'Dependency handling' },\n\t{ pattern: /(AI|model|prompt|research)/i, label: 'AI integration' },\n\t{ pattern: /(UI|display|show|interface)/i, label: 'User interface' },\n\t{ pattern: /(schedule|time|cron)/i, label: 'Scheduling' },\n\t{ pattern: /(config|setting|option)/i, label: 'Configuration' },\n\t{ pattern: /(test|testing|spec)/i, label: 'Testing' },\n\t{ pattern: /(auth|login|user)/i, label: 'Authentication' },\n\t{ pattern: /(database|db|data)/i, label: 'Data management' },\n\t{ pattern: /(api|endpoint|route)/i, label: 'API development' },\n\t{ pattern: /(deploy|build|release)/i, label: 'Deployment' },\n\t{ pattern: /(security|auth|login|user)/i, label: 'Security' },\n\t{ pattern: /.*/, label: 'Other' }\n];\n\n/**\n * Relevance score thresholds\n */\nconst RELEVANCE_THRESHOLDS = {\n\thigh: 0.25,\n\tmedium: 0.4,\n\tlow: 0.6\n};\n\n/**\n * Fuzzy search utility class for finding relevant tasks\n */\nexport class FuzzyTaskSearch {\n\tconstructor(tasks, searchType = 'default') {\n\t\tthis.tasks = tasks;\n\t\tthis.config = 
SEARCH_CONFIGS[searchType] || SEARCH_CONFIGS.default;\n\t\tthis.searchableTasks = this._prepareSearchableTasks(tasks);\n\t\tthis.fuse = new Fuse(this.searchableTasks, {\n\t\t\tincludeScore: true,\n\t\t\tthreshold: this.config.threshold,\n\t\t\tkeys: this.config.keys,\n\t\t\tshouldSort: true,\n\t\t\tuseExtendedSearch: true,\n\t\t\tlimit: this.config.limit\n\t\t});\n\t}\n\n\t/**\n\t * Prepare tasks for searching by expanding dependency titles\n\t * @param {Array} tasks - Array of task objects\n\t * @returns {Array} Tasks with expanded dependency information\n\t */\n\t_prepareSearchableTasks(tasks) {\n\t\treturn tasks.map((task) => {\n\t\t\t// Get titles of this task's dependencies if they exist\n\t\t\tconst dependencyTitles =\n\t\t\t\ttask.dependencies?.length > 0\n\t\t\t\t\t? task.dependencies\n\t\t\t\t\t\t\t.map((depId) => {\n\t\t\t\t\t\t\t\tconst depTask = tasks.find((t) => t.id === depId);\n\t\t\t\t\t\t\t\treturn depTask ? depTask.title : '';\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t.filter((title) => title)\n\t\t\t\t\t\t\t.join(' ')\n\t\t\t\t\t: '';\n\n\t\t\treturn {\n\t\t\t\t...task,\n\t\t\t\tdependencyTitles\n\t\t\t};\n\t\t});\n\t}\n\n\t/**\n\t * Extract significant words from a prompt\n\t * @param {string} prompt - The search prompt\n\t * @returns {Array<string>} Array of significant words\n\t */\n\t_extractPromptWords(prompt) {\n\t\treturn prompt\n\t\t\t.toLowerCase()\n\t\t\t.replace(/[^\\w\\s-]/g, ' ') // Replace non-alphanumeric chars with spaces\n\t\t\t.split(/\\s+/)\n\t\t\t.filter((word) => word.length > 3); // Words at least 4 chars\n\t}\n\n\t/**\n\t * Find tasks related to a prompt using fuzzy search\n\t * @param {string} prompt - The search prompt\n\t * @param {Object} options - Search options\n\t * @param {number} [options.maxResults=8] - Maximum number of results to return\n\t * @param {boolean} [options.includeRecent=true] - Include recent tasks in results\n\t * @param {boolean} [options.includeCategoryMatches=true] - Include category-based matches\n\t * 
@returns {Object} Search results with relevance breakdown\n\t */\n\tfindRelevantTasks(prompt, options = {}) {\n\t\tconst {\n\t\t\tmaxResults = 8,\n\t\t\tincludeRecent = true,\n\t\t\tincludeCategoryMatches = true\n\t\t} = options;\n\n\t\t// Extract significant words from prompt\n\t\tconst promptWords = this._extractPromptWords(prompt);\n\n\t\t// Perform fuzzy search with full prompt\n\t\tconst fuzzyResults = this.fuse.search(prompt);\n\n\t\t// Also search for each significant word to catch different aspects\n\t\tlet wordResults = [];\n\t\tfor (const word of promptWords) {\n\t\t\tif (word.length > 5) {\n\t\t\t\t// Only use significant words\n\t\t\t\tconst results = this.fuse.search(word);\n\t\t\t\tif (results.length > 0) {\n\t\t\t\t\twordResults.push(...results);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Merge and deduplicate results\n\t\tconst mergedResults = [...fuzzyResults];\n\n\t\t// Add word results that aren't already in fuzzyResults\n\t\tfor (const wordResult of wordResults) {\n\t\t\tif (!mergedResults.some((r) => r.item.id === wordResult.item.id)) {\n\t\t\t\tmergedResults.push(wordResult);\n\t\t\t}\n\t\t}\n\n\t\t// Group search results by relevance\n\t\tconst highRelevance = mergedResults\n\t\t\t.filter((result) => result.score < RELEVANCE_THRESHOLDS.high)\n\t\t\t.map((result) => ({ ...result.item, score: result.score }));\n\n\t\tconst mediumRelevance = mergedResults\n\t\t\t.filter(\n\t\t\t\t(result) =>\n\t\t\t\t\tresult.score >= RELEVANCE_THRESHOLDS.high &&\n\t\t\t\t\tresult.score < RELEVANCE_THRESHOLDS.medium\n\t\t\t)\n\t\t\t.map((result) => ({ ...result.item, score: result.score }));\n\n\t\tconst lowRelevance = mergedResults\n\t\t\t.filter(\n\t\t\t\t(result) =>\n\t\t\t\t\tresult.score >= RELEVANCE_THRESHOLDS.medium &&\n\t\t\t\t\tresult.score < RELEVANCE_THRESHOLDS.low\n\t\t\t)\n\t\t\t.map((result) => ({ ...result.item, score: result.score }));\n\n\t\t// Get recent tasks (newest first) if requested\n\t\tconst recentTasks = includeRecent\n\t\t\t? 
[...this.tasks].sort((a, b) => b.id - a.id).slice(0, 5)\n\t\t\t: [];\n\n\t\t// Find category-based matches if requested\n\t\tlet categoryTasks = [];\n\t\tlet promptCategory = null;\n\t\tif (includeCategoryMatches) {\n\t\t\tpromptCategory = PURPOSE_CATEGORIES.find((cat) =>\n\t\t\t\tcat.pattern.test(prompt)\n\t\t\t);\n\t\t\tcategoryTasks = promptCategory\n\t\t\t\t? this.tasks\n\t\t\t\t\t\t.filter(\n\t\t\t\t\t\t\t(t) =>\n\t\t\t\t\t\t\t\tpromptCategory.pattern.test(t.title) ||\n\t\t\t\t\t\t\t\tpromptCategory.pattern.test(t.description) ||\n\t\t\t\t\t\t\t\t(t.details && promptCategory.pattern.test(t.details))\n\t\t\t\t\t\t)\n\t\t\t\t\t\t.slice(0, 3)\n\t\t\t\t: [];\n\t\t}\n\n\t\t// Combine all relevant tasks, prioritizing by relevance\n\t\tconst allRelevantTasks = [...highRelevance];\n\n\t\t// Add medium relevance if not already included\n\t\tfor (const task of mediumRelevance) {\n\t\t\tif (!allRelevantTasks.some((t) => t.id === task.id)) {\n\t\t\t\tallRelevantTasks.push(task);\n\t\t\t}\n\t\t}\n\n\t\t// Add low relevance if not already included\n\t\tfor (const task of lowRelevance) {\n\t\t\tif (!allRelevantTasks.some((t) => t.id === task.id)) {\n\t\t\t\tallRelevantTasks.push(task);\n\t\t\t}\n\t\t}\n\n\t\t// Add category tasks if not already included\n\t\tfor (const task of categoryTasks) {\n\t\t\tif (!allRelevantTasks.some((t) => t.id === task.id)) {\n\t\t\t\tallRelevantTasks.push(task);\n\t\t\t}\n\t\t}\n\n\t\t// Add recent tasks if not already included\n\t\tfor (const task of recentTasks) {\n\t\t\tif (!allRelevantTasks.some((t) => t.id === task.id)) {\n\t\t\t\tallRelevantTasks.push(task);\n\t\t\t}\n\t\t}\n\n\t\t// Get top N results for final output\n\t\tconst finalResults = allRelevantTasks.slice(0, maxResults);\n\n\t\treturn {\n\t\t\tresults: finalResults,\n\t\t\tbreakdown: {\n\t\t\t\thighRelevance,\n\t\t\t\tmediumRelevance,\n\t\t\t\tlowRelevance,\n\t\t\t\tcategoryTasks,\n\t\t\t\trecentTasks,\n\t\t\t\tpromptCategory,\n\t\t\t\tpromptWords\n\t\t\t},\n\t\t\tmetadata: 
{\n\t\t\t\ttotalSearched: this.tasks.length,\n\t\t\t\tfuzzyMatches: fuzzyResults.length,\n\t\t\t\twordMatches: wordResults.length,\n\t\t\t\tfinalCount: finalResults.length\n\t\t\t}\n\t\t};\n\t}\n\n\t/**\n\t * Get task IDs from search results\n\t * @param {Object} searchResults - Results from findRelevantTasks\n\t * @returns {Array<string>} Array of task ID strings\n\t */\n\tgetTaskIds(searchResults) {\n\t\treturn searchResults.results.map((task) => task.id.toString());\n\t}\n\n\t/**\n\t * Get task IDs including subtasks from search results\n\t * @param {Object} searchResults - Results from findRelevantTasks\n\t * @param {boolean} [includeSubtasks=false] - Whether to include subtask IDs\n\t * @returns {Array<string>} Array of task and subtask ID strings\n\t */\n\tgetTaskIdsWithSubtasks(searchResults, includeSubtasks = false) {\n\t\tconst taskIds = [];\n\n\t\tfor (const task of searchResults.results) {\n\t\t\ttaskIds.push(task.id.toString());\n\n\t\t\tif (includeSubtasks && task.subtasks && task.subtasks.length > 0) {\n\t\t\t\tfor (const subtask of task.subtasks) {\n\t\t\t\t\ttaskIds.push(`${task.id}.${subtask.id}`);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn taskIds;\n\t}\n\n\t/**\n\t * Format search results for display\n\t * @param {Object} searchResults - Results from findRelevantTasks\n\t * @param {Object} options - Formatting options\n\t * @returns {string} Formatted search results summary\n\t */\n\tformatSearchSummary(searchResults, options = {}) {\n\t\tconst { includeScores = false, includeBreakdown = false } = options;\n\t\tconst { results, breakdown, metadata } = searchResults;\n\n\t\tlet summary = `Found ${results.length} relevant tasks from ${metadata.totalSearched} total tasks`;\n\n\t\tif (includeBreakdown && breakdown) {\n\t\t\tconst parts = [];\n\t\t\tif (breakdown.highRelevance.length > 0)\n\t\t\t\tparts.push(`${breakdown.highRelevance.length} high relevance`);\n\t\t\tif (breakdown.mediumRelevance.length > 
0)\n\t\t\t\tparts.push(`${breakdown.mediumRelevance.length} medium relevance`);\n\t\t\tif (breakdown.lowRelevance.length > 0)\n\t\t\t\tparts.push(`${breakdown.lowRelevance.length} low relevance`);\n\t\t\tif (breakdown.categoryTasks.length > 0)\n\t\t\t\tparts.push(`${breakdown.categoryTasks.length} category matches`);\n\n\t\t\tif (parts.length > 0) {\n\t\t\t\tsummary += ` (${parts.join(', ')})`;\n\t\t\t}\n\n\t\t\tif (breakdown.promptCategory) {\n\t\t\t\tsummary += `\\nCategory detected: ${breakdown.promptCategory.label}`;\n\t\t\t}\n\t\t}\n\n\t\treturn summary;\n\t}\n}\n\n/**\n * Factory function to create a fuzzy search instance\n * @param {Array} tasks - Array of task objects\n * @param {string} [searchType='default'] - Type of search configuration to use\n * @returns {FuzzyTaskSearch} Fuzzy search instance\n */\nexport function createFuzzyTaskSearch(tasks, searchType = 'default') {\n\treturn new FuzzyTaskSearch(tasks, searchType);\n}\n\n/**\n * Quick utility function to find relevant task IDs for a prompt\n * @param {Array} tasks - Array of task objects\n * @param {string} prompt - Search prompt\n * @param {Object} options - Search options\n * @returns {Array<string>} Array of relevant task ID strings\n */\nexport function findRelevantTaskIds(tasks, prompt, options = {}) {\n\tconst {\n\t\tsearchType = 'default',\n\t\tmaxResults = 8,\n\t\tincludeSubtasks = false\n\t} = options;\n\n\tconst fuzzySearch = new FuzzyTaskSearch(tasks, searchType);\n\tconst results = fuzzySearch.findRelevantTasks(prompt, { maxResults });\n\n\treturn includeSubtasks\n\t\t? 
fuzzySearch.getTaskIdsWithSubtasks(results, true)\n\t\t: fuzzySearch.getTaskIds(results);\n}\n\nexport default FuzzyTaskSearch;\n"], ["/claude-task-master/scripts/modules/ai-services-unified.js", "/**\n * ai-services-unified.js\n * Centralized AI service layer using provider modules and config-manager.\n */\n\n// Vercel AI SDK functions are NOT called directly anymore.\n// import { generateText, streamText, generateObject } from 'ai';\n\n// --- Core Dependencies ---\nimport {\n\tMODEL_MAP,\n\tgetAzureBaseURL,\n\tgetBaseUrlForRole,\n\tgetBedrockBaseURL,\n\tgetDebugFlag,\n\tgetFallbackModelId,\n\tgetFallbackProvider,\n\tgetMainModelId,\n\tgetMainProvider,\n\tgetOllamaBaseURL,\n\tgetParametersForRole,\n\tgetResearchModelId,\n\tgetResearchProvider,\n\tgetResponseLanguage,\n\tgetUserId,\n\tgetVertexLocation,\n\tgetVertexProjectId,\n\tisApiKeySet,\n\tprovidersWithoutApiKeys\n} from './config-manager.js';\nimport {\n\tfindProjectRoot,\n\tgetCurrentTag,\n\tlog,\n\tresolveEnvVariable\n} from './utils.js';\n\n// Import provider classes\nimport {\n\tAnthropicAIProvider,\n\tAzureProvider,\n\tBedrockAIProvider,\n\tClaudeCodeProvider,\n\tGeminiCliProvider,\n\tGoogleAIProvider,\n\tGroqProvider,\n\tOllamaAIProvider,\n\tOpenAIProvider,\n\tOpenRouterAIProvider,\n\tPerplexityAIProvider,\n\tVertexAIProvider,\n\tXAIProvider\n} from '../../src/ai-providers/index.js';\n\n// Import the provider registry\nimport ProviderRegistry from '../../src/provider-registry/index.js';\n\n// Create provider instances\nconst PROVIDERS = {\n\tanthropic: new AnthropicAIProvider(),\n\tperplexity: new PerplexityAIProvider(),\n\tgoogle: new GoogleAIProvider(),\n\topenai: new OpenAIProvider(),\n\txai: new XAIProvider(),\n\tgroq: new GroqProvider(),\n\topenrouter: new OpenRouterAIProvider(),\n\tollama: new OllamaAIProvider(),\n\tbedrock: new BedrockAIProvider(),\n\tazure: new AzureProvider(),\n\tvertex: new VertexAIProvider(),\n\t'claude-code': new ClaudeCodeProvider(),\n\t'gemini-cli': new 
GeminiCliProvider()\n};\n\nfunction _getProvider(providerName) {\n\t// First check the static PROVIDERS object\n\tif (PROVIDERS[providerName]) {\n\t\treturn PROVIDERS[providerName];\n\t}\n\n\t// If not found, check the provider registry\n\tconst providerRegistry = ProviderRegistry.getInstance();\n\tif (providerRegistry.hasProvider(providerName)) {\n\t\tlog('debug', `Provider \"${providerName}\" found in dynamic registry`);\n\t\treturn providerRegistry.getProvider(providerName);\n\t}\n\n\t// Provider not found in either location\n\treturn null;\n}\n\n// Helper function to get cost for a specific model\nfunction _getCostForModel(providerName, modelId) {\n\tif (!MODEL_MAP || !MODEL_MAP[providerName]) {\n\t\tlog(\n\t\t\t'warn',\n\t\t\t`Provider \"${providerName}\" not found in MODEL_MAP. Cannot determine cost for model ${modelId}.`\n\t\t);\n\t\treturn { inputCost: 0, outputCost: 0, currency: 'USD' }; // Default to zero cost\n\t}\n\n\tconst modelData = MODEL_MAP[providerName].find((m) => m.id === modelId);\n\n\tif (!modelData || !modelData.cost_per_1m_tokens) {\n\t\tlog(\n\t\t\t'debug',\n\t\t\t`Cost data not found for model \"${modelId}\" under provider \"${providerName}\". 
Assuming zero cost.`\n\t\t);\n\t\treturn { inputCost: 0, outputCost: 0, currency: 'USD' }; // Default to zero cost\n\t}\n\n\t// Ensure currency is part of the returned object, defaulting if not present\n\tconst currency = modelData.cost_per_1m_tokens.currency || 'USD';\n\n\treturn {\n\t\tinputCost: modelData.cost_per_1m_tokens.input || 0,\n\t\toutputCost: modelData.cost_per_1m_tokens.output || 0,\n\t\tcurrency: currency\n\t};\n}\n\n// Helper function to get tag information for responses\nfunction _getTagInfo(projectRoot) {\n\ttry {\n\t\tif (!projectRoot) {\n\t\t\treturn { currentTag: 'master', availableTags: ['master'] };\n\t\t}\n\n\t\tconst currentTag = getCurrentTag(projectRoot);\n\n\t\t// Read available tags from tasks.json\n\t\tlet availableTags = ['master']; // Default fallback\n\t\ttry {\n\t\t\tconst path = require('path');\n\t\t\tconst fs = require('fs');\n\t\t\tconst tasksPath = path.join(\n\t\t\t\tprojectRoot,\n\t\t\t\t'.taskmaster',\n\t\t\t\t'tasks',\n\t\t\t\t'tasks.json'\n\t\t\t);\n\n\t\t\tif (fs.existsSync(tasksPath)) {\n\t\t\t\tconst tasksData = JSON.parse(fs.readFileSync(tasksPath, 'utf8'));\n\t\t\t\tif (tasksData && typeof tasksData === 'object') {\n\t\t\t\t\t// Check if it's tagged format (has tag-like keys with tasks arrays)\n\t\t\t\t\tconst potentialTags = Object.keys(tasksData).filter(\n\t\t\t\t\t\t(key) =>\n\t\t\t\t\t\t\ttasksData[key] &&\n\t\t\t\t\t\t\ttypeof tasksData[key] === 'object' &&\n\t\t\t\t\t\t\tArray.isArray(tasksData[key].tasks)\n\t\t\t\t\t);\n\n\t\t\t\t\tif (potentialTags.length > 0) {\n\t\t\t\t\t\tavailableTags = potentialTags;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (readError) {\n\t\t\t// Silently fall back to default if we can't read tasks file\n\t\t\tif (getDebugFlag()) {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`Could not read tasks file for available tags: ${readError.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\treturn {\n\t\t\tcurrentTag: currentTag || 'master',\n\t\t\tavailableTags: availableTags\n\t\t};\n\t} 
catch (error) {\n\t\tif (getDebugFlag()) {\n\t\t\tlog('debug', `Error getting tag information: ${error.message}`);\n\t\t}\n\t\treturn { currentTag: 'master', availableTags: ['master'] };\n\t}\n}\n\n// --- Configuration for Retries ---\nconst MAX_RETRIES = 2;\nconst INITIAL_RETRY_DELAY_MS = 1000;\n\n// Helper function to check if an error is retryable\nfunction isRetryableError(error) {\n\tconst errorMessage = error.message?.toLowerCase() || '';\n\treturn (\n\t\terrorMessage.includes('rate limit') ||\n\t\terrorMessage.includes('overloaded') ||\n\t\terrorMessage.includes('service temporarily unavailable') ||\n\t\terrorMessage.includes('timeout') ||\n\t\terrorMessage.includes('network error') ||\n\t\terror.status === 429 ||\n\t\terror.status >= 500\n\t);\n}\n\n/**\n * Extracts a user-friendly error message from a potentially complex AI error object.\n * Prioritizes nested messages and falls back to the top-level message.\n * @param {Error | object | any} error - The error object.\n * @returns {string} A concise error message.\n */\nfunction _extractErrorMessage(error) {\n\ttry {\n\t\t// Attempt 1: Look for Vercel SDK specific nested structure (common)\n\t\tif (error?.data?.error?.message) {\n\t\t\treturn error.data.error.message;\n\t\t}\n\n\t\t// Attempt 2: Look for nested error message directly in the error object\n\t\tif (error?.error?.message) {\n\t\t\treturn error.error.message;\n\t\t}\n\n\t\t// Attempt 3: Look for nested error message in response body if it's JSON string\n\t\tif (typeof error?.responseBody === 'string') {\n\t\t\ttry {\n\t\t\t\tconst body = JSON.parse(error.responseBody);\n\t\t\t\tif (body?.error?.message) {\n\t\t\t\t\treturn body.error.message;\n\t\t\t\t}\n\t\t\t} catch (parseError) {\n\t\t\t\t// Ignore if responseBody is not valid JSON\n\t\t\t}\n\t\t}\n\n\t\t// Attempt 4: Use the top-level message if it exists\n\t\tif (typeof error?.message === 'string' && error.message) {\n\t\t\treturn error.message;\n\t\t}\n\n\t\t// Attempt 5: Handle simple 
string errors\n\t\tif (typeof error === 'string') {\n\t\t\treturn error;\n\t\t}\n\n\t\t// Fallback\n\t\treturn 'An unknown AI service error occurred.';\n\t} catch (e) {\n\t\t// Safety net\n\t\treturn 'Failed to extract error message.';\n\t}\n}\n\n/**\n * Internal helper to resolve the API key for a given provider.\n * @param {string} providerName - The name of the provider (lowercase).\n * @param {object|null} session - Optional MCP session object.\n * @param {string|null} projectRoot - Optional project root path for .env fallback.\n * @returns {string|null} The API key or null if not found/needed.\n * @throws {Error} If a required API key is missing.\n */\nfunction _resolveApiKey(providerName, session, projectRoot = null) {\n\t// Get provider instance\n\tconst provider = _getProvider(providerName);\n\tif (!provider) {\n\t\tthrow new Error(\n\t\t\t`Unknown provider '${providerName}' for API key resolution.`\n\t\t);\n\t}\n\n\t// All providers must implement getRequiredApiKeyName()\n\tconst envVarName = provider.getRequiredApiKeyName();\n\n\t// If envVarName is null (like for MCP), return null directly\n\tif (envVarName === null) {\n\t\treturn null;\n\t}\n\n\tconst apiKey = resolveEnvVariable(envVarName, session, projectRoot);\n\n\t// Special handling for providers that can use alternative auth or no API key\n\tif (!provider.isRequiredApiKey()) {\n\t\treturn apiKey || null;\n\t}\n\n\tif (!apiKey) {\n\t\tthrow new Error(\n\t\t\t`Required API key ${envVarName} for provider '${providerName}' is not set in environment, session, or .env file.`\n\t\t);\n\t}\n\treturn apiKey;\n}\n\n/**\n * Internal helper to attempt a provider-specific AI API call with retries.\n *\n * @param {function} providerApiFn - The specific provider function to call (e.g., generateAnthropicText).\n * @param {object} callParams - Parameters object for the provider function.\n * @param {string} providerName - Name of the provider (for logging).\n * @param {string} modelId - Specific model ID (for 
logging).\n * @param {string} attemptRole - The role being attempted (for logging).\n * @returns {Promise<object>} The result from the successful API call.\n * @throws {Error} If the call fails after all retries.\n */\nasync function _attemptProviderCallWithRetries(\n\tprovider,\n\tserviceType,\n\tcallParams,\n\tproviderName,\n\tmodelId,\n\tattemptRole\n) {\n\tlet retries = 0;\n\tconst fnName = serviceType;\n\n\twhile (retries <= MAX_RETRIES) {\n\t\ttry {\n\t\t\tif (getDebugFlag()) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Attempt ${retries + 1}/${MAX_RETRIES + 1} calling ${fnName} (Provider: ${providerName}, Model: ${modelId}, Role: ${attemptRole})`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Call the appropriate method on the provider instance\n\t\t\tconst result = await provider[serviceType](callParams);\n\n\t\t\tif (getDebugFlag()) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`${fnName} succeeded for role ${attemptRole} (Provider: ${providerName}) on attempt ${retries + 1}`\n\t\t\t\t);\n\t\t\t}\n\t\t\treturn result;\n\t\t} catch (error) {\n\t\t\tlog(\n\t\t\t\t'warn',\n\t\t\t\t`Attempt ${retries + 1} failed for role ${attemptRole} (${fnName} / ${providerName}): ${error.message}`\n\t\t\t);\n\n\t\t\tif (isRetryableError(error) && retries < MAX_RETRIES) {\n\t\t\t\tretries++;\n\t\t\t\tconst delay = INITIAL_RETRY_DELAY_MS * 2 ** (retries - 1);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Something went wrong on the provider side. Retrying in ${delay / 1000}s...`\n\t\t\t\t);\n\t\t\t\tawait new Promise((resolve) => setTimeout(resolve, delay));\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'error',\n\t\t\t\t\t`Something went wrong on the provider side. 
Max retries reached for role ${attemptRole} (${fnName} / ${providerName}).`\n\t\t\t\t);\n\t\t\t\tthrow error;\n\t\t\t}\n\t\t}\n\t}\n\t// Should not be reached due to throw in the else block\n\tthrow new Error(\n\t\t`Exhausted all retries for role ${attemptRole} (${fnName} / ${providerName})`\n\t);\n}\n\n/**\n * Base logic for unified service functions.\n * @param {string} serviceType - Type of service ('generateText', 'streamText', 'generateObject').\n * @param {object} params - Original parameters passed to the service function.\n * @param {string} params.role - The initial client role.\n * @param {object} [params.session=null] - Optional MCP session object.\n * @param {string} [params.projectRoot] - Optional project root path.\n * @param {string} params.commandName - Name of the command invoking the service.\n * @param {string} params.outputType - 'cli' or 'mcp'.\n * @param {string} [params.systemPrompt] - Optional system prompt.\n * @param {string} [params.prompt] - The prompt for the AI.\n * @param {string} [params.schema] - The Zod schema for the expected object.\n * @param {string} [params.objectName] - Name for object/tool.\n * @returns {Promise<any>} Result from the underlying provider call.\n */\nasync function _unifiedServiceRunner(serviceType, params) {\n\tconst {\n\t\trole: initialRole,\n\t\tsession,\n\t\tprojectRoot,\n\t\tsystemPrompt,\n\t\tprompt,\n\t\tschema,\n\t\tobjectName,\n\t\tcommandName,\n\t\toutputType,\n\t\t...restApiParams\n\t} = params;\n\tif (getDebugFlag()) {\n\t\tlog('info', `${serviceType}Service called`, {\n\t\t\trole: initialRole,\n\t\t\tcommandName,\n\t\t\toutputType,\n\t\t\tprojectRoot\n\t\t});\n\t}\n\n\tconst effectiveProjectRoot = projectRoot || findProjectRoot();\n\tconst userId = getUserId(effectiveProjectRoot);\n\n\tlet sequence;\n\tif (initialRole === 'main') {\n\t\tsequence = ['main', 'fallback', 'research'];\n\t} else if (initialRole === 'research') {\n\t\tsequence = ['research', 'fallback', 'main'];\n\t} else if 
(initialRole === 'fallback') {\n\t\tsequence = ['fallback', 'main', 'research'];\n\t} else {\n\t\tlog(\n\t\t\t'warn',\n\t\t\t`Unknown initial role: ${initialRole}. Defaulting to main -> fallback -> research sequence.`\n\t\t);\n\t\tsequence = ['main', 'fallback', 'research'];\n\t}\n\n\tlet lastError = null;\n\tlet lastCleanErrorMessage =\n\t\t'AI service call failed for all configured roles.';\n\n\tfor (const currentRole of sequence) {\n\t\tlet providerName;\n\t\tlet modelId;\n\t\tlet apiKey;\n\t\tlet roleParams;\n\t\tlet provider;\n\t\tlet baseURL;\n\t\tlet providerResponse;\n\t\tlet telemetryData = null;\n\n\t\ttry {\n\t\t\tlog('info', `New AI service call with role: ${currentRole}`);\n\n\t\t\tif (currentRole === 'main') {\n\t\t\t\tproviderName = getMainProvider(effectiveProjectRoot);\n\t\t\t\tmodelId = getMainModelId(effectiveProjectRoot);\n\t\t\t} else if (currentRole === 'research') {\n\t\t\t\tproviderName = getResearchProvider(effectiveProjectRoot);\n\t\t\t\tmodelId = getResearchModelId(effectiveProjectRoot);\n\t\t\t} else if (currentRole === 'fallback') {\n\t\t\t\tproviderName = getFallbackProvider(effectiveProjectRoot);\n\t\t\t\tmodelId = getFallbackModelId(effectiveProjectRoot);\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'error',\n\t\t\t\t\t`Unknown role encountered in _unifiedServiceRunner: ${currentRole}`\n\t\t\t\t);\n\t\t\t\tlastError =\n\t\t\t\t\tlastError || new Error(`Unknown AI role specified: ${currentRole}`);\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tif (!providerName || !modelId) {\n\t\t\t\tlog(\n\t\t\t\t\t'warn',\n\t\t\t\t\t`Skipping role '${currentRole}': Provider or Model ID not configured.`\n\t\t\t\t);\n\t\t\t\tlastError =\n\t\t\t\t\tlastError ||\n\t\t\t\t\tnew Error(\n\t\t\t\t\t\t`Configuration missing for role '${currentRole}'. 
Provider: ${providerName}, Model: ${modelId}`\n\t\t\t\t\t);\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\t// Get provider instance\n\t\t\tprovider = _getProvider(providerName?.toLowerCase());\n\t\t\tif (!provider) {\n\t\t\t\tlog(\n\t\t\t\t\t'warn',\n\t\t\t\t\t`Skipping role '${currentRole}': Provider '${providerName}' not supported.`\n\t\t\t\t);\n\t\t\t\tlastError =\n\t\t\t\t\tlastError ||\n\t\t\t\t\tnew Error(`Unsupported provider configured: ${providerName}`);\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\t// Check API key if needed\n\t\t\tif (!providersWithoutApiKeys.includes(providerName?.toLowerCase())) {\n\t\t\t\tif (!isApiKeySet(providerName, session, effectiveProjectRoot)) {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'warn',\n\t\t\t\t\t\t`Skipping role '${currentRole}' (Provider: ${providerName}): API key not set or invalid.`\n\t\t\t\t\t);\n\t\t\t\t\tlastError =\n\t\t\t\t\t\tlastError ||\n\t\t\t\t\t\tnew Error(\n\t\t\t\t\t\t\t`API key for provider '${providerName}' (role: ${currentRole}) is not set.`\n\t\t\t\t\t\t);\n\t\t\t\t\tcontinue; // Skip to the next role in the sequence\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Get base URL if configured (optional for most providers)\n\t\t\tbaseURL = getBaseUrlForRole(currentRole, effectiveProjectRoot);\n\n\t\t\t// For Azure, use the global Azure base URL if role-specific URL is not configured\n\t\t\tif (providerName?.toLowerCase() === 'azure' && !baseURL) {\n\t\t\t\tbaseURL = getAzureBaseURL(effectiveProjectRoot);\n\t\t\t\tlog('debug', `Using global Azure base URL: ${baseURL}`);\n\t\t\t} else if (providerName?.toLowerCase() === 'ollama' && !baseURL) {\n\t\t\t\t// For Ollama, use the global Ollama base URL if role-specific URL is not configured\n\t\t\t\tbaseURL = getOllamaBaseURL(effectiveProjectRoot);\n\t\t\t\tlog('debug', `Using global Ollama base URL: ${baseURL}`);\n\t\t\t} else if (providerName?.toLowerCase() === 'bedrock' && !baseURL) {\n\t\t\t\t// For Bedrock, use the global Bedrock base URL if role-specific URL is not configured\n\t\t\t\tbaseURL = 
getBedrockBaseURL(effectiveProjectRoot);\n\t\t\t\tlog('debug', `Using global Bedrock base URL: ${baseURL}`);\n\t\t\t}\n\n\t\t\t// Get AI parameters for the current role\n\t\t\troleParams = getParametersForRole(currentRole, effectiveProjectRoot);\n\t\t\tapiKey = _resolveApiKey(\n\t\t\t\tproviderName?.toLowerCase(),\n\t\t\t\tsession,\n\t\t\t\teffectiveProjectRoot\n\t\t\t);\n\n\t\t\t// Prepare provider-specific configuration\n\t\t\tlet providerSpecificParams = {};\n\n\t\t\t// Handle Vertex AI specific configuration\n\t\t\tif (providerName?.toLowerCase() === 'vertex') {\n\t\t\t\t// Get Vertex project ID and location\n\t\t\t\tconst projectId =\n\t\t\t\t\tgetVertexProjectId(effectiveProjectRoot) ||\n\t\t\t\t\tresolveEnvVariable(\n\t\t\t\t\t\t'VERTEX_PROJECT_ID',\n\t\t\t\t\t\tsession,\n\t\t\t\t\t\teffectiveProjectRoot\n\t\t\t\t\t);\n\n\t\t\t\tconst location =\n\t\t\t\t\tgetVertexLocation(effectiveProjectRoot) ||\n\t\t\t\t\tresolveEnvVariable(\n\t\t\t\t\t\t'VERTEX_LOCATION',\n\t\t\t\t\t\tsession,\n\t\t\t\t\t\teffectiveProjectRoot\n\t\t\t\t\t) ||\n\t\t\t\t\t'us-central1';\n\n\t\t\t\t// Get credentials path if available\n\t\t\t\tconst credentialsPath = resolveEnvVariable(\n\t\t\t\t\t'GOOGLE_APPLICATION_CREDENTIALS',\n\t\t\t\t\tsession,\n\t\t\t\t\teffectiveProjectRoot\n\t\t\t\t);\n\n\t\t\t\t// Add Vertex-specific parameters\n\t\t\t\tproviderSpecificParams = {\n\t\t\t\t\tprojectId,\n\t\t\t\t\tlocation,\n\t\t\t\t\t...(credentialsPath && { credentials: { credentialsFromEnv: true } })\n\t\t\t\t};\n\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`Using Vertex AI configuration: Project ID=${projectId}, Location=${location}`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst messages = [];\n\t\t\tconst responseLanguage = getResponseLanguage(effectiveProjectRoot);\n\t\t\tconst systemPromptWithLanguage = `${systemPrompt} \\n\\n Always respond in ${responseLanguage}.`;\n\t\t\tmessages.push({\n\t\t\t\trole: 'system',\n\t\t\t\tcontent: systemPromptWithLanguage.trim()\n\t\t\t});\n\n\t\t\t// IN THE FUTURE 
WHEN DOING CONTEXT IMPROVEMENTS\n\t\t\t// {\n\t\t\t// type: 'text',\n\t\t\t// text: 'Large cached context here like a tasks json',\n\t\t\t// providerOptions: {\n\t\t\t// anthropic: { cacheControl: { type: 'ephemeral' } }\n\t\t\t// }\n\t\t\t// }\n\n\t\t\t// Example\n\t\t\t// if (params.context) { // context is a json string of a tasks object or some other stu\n\t\t\t// messages.push({\n\t\t\t// type: 'text',\n\t\t\t// text: params.context,\n\t\t\t// providerOptions: { anthropic: { cacheControl: { type: 'ephemeral' } } }\n\t\t\t// });\n\t\t\t// }\n\n\t\t\tif (prompt) {\n\t\t\t\tmessages.push({ role: 'user', content: prompt });\n\t\t\t} else {\n\t\t\t\tthrow new Error('User prompt content is missing.');\n\t\t\t}\n\n\t\t\tconst callParams = {\n\t\t\t\tapiKey,\n\t\t\t\tmodelId,\n\t\t\t\tmaxTokens: roleParams.maxTokens,\n\t\t\t\ttemperature: roleParams.temperature,\n\t\t\t\tmessages,\n\t\t\t\t...(baseURL && { baseURL }),\n\t\t\t\t...(serviceType === 'generateObject' && { schema, objectName }),\n\t\t\t\t...providerSpecificParams,\n\t\t\t\t...restApiParams\n\t\t\t};\n\n\t\t\tproviderResponse = await _attemptProviderCallWithRetries(\n\t\t\t\tprovider,\n\t\t\t\tserviceType,\n\t\t\t\tcallParams,\n\t\t\t\tproviderName,\n\t\t\t\tmodelId,\n\t\t\t\tcurrentRole\n\t\t\t);\n\n\t\t\tif (userId && providerResponse && providerResponse.usage) {\n\t\t\t\ttry {\n\t\t\t\t\ttelemetryData = await logAiUsage({\n\t\t\t\t\t\tuserId,\n\t\t\t\t\t\tcommandName,\n\t\t\t\t\t\tproviderName,\n\t\t\t\t\t\tmodelId,\n\t\t\t\t\t\tinputTokens: providerResponse.usage.inputTokens,\n\t\t\t\t\t\toutputTokens: providerResponse.usage.outputTokens,\n\t\t\t\t\t\toutputType\n\t\t\t\t\t});\n\t\t\t\t} catch (telemetryError) {\n\t\t\t\t\t// logAiUsage already logs its own errors and returns null on failure\n\t\t\t\t\t// No need to log again here, telemetryData will remain null\n\t\t\t\t}\n\t\t\t} else if (userId && providerResponse && !providerResponse.usage) {\n\t\t\t\tlog(\n\t\t\t\t\t'warn',\n\t\t\t\t\t`Cannot log 
telemetry for ${commandName} (${providerName}/${modelId}): AI result missing 'usage' data. (May be expected for streams)`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tlet finalMainResult;\n\t\t\tif (serviceType === 'generateText') {\n\t\t\t\tfinalMainResult = providerResponse.text;\n\t\t\t} else if (serviceType === 'generateObject') {\n\t\t\t\tfinalMainResult = providerResponse.object;\n\t\t\t} else if (serviceType === 'streamText') {\n\t\t\t\tfinalMainResult = providerResponse;\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'error',\n\t\t\t\t\t`Unknown serviceType in _unifiedServiceRunner: ${serviceType}`\n\t\t\t\t);\n\t\t\t\tfinalMainResult = providerResponse;\n\t\t\t}\n\n\t\t\t// Get tag information for the response\n\t\t\tconst tagInfo = _getTagInfo(effectiveProjectRoot);\n\n\t\t\treturn {\n\t\t\t\tmainResult: finalMainResult,\n\t\t\t\ttelemetryData: telemetryData,\n\t\t\t\ttagInfo: tagInfo\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tconst cleanMessage = _extractErrorMessage(error);\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`Service call failed for role ${currentRole} (Provider: ${providerName || 'unknown'}, Model: ${modelId || 'unknown'}): ${cleanMessage}`\n\t\t\t);\n\t\t\tlastError = error;\n\t\t\tlastCleanErrorMessage = cleanMessage;\n\n\t\t\tif (serviceType === 'generateObject') {\n\t\t\t\tconst lowerCaseMessage = cleanMessage.toLowerCase();\n\t\t\t\tif (\n\t\t\t\t\tlowerCaseMessage.includes(\n\t\t\t\t\t\t'no endpoints found that support tool use'\n\t\t\t\t\t) ||\n\t\t\t\t\tlowerCaseMessage.includes('does not support tool_use') ||\n\t\t\t\t\tlowerCaseMessage.includes('tool use is not supported') ||\n\t\t\t\t\tlowerCaseMessage.includes('tools are not supported') ||\n\t\t\t\t\tlowerCaseMessage.includes('function calling is not supported') ||\n\t\t\t\t\tlowerCaseMessage.includes('tool use is not supported')\n\t\t\t\t) {\n\t\t\t\t\tconst specificErrorMsg = `Model '${modelId || 'unknown'}' via provider '${providerName || 'unknown'}' does not support the 'tool use' required by 
generateObjectService. Please configure a model that supports tool/function calling for the '${currentRole}' role, or use generateTextService if structured output is not strictly required.`;\n\t\t\t\t\tlog('error', `[Tool Support Error] ${specificErrorMsg}`);\n\t\t\t\t\tthrow new Error(specificErrorMsg);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tlog('error', `All roles in the sequence [${sequence.join(', ')}] failed.`);\n\tthrow new Error(lastCleanErrorMessage);\n}\n\n/**\n * Unified service function for generating text.\n * Handles client retrieval, retries, and fallback sequence.\n *\n * @param {object} params - Parameters for the service call.\n * @param {string} params.role - The initial client role ('main', 'research', 'fallback').\n * @param {object} [params.session=null] - Optional MCP session object.\n * @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.\n * @param {string} params.prompt - The prompt for the AI.\n * @param {string} [params.systemPrompt] - Optional system prompt.\n * @param {string} params.commandName - Name of the command invoking the service.\n * @param {string} [params.outputType='cli'] - 'cli' or 'mcp'.\n * @returns {Promise<object>} Result object containing generated text and usage data.\n */\nasync function generateTextService(params) {\n\t// Ensure default outputType if not provided\n\tconst defaults = { outputType: 'cli' };\n\tconst combinedParams = { ...defaults, ...params };\n\t// TODO: Validate commandName exists?\n\treturn _unifiedServiceRunner('generateText', combinedParams);\n}\n\n/**\n * Unified service function for streaming text.\n * Handles client retrieval, retries, and fallback sequence.\n *\n * @param {object} params - Parameters for the service call.\n * @param {string} params.role - The initial client role ('main', 'research', 'fallback').\n * @param {object} [params.session=null] - Optional MCP session object.\n * @param {string} [params.projectRoot=null] - Optional project root path 
for .env fallback.\n * @param {string} params.prompt - The prompt for the AI.\n * @param {string} [params.systemPrompt] - Optional system prompt.\n * @param {string} params.commandName - Name of the command invoking the service.\n * @param {string} [params.outputType='cli'] - 'cli' or 'mcp'.\n * @returns {Promise<object>} Result object containing the stream and usage data.\n */\nasync function streamTextService(params) {\n\tconst defaults = { outputType: 'cli' };\n\tconst combinedParams = { ...defaults, ...params };\n\t// TODO: Validate commandName exists?\n\t// NOTE: Telemetry for streaming might be tricky as usage data often comes at the end.\n\t// The current implementation logs *after* the stream is returned.\n\t// We might need to adjust how usage is captured/logged for streams.\n\treturn _unifiedServiceRunner('streamText', combinedParams);\n}\n\n/**\n * Unified service function for generating structured objects.\n * Handles client retrieval, retries, and fallback sequence.\n *\n * @param {object} params - Parameters for the service call.\n * @param {string} params.role - The initial client role ('main', 'research', 'fallback').\n * @param {object} [params.session=null] - Optional MCP session object.\n * @param {string} [params.projectRoot=null] - Optional project root path for .env fallback.\n * @param {import('zod').ZodSchema} params.schema - The Zod schema for the expected object.\n * @param {string} params.prompt - The prompt for the AI.\n * @param {string} [params.systemPrompt] - Optional system prompt.\n * @param {string} [params.objectName='generated_object'] - Name for object/tool.\n * @param {number} [params.maxRetries=3] - Max retries for object generation.\n * @param {string} params.commandName - Name of the command invoking the service.\n * @param {string} [params.outputType='cli'] - 'cli' or 'mcp'.\n * @returns {Promise<object>} Result object containing the generated object and usage data.\n */\nasync function generateObjectService(params) 
{\n\tconst defaults = {\n\t\tobjectName: 'generated_object',\n\t\tmaxRetries: 3,\n\t\toutputType: 'cli'\n\t};\n\tconst combinedParams = { ...defaults, ...params };\n\t// TODO: Validate commandName exists?\n\treturn _unifiedServiceRunner('generateObject', combinedParams);\n}\n\n// --- Telemetry Function ---\n/**\n * Logs AI usage telemetry data.\n * For now, it just logs to the console. Sending will be implemented later.\n * @param {object} params - Telemetry parameters.\n * @param {string} params.userId - Unique user identifier.\n * @param {string} params.commandName - The command that triggered the AI call.\n * @param {string} params.providerName - The AI provider used (e.g., 'openai').\n * @param {string} params.modelId - The specific AI model ID used.\n * @param {number} params.inputTokens - Number of input tokens.\n * @param {number} params.outputTokens - Number of output tokens.\n */\nasync function logAiUsage({\n\tuserId,\n\tcommandName,\n\tproviderName,\n\tmodelId,\n\tinputTokens,\n\toutputTokens,\n\toutputType\n}) {\n\ttry {\n\t\tconst isMCP = outputType === 'mcp';\n\t\tconst timestamp = new Date().toISOString();\n\t\tconst totalTokens = (inputTokens || 0) + (outputTokens || 0);\n\n\t\t// Destructure currency along with costs\n\t\tconst { inputCost, outputCost, currency } = _getCostForModel(\n\t\t\tproviderName,\n\t\t\tmodelId\n\t\t);\n\n\t\tconst totalCost =\n\t\t\t((inputTokens || 0) / 1_000_000) * inputCost +\n\t\t\t((outputTokens || 0) / 1_000_000) * outputCost;\n\n\t\tconst telemetryData = {\n\t\t\ttimestamp,\n\t\t\tuserId,\n\t\t\tcommandName,\n\t\t\tmodelUsed: modelId, // Consistent field name from requirements\n\t\t\tproviderName, // Keep provider name for context\n\t\t\tinputTokens: inputTokens || 0,\n\t\t\toutputTokens: outputTokens || 0,\n\t\t\ttotalTokens,\n\t\t\ttotalCost: parseFloat(totalCost.toFixed(6)),\n\t\t\tcurrency // Add currency to the telemetry data\n\t\t};\n\n\t\tif (getDebugFlag()) {\n\t\t\tlog('info', 'AI Usage Telemetry:', 
telemetryData);\n\t\t}\n\n\t\t// TODO (Subtask 77.2): Send telemetryData securely to the external endpoint.\n\n\t\treturn telemetryData;\n\t} catch (error) {\n\t\tlog('error', `Failed to log AI usage telemetry: ${error.message}`, {\n\t\t\terror\n\t\t});\n\t\t// Don't re-throw; telemetry failure shouldn't block core functionality.\n\t\treturn null;\n\t}\n}\n\nexport {\n\tgenerateTextService,\n\tstreamTextService,\n\tgenerateObjectService,\n\tlogAiUsage\n};\n"], ["/claude-task-master/scripts/modules/task-manager/models.js", "/**\n * models.js\n * Core functionality for managing AI model configurations\n */\n\nimport https from 'https';\nimport http from 'http';\nimport {\n\tgetMainModelId,\n\tgetResearchModelId,\n\tgetFallbackModelId,\n\tgetAvailableModels,\n\tgetMainProvider,\n\tgetResearchProvider,\n\tgetFallbackProvider,\n\tisApiKeySet,\n\tgetMcpApiKeyStatus,\n\tgetConfig,\n\twriteConfig,\n\tisConfigFilePresent,\n\tgetAllProviders,\n\tgetBaseUrlForRole\n} from '../config-manager.js';\nimport { findConfigPath } from '../../../src/utils/path-utils.js';\nimport { log } from '../utils.js';\nimport { CUSTOM_PROVIDERS } from '../../../src/constants/providers.js';\n\n// Constants\nconst CONFIG_MISSING_ERROR =\n\t'The configuration file is missing. 
Run \"task-master init\" to create it.';\n\n/**\n * Fetches the list of models from OpenRouter API.\n * @returns {Promise<Array|null>} A promise that resolves with the list of model IDs or null if fetch fails.\n */\nfunction fetchOpenRouterModels() {\n\treturn new Promise((resolve) => {\n\t\tconst options = {\n\t\t\thostname: 'openrouter.ai',\n\t\t\tpath: '/api/v1/models',\n\t\t\tmethod: 'GET',\n\t\t\theaders: {\n\t\t\t\tAccept: 'application/json'\n\t\t\t}\n\t\t};\n\n\t\tconst req = https.request(options, (res) => {\n\t\t\tlet data = '';\n\t\t\tres.on('data', (chunk) => {\n\t\t\t\tdata += chunk;\n\t\t\t});\n\t\t\tres.on('end', () => {\n\t\t\t\tif (res.statusCode === 200) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tconst parsedData = JSON.parse(data);\n\t\t\t\t\t\tresolve(parsedData.data || []); // Return the array of models\n\t\t\t\t\t} catch (e) {\n\t\t\t\t\t\tconsole.error('Error parsing OpenRouter response:', e);\n\t\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t`OpenRouter API request failed with status code: ${res.statusCode}`\n\t\t\t\t\t);\n\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t}\n\t\t\t});\n\t\t});\n\n\t\treq.on('error', (e) => {\n\t\t\tconsole.error('Error fetching OpenRouter models:', e);\n\t\t\tresolve(null); // Indicate failure\n\t\t});\n\t\treq.end();\n\t});\n}\n\n/**\n * Fetches the list of models from Ollama instance.\n * @param {string} baseURL - The base URL for the Ollama API (e.g., \"http://localhost:11434/api\")\n * @returns {Promise<Array|null>} A promise that resolves with the list of model objects or null if fetch fails.\n */\nfunction fetchOllamaModels(baseURL = 'http://localhost:11434/api') {\n\treturn new Promise((resolve) => {\n\t\ttry {\n\t\t\t// Parse the base URL to extract hostname, port, and base path\n\t\t\tconst url = new URL(baseURL);\n\t\t\tconst isHttps = url.protocol === 'https:';\n\t\t\tconst port = url.port || (isHttps ? 
443 : 80);\n\t\t\tconst basePath = url.pathname.endsWith('/')\n\t\t\t\t? url.pathname.slice(0, -1)\n\t\t\t\t: url.pathname;\n\n\t\t\tconst options = {\n\t\t\t\thostname: url.hostname,\n\t\t\t\tport: parseInt(port, 10),\n\t\t\t\tpath: `${basePath}/tags`,\n\t\t\t\tmethod: 'GET',\n\t\t\t\theaders: {\n\t\t\t\t\tAccept: 'application/json'\n\t\t\t\t}\n\t\t\t};\n\n\t\t\tconst requestLib = isHttps ? https : http;\n\t\t\tconst req = requestLib.request(options, (res) => {\n\t\t\t\tlet data = '';\n\t\t\t\tres.on('data', (chunk) => {\n\t\t\t\t\tdata += chunk;\n\t\t\t\t});\n\t\t\t\tres.on('end', () => {\n\t\t\t\t\tif (res.statusCode === 200) {\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tconst parsedData = JSON.parse(data);\n\t\t\t\t\t\t\tresolve(parsedData.models || []); // Return the array of models\n\t\t\t\t\t\t} catch (e) {\n\t\t\t\t\t\t\tconsole.error('Error parsing Ollama response:', e);\n\t\t\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tconsole.error(\n\t\t\t\t\t\t\t`Ollama API request failed with status code: ${res.statusCode}`\n\t\t\t\t\t\t);\n\t\t\t\t\t\tresolve(null); // Indicate failure\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t});\n\n\t\t\treq.on('error', (e) => {\n\t\t\t\tconsole.error('Error fetching Ollama models:', e);\n\t\t\t\tresolve(null); // Indicate failure\n\t\t\t});\n\t\t\treq.end();\n\t\t} catch (e) {\n\t\t\tconsole.error('Error parsing Ollama base URL:', e);\n\t\t\tresolve(null); // Indicate failure\n\t\t}\n\t});\n}\n\n/**\n * Get the current model configuration\n * @param {Object} [options] - Options for the operation\n * @param {Object} [options.session] - Session object containing environment variables (for MCP)\n * @param {Function} [options.mcpLog] - MCP logger object (for MCP)\n * @param {string} [options.projectRoot] - Project root directory\n * @returns {Object} RESTful response with current model configuration\n */\nasync function getModelConfiguration(options = {}) {\n\tconst { mcpLog, projectRoot, session } = 
options;\n\n\tconst report = (level, ...args) => {\n\t\tif (mcpLog && typeof mcpLog[level] === 'function') {\n\t\t\tmcpLog[level](...args);\n\t\t}\n\t};\n\n\tif (!projectRoot) {\n\t\tthrow new Error('Project root is required but not found.');\n\t}\n\n\t// Use centralized config path finding instead of hardcoded path\n\tconst configPath = findConfigPath(null, { projectRoot });\n\tconst configExists = isConfigFilePresent(projectRoot);\n\n\tlog(\n\t\t'debug',\n\t\t`Checking for config file using findConfigPath, found: ${configPath}`\n\t);\n\tlog(\n\t\t'debug',\n\t\t`Checking config file using isConfigFilePresent(), exists: ${configExists}`\n\t);\n\n\tif (!configExists) {\n\t\tthrow new Error(CONFIG_MISSING_ERROR);\n\t}\n\n\ttry {\n\t\t// Get current settings - these should use the config from the found path automatically\n\t\tconst mainProvider = getMainProvider(projectRoot);\n\t\tconst mainModelId = getMainModelId(projectRoot);\n\t\tconst researchProvider = getResearchProvider(projectRoot);\n\t\tconst researchModelId = getResearchModelId(projectRoot);\n\t\tconst fallbackProvider = getFallbackProvider(projectRoot);\n\t\tconst fallbackModelId = getFallbackModelId(projectRoot);\n\n\t\t// Check API keys\n\t\tconst mainCliKeyOk = isApiKeySet(mainProvider, session, projectRoot);\n\t\tconst mainMcpKeyOk = getMcpApiKeyStatus(mainProvider, projectRoot);\n\t\tconst researchCliKeyOk = isApiKeySet(\n\t\t\tresearchProvider,\n\t\t\tsession,\n\t\t\tprojectRoot\n\t\t);\n\t\tconst researchMcpKeyOk = getMcpApiKeyStatus(researchProvider, projectRoot);\n\t\tconst fallbackCliKeyOk = fallbackProvider\n\t\t\t? isApiKeySet(fallbackProvider, session, projectRoot)\n\t\t\t: true;\n\t\tconst fallbackMcpKeyOk = fallbackProvider\n\t\t\t? 
getMcpApiKeyStatus(fallbackProvider, projectRoot)\n\t\t\t: true;\n\n\t\t// Get available models to find detailed info\n\t\tconst availableModels = getAvailableModels(projectRoot);\n\n\t\t// Find model details\n\t\tconst mainModelData = availableModels.find((m) => m.id === mainModelId);\n\t\tconst researchModelData = availableModels.find(\n\t\t\t(m) => m.id === researchModelId\n\t\t);\n\t\tconst fallbackModelData = fallbackModelId\n\t\t\t? availableModels.find((m) => m.id === fallbackModelId)\n\t\t\t: null;\n\n\t\t// Return structured configuration data\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tactiveModels: {\n\t\t\t\t\tmain: {\n\t\t\t\t\t\tprovider: mainProvider,\n\t\t\t\t\t\tmodelId: mainModelId,\n\t\t\t\t\t\tsweScore: mainModelData?.swe_score || null,\n\t\t\t\t\t\tcost: mainModelData?.cost_per_1m_tokens || null,\n\t\t\t\t\t\tkeyStatus: {\n\t\t\t\t\t\t\tcli: mainCliKeyOk,\n\t\t\t\t\t\t\tmcp: mainMcpKeyOk\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\tresearch: {\n\t\t\t\t\t\tprovider: researchProvider,\n\t\t\t\t\t\tmodelId: researchModelId,\n\t\t\t\t\t\tsweScore: researchModelData?.swe_score || null,\n\t\t\t\t\t\tcost: researchModelData?.cost_per_1m_tokens || null,\n\t\t\t\t\t\tkeyStatus: {\n\t\t\t\t\t\t\tcli: researchCliKeyOk,\n\t\t\t\t\t\t\tmcp: researchMcpKeyOk\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\tfallback: fallbackProvider\n\t\t\t\t\t\t? 
{\n\t\t\t\t\t\t\t\tprovider: fallbackProvider,\n\t\t\t\t\t\t\t\tmodelId: fallbackModelId,\n\t\t\t\t\t\t\t\tsweScore: fallbackModelData?.swe_score || null,\n\t\t\t\t\t\t\t\tcost: fallbackModelData?.cost_per_1m_tokens || null,\n\t\t\t\t\t\t\t\tkeyStatus: {\n\t\t\t\t\t\t\t\t\tcli: fallbackCliKeyOk,\n\t\t\t\t\t\t\t\t\tmcp: fallbackMcpKeyOk\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t: null\n\t\t\t\t},\n\t\t\t\tmessage: 'Successfully retrieved current model configuration'\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\treport('error', `Error getting model configuration: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CONFIG_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n\n/**\n * Get all available models not currently in use\n * @param {Object} [options] - Options for the operation\n * @param {Object} [options.session] - Session object containing environment variables (for MCP)\n * @param {Function} [options.mcpLog] - MCP logger object (for MCP)\n * @param {string} [options.projectRoot] - Project root directory\n * @returns {Object} RESTful response with available models\n */\nasync function getAvailableModelsList(options = {}) {\n\tconst { mcpLog, projectRoot } = options;\n\n\tconst report = (level, ...args) => {\n\t\tif (mcpLog && typeof mcpLog[level] === 'function') {\n\t\t\tmcpLog[level](...args);\n\t\t}\n\t};\n\n\tif (!projectRoot) {\n\t\tthrow new Error('Project root is required but not found.');\n\t}\n\n\t// Use centralized config path finding instead of hardcoded path\n\tconst configPath = findConfigPath(null, { projectRoot });\n\tconst configExists = isConfigFilePresent(projectRoot);\n\n\tlog(\n\t\t'debug',\n\t\t`Checking for config file using findConfigPath, found: ${configPath}`\n\t);\n\tlog(\n\t\t'debug',\n\t\t`Checking config file using isConfigFilePresent(), exists: ${configExists}`\n\t);\n\n\tif (!configExists) {\n\t\tthrow new Error(CONFIG_MISSING_ERROR);\n\t}\n\n\ttry {\n\t\t// Get all 
available models\n\t\tconst allAvailableModels = getAvailableModels(projectRoot);\n\n\t\tif (!allAvailableModels || allAvailableModels.length === 0) {\n\t\t\treturn {\n\t\t\t\tsuccess: true,\n\t\t\t\tdata: {\n\t\t\t\t\tmodels: [],\n\t\t\t\t\tmessage: 'No available models found'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Get currently used model IDs\n\t\tconst mainModelId = getMainModelId(projectRoot);\n\t\tconst researchModelId = getResearchModelId(projectRoot);\n\t\tconst fallbackModelId = getFallbackModelId(projectRoot);\n\n\t\t// Filter out placeholder models and active models\n\t\tconst activeIds = [mainModelId, researchModelId, fallbackModelId].filter(\n\t\t\tBoolean\n\t\t);\n\t\tconst otherAvailableModels = allAvailableModels.map((model) => ({\n\t\t\tprovider: model.provider || 'N/A',\n\t\t\tmodelId: model.id,\n\t\t\tsweScore: model.swe_score || null,\n\t\t\tcost: model.cost_per_1m_tokens || null,\n\t\t\tallowedRoles: model.allowed_roles || []\n\t\t}));\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tmodels: otherAvailableModels,\n\t\t\t\tmessage: `Successfully retrieved ${otherAvailableModels.length} available models`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\treport('error', `Error getting available models: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'MODELS_LIST_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n\n/**\n * Update a specific model in the configuration\n * @param {string} role - The model role to update ('main', 'research', 'fallback')\n * @param {string} modelId - The model ID to set for the role\n * @param {Object} [options] - Options for the operation\n * @param {string} [options.providerHint] - Provider hint if already determined ('openrouter' or 'ollama')\n * @param {Object} [options.session] - Session object containing environment variables (for MCP)\n * @param {Function} [options.mcpLog] - MCP logger object (for MCP)\n * @param {string} [options.projectRoot] - Project 
root directory\n * @returns {Object} RESTful response with result of update operation\n */\nasync function setModel(role, modelId, options = {}) {\n\tconst { mcpLog, projectRoot, providerHint } = options;\n\n\tconst report = (level, ...args) => {\n\t\tif (mcpLog && typeof mcpLog[level] === 'function') {\n\t\t\tmcpLog[level](...args);\n\t\t}\n\t};\n\n\tif (!projectRoot) {\n\t\tthrow new Error('Project root is required but not found.');\n\t}\n\n\t// Use centralized config path finding instead of hardcoded path\n\tconst configPath = findConfigPath(null, { projectRoot });\n\tconst configExists = isConfigFilePresent(projectRoot);\n\n\tlog(\n\t\t'debug',\n\t\t`Checking for config file using findConfigPath, found: ${configPath}`\n\t);\n\tlog(\n\t\t'debug',\n\t\t`Checking config file using isConfigFilePresent(), exists: ${configExists}`\n\t);\n\n\tif (!configExists) {\n\t\tthrow new Error(CONFIG_MISSING_ERROR);\n\t}\n\n\t// Validate role\n\tif (!['main', 'research', 'fallback'].includes(role)) {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'INVALID_ROLE',\n\t\t\t\tmessage: `Invalid role: ${role}. Must be one of: main, research, fallback.`\n\t\t\t}\n\t\t};\n\t}\n\n\t// Validate model ID\n\tif (typeof modelId !== 'string' || modelId.trim() === '') {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'INVALID_MODEL_ID',\n\t\t\t\tmessage: `Invalid model ID: ${modelId}. 
Must be a non-empty string.`\n\t\t\t}\n\t\t};\n\t}\n\n\ttry {\n\t\tconst availableModels = getAvailableModels(projectRoot);\n\t\tconst currentConfig = getConfig(projectRoot);\n\t\tlet determinedProvider = null; // Initialize provider\n\t\tlet warningMessage = null;\n\n\t\t// Find the model data in internal list initially to see if it exists at all\n\t\tlet modelData = availableModels.find((m) => m.id === modelId);\n\n\t\t// --- Revised Logic: Prioritize providerHint --- //\n\n\t\tif (providerHint) {\n\t\t\t// Hint provided (--ollama or --openrouter flag used)\n\t\t\tif (modelData && modelData.provider === providerHint) {\n\t\t\t\t// Found internally AND provider matches the hint\n\t\t\t\tdeterminedProvider = providerHint;\n\t\t\t\treport(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Model ${modelId} found internally with matching provider hint ${determinedProvider}.`\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// Either not found internally, OR found but under a DIFFERENT provider than hinted.\n\t\t\t\t// Proceed with custom logic based ONLY on the hint.\n\t\t\t\tif (providerHint === CUSTOM_PROVIDERS.OPENROUTER) {\n\t\t\t\t\t// Check OpenRouter ONLY because hint was openrouter\n\t\t\t\t\treport('info', `Checking OpenRouter for ${modelId} (as hinted)...`);\n\t\t\t\t\tconst openRouterModels = await fetchOpenRouterModels();\n\n\t\t\t\t\tif (\n\t\t\t\t\t\topenRouterModels &&\n\t\t\t\t\t\topenRouterModels.some((m) => m.id === modelId)\n\t\t\t\t\t) {\n\t\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.OPENROUTER;\n\n\t\t\t\t\t\t// Check if this is a free model (ends with :free)\n\t\t\t\t\t\tif (modelId.endsWith(':free')) {\n\t\t\t\t\t\t\twarningMessage = `Warning: OpenRouter free model '${modelId}' selected. Free models have significant limitations including lower context windows, reduced rate limits, and may not support advanced features like tool_use. 
Consider using the paid version '${modelId.replace(':free', '')}' for full functionality.`;\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\twarningMessage = `Warning: Custom OpenRouter model '${modelId}' set. This model is not officially validated by Taskmaster and may not function as expected.`;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Hinted as OpenRouter but not found in live check\n\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t`Model ID \"${modelId}\" not found in the live OpenRouter model list. Please verify the ID and ensure it's available on OpenRouter.`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t} else if (providerHint === CUSTOM_PROVIDERS.OLLAMA) {\n\t\t\t\t\t// Check Ollama ONLY because hint was ollama\n\t\t\t\t\treport('info', `Checking Ollama for ${modelId} (as hinted)...`);\n\n\t\t\t\t\t// Get the Ollama base URL from config\n\t\t\t\t\tconst ollamaBaseURL = getBaseUrlForRole(role, projectRoot);\n\t\t\t\t\tconst ollamaModels = await fetchOllamaModels(ollamaBaseURL);\n\n\t\t\t\t\tif (ollamaModels === null) {\n\t\t\t\t\t\t// Connection failed - server probably not running\n\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t`Unable to connect to Ollama server at ${ollamaBaseURL}. Please ensure Ollama is running and try again.`\n\t\t\t\t\t\t);\n\t\t\t\t\t} else if (ollamaModels.some((m) => m.model === modelId)) {\n\t\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.OLLAMA;\n\t\t\t\t\t\twarningMessage = `Warning: Custom Ollama model '${modelId}' set. Ensure your Ollama server is running and has pulled this model. Taskmaster cannot guarantee compatibility.`;\n\t\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Server is running but model not found\n\t\t\t\t\t\tconst tagsUrl = `${ollamaBaseURL}/tags`;\n\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t`Model ID \"${modelId}\" not found in the Ollama instance. Please verify the model is pulled and available. 
You can check available models with: curl ${tagsUrl}`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t} else if (providerHint === CUSTOM_PROVIDERS.BEDROCK) {\n\t\t\t\t\t// Set provider without model validation since Bedrock models are managed by AWS\n\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.BEDROCK;\n\t\t\t\t\twarningMessage = `Warning: Custom Bedrock model '${modelId}' set. Please ensure the model ID is valid and accessible in your AWS account.`;\n\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t} else if (providerHint === CUSTOM_PROVIDERS.CLAUDE_CODE) {\n\t\t\t\t\t// Claude Code provider - check if model exists in our list\n\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.CLAUDE_CODE;\n\t\t\t\t\t// Re-find modelData specifically for claude-code provider\n\t\t\t\t\tconst claudeCodeModels = availableModels.filter(\n\t\t\t\t\t\t(m) => m.provider === 'claude-code'\n\t\t\t\t\t);\n\t\t\t\t\tconst claudeCodeModelData = claudeCodeModels.find(\n\t\t\t\t\t\t(m) => m.id === modelId\n\t\t\t\t\t);\n\t\t\t\t\tif (claudeCodeModelData) {\n\t\t\t\t\t\t// Update modelData to the found claude-code model\n\t\t\t\t\t\tmodelData = claudeCodeModelData;\n\t\t\t\t\t\treport('info', `Setting Claude Code model '${modelId}'.`);\n\t\t\t\t\t} else {\n\t\t\t\t\t\twarningMessage = `Warning: Claude Code model '${modelId}' not found in supported models. Setting without validation.`;\n\t\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t\t}\n\t\t\t\t} else if (providerHint === CUSTOM_PROVIDERS.AZURE) {\n\t\t\t\t\t// Set provider without model validation since Azure models are managed by Azure\n\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.AZURE;\n\t\t\t\t\twarningMessage = `Warning: Custom Azure model '${modelId}' set. 
Please ensure the model deployment is valid and accessible in your Azure account.`;\n\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t} else if (providerHint === CUSTOM_PROVIDERS.VERTEX) {\n\t\t\t\t\t// Set provider without model validation since Vertex models are managed by Google Cloud\n\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.VERTEX;\n\t\t\t\t\twarningMessage = `Warning: Custom Vertex AI model '${modelId}' set. Please ensure the model is valid and accessible in your Google Cloud project.`;\n\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t} else if (providerHint === CUSTOM_PROVIDERS.GEMINI_CLI) {\n\t\t\t\t\t// Gemini CLI provider - check if model exists in our list\n\t\t\t\t\tdeterminedProvider = CUSTOM_PROVIDERS.GEMINI_CLI;\n\t\t\t\t\t// Re-find modelData specifically for gemini-cli provider\n\t\t\t\t\tconst geminiCliModels = availableModels.filter(\n\t\t\t\t\t\t(m) => m.provider === 'gemini-cli'\n\t\t\t\t\t);\n\t\t\t\t\tconst geminiCliModelData = geminiCliModels.find(\n\t\t\t\t\t\t(m) => m.id === modelId\n\t\t\t\t\t);\n\t\t\t\t\tif (geminiCliModelData) {\n\t\t\t\t\t\t// Update modelData to the found gemini-cli model\n\t\t\t\t\t\tmodelData = geminiCliModelData;\n\t\t\t\t\t\treport('info', `Setting Gemini CLI model '${modelId}'.`);\n\t\t\t\t\t} else {\n\t\t\t\t\t\twarningMessage = `Warning: Gemini CLI model '${modelId}' not found in supported models. 
Setting without validation.`;\n\t\t\t\t\t\treport('warn', warningMessage);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Invalid provider hint - should not happen with our constants\n\t\t\t\t\tthrow new Error(`Invalid provider hint received: ${providerHint}`);\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// No hint provided (flags not used)\n\t\t\tif (modelData) {\n\t\t\t\t// Found internally, use the provider from the internal list\n\t\t\t\tdeterminedProvider = modelData.provider;\n\t\t\t\treport(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Model ${modelId} found internally with provider ${determinedProvider}.`\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\t// Model not found and no provider hint was given\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'MODEL_NOT_FOUND_NO_HINT',\n\t\t\t\t\t\tmessage: `Model ID \"${modelId}\" not found in Taskmaster's supported models. If this is a custom model, please specify the provider using --openrouter, --ollama, --bedrock, --azure, or --vertex.`\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\t\t}\n\n\t\t// --- End of Revised Logic --- //\n\n\t\t// At this point, we should have a determinedProvider if the model is valid (internally or custom)\n\t\tif (!determinedProvider) {\n\t\t\t// This case acts as a safeguard\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'PROVIDER_UNDETERMINED',\n\t\t\t\t\tmessage: `Could not determine the provider for model ID \"${modelId}\".`\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\t// Update configuration\n\t\tcurrentConfig.models[role] = {\n\t\t\t...currentConfig.models[role], // Keep existing params like temperature\n\t\t\tprovider: determinedProvider,\n\t\t\tmodelId: modelId\n\t\t};\n\n\t\t// If model data is available, update maxTokens from supported-models.json\n\t\tif (modelData && modelData.max_tokens) {\n\t\t\tcurrentConfig.models[role].maxTokens = modelData.max_tokens;\n\t\t}\n\n\t\t// Write updated configuration\n\t\tconst writeResult = writeConfig(currentConfig, 
projectRoot);\n\t\tif (!writeResult) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'CONFIG_WRITE_ERROR',\n\t\t\t\t\tmessage: 'Error writing updated configuration to configuration file'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tconst successMessage = `Successfully set ${role} model to ${modelId} (Provider: ${determinedProvider})`;\n\t\treport('info', successMessage);\n\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\trole,\n\t\t\t\tprovider: determinedProvider,\n\t\t\t\tmodelId,\n\t\t\t\tmessage: successMessage,\n\t\t\t\twarning: warningMessage // Include warning in the response data\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\treport('error', `Error setting ${role} model: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'SET_MODEL_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n\n/**\n * Get API key status for all known providers.\n * @param {Object} [options] - Options for the operation\n * @param {Object} [options.session] - Session object containing environment variables (for MCP)\n * @param {Function} [options.mcpLog] - MCP logger object (for MCP)\n * @param {string} [options.projectRoot] - Project root directory\n * @returns {Object} RESTful response with API key status report\n */\nasync function getApiKeyStatusReport(options = {}) {\n\tconst { mcpLog, projectRoot, session } = options;\n\tconst report = (level, ...args) => {\n\t\tif (mcpLog && typeof mcpLog[level] === 'function') {\n\t\t\tmcpLog[level](...args);\n\t\t}\n\t};\n\n\ttry {\n\t\tconst providers = getAllProviders();\n\t\tconst providersToCheck = providers.filter(\n\t\t\t(p) => p.toLowerCase() !== 'ollama'\n\t\t); // Ollama is not a provider, it's a service, doesn't need an api key usually\n\t\tconst statusReport = providersToCheck.map((provider) => {\n\t\t\t// Use provided projectRoot for MCP status check\n\t\t\tconst cliOk = isApiKeySet(provider, session, projectRoot); // Pass session and projectRoot for CLI 
check\n\t\t\tconst mcpOk = getMcpApiKeyStatus(provider, projectRoot);\n\t\t\treturn {\n\t\t\t\tprovider,\n\t\t\t\tcli: cliOk,\n\t\t\t\tmcp: mcpOk\n\t\t\t};\n\t\t});\n\n\t\treport('info', 'Successfully generated API key status report.');\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\treport: statusReport,\n\t\t\t\tmessage: 'API key status report generated.'\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\treport('error', `Error generating API key status report: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'API_KEY_STATUS_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n\nexport {\n\tgetModelConfiguration,\n\tgetAvailableModelsList,\n\tsetModel,\n\tgetApiKeyStatusReport\n};\n"], ["/claude-task-master/scripts/modules/task-manager/update-single-task-status.js", "import chalk from 'chalk';\n\nimport { log } from '../utils.js';\nimport { isValidTaskStatus } from '../../../src/constants/task-status.js';\n\n/**\n * Update the status of a single task\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} taskIdInput - Task ID to update\n * @param {string} newStatus - New status\n * @param {Object} data - Tasks data\n * @param {boolean} showUi - Whether to show UI elements\n */\nasync function updateSingleTaskStatus(\n\ttasksPath,\n\ttaskIdInput,\n\tnewStatus,\n\tdata,\n\tshowUi = true\n) {\n\tif (!isValidTaskStatus(newStatus)) {\n\t\tthrow new Error(\n\t\t\t`Error: Invalid status value: ${newStatus}. 
Use one of: ${TASK_STATUS_OPTIONS.join(', ')}`\n\t\t);\n\t}\n\n\t// Check if it's a subtask (e.g., \"1.2\")\n\tif (taskIdInput.includes('.')) {\n\t\tconst [parentId, subtaskId] = taskIdInput\n\t\t\t.split('.')\n\t\t\t.map((id) => parseInt(id, 10));\n\n\t\t// Find the parent task\n\t\tconst parentTask = data.tasks.find((t) => t.id === parentId);\n\t\tif (!parentTask) {\n\t\t\tthrow new Error(`Parent task ${parentId} not found`);\n\t\t}\n\n\t\t// Find the subtask\n\t\tif (!parentTask.subtasks) {\n\t\t\tthrow new Error(`Parent task ${parentId} has no subtasks`);\n\t\t}\n\n\t\tconst subtask = parentTask.subtasks.find((st) => st.id === subtaskId);\n\t\tif (!subtask) {\n\t\t\tthrow new Error(\n\t\t\t\t`Subtask ${subtaskId} not found in parent task ${parentId}`\n\t\t\t);\n\t\t}\n\n\t\t// Update the subtask status\n\t\tconst oldStatus = subtask.status || 'pending';\n\t\tsubtask.status = newStatus;\n\n\t\tlog(\n\t\t\t'info',\n\t\t\t`Updated subtask ${parentId}.${subtaskId} status from '${oldStatus}' to '${newStatus}'`\n\t\t);\n\n\t\t// Check if all subtasks are done (if setting to 'done')\n\t\tif (\n\t\t\tnewStatus.toLowerCase() === 'done' ||\n\t\t\tnewStatus.toLowerCase() === 'completed'\n\t\t) {\n\t\t\tconst allSubtasksDone = parentTask.subtasks.every(\n\t\t\t\t(st) => st.status === 'done' || st.status === 'completed'\n\t\t\t);\n\n\t\t\t// Suggest updating parent task if all subtasks are done\n\t\t\tif (\n\t\t\t\tallSubtasksDone &&\n\t\t\t\tparentTask.status !== 'done' &&\n\t\t\t\tparentTask.status !== 'completed'\n\t\t\t) {\n\t\t\t\t// Only show suggestion in CLI mode\n\t\t\t\tif (showUi) {\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`All subtasks of parent task ${parentId} are now marked as done.`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t\tconsole.log(\n\t\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t\t`Consider updating the parent task status with: task-master set-status --id=${parentId} 
--status=done`\n\t\t\t\t\t\t)\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// Handle regular task\n\t\tconst taskId = parseInt(taskIdInput, 10);\n\t\tconst task = data.tasks.find((t) => t.id === taskId);\n\n\t\tif (!task) {\n\t\t\tthrow new Error(`Task ${taskId} not found`);\n\t\t}\n\n\t\t// Update the task status\n\t\tconst oldStatus = task.status || 'pending';\n\t\ttask.status = newStatus;\n\n\t\tlog(\n\t\t\t'info',\n\t\t\t`Updated task ${taskId} status from '${oldStatus}' to '${newStatus}'`\n\t\t);\n\n\t\t// If marking as done, also mark all subtasks as done\n\t\tif (\n\t\t\t(newStatus.toLowerCase() === 'done' ||\n\t\t\t\tnewStatus.toLowerCase() === 'completed') &&\n\t\t\ttask.subtasks &&\n\t\t\ttask.subtasks.length > 0\n\t\t) {\n\t\t\tconst pendingSubtasks = task.subtasks.filter(\n\t\t\t\t(st) => st.status !== 'done' && st.status !== 'completed'\n\t\t\t);\n\n\t\t\tif (pendingSubtasks.length > 0) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Also marking ${pendingSubtasks.length} subtasks as '${newStatus}'`\n\t\t\t\t);\n\n\t\t\t\tpendingSubtasks.forEach((subtask) => {\n\t\t\t\t\tsubtask.status = newStatus;\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n}\n\nexport default updateSingleTaskStatus;\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/complexity-report.js", "/**\n * complexity-report.js\n * Direct function implementation for displaying complexity analysis report\n */\n\nimport {\n\treadComplexityReport,\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\n\n/**\n * Direct function wrapper for displaying the complexity report with error handling and caching.\n *\n * @param {Object} args - Command arguments containing reportPath.\n * @param {string} args.reportPath - Explicit path to the complexity report file.\n * @param {Object} log - Logger object\n * @returns {Promise<Object>} - Result object with success status and data/error information\n */\nexport async function complexityReportDirect(args, 
log) {\n\t// Destructure expected args\n\tconst { reportPath } = args;\n\ttry {\n\t\tlog.info(`Getting complexity report with args: ${JSON.stringify(args)}`);\n\n\t\t// Check if reportPath was provided\n\t\tif (!reportPath) {\n\t\t\tlog.error('complexityReportDirect called without reportPath');\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: { code: 'MISSING_ARGUMENT', message: 'reportPath is required' }\n\t\t\t};\n\t\t}\n\n\t\t// Use the provided report path\n\t\tlog.info(`Looking for complexity report at: ${reportPath}`);\n\n\t\t// Generate cache key based on report path\n\t\tconst cacheKey = `complexityReport:${reportPath}`;\n\n\t\t// Define the core action function to read the report\n\t\tconst coreActionFn = async () => {\n\t\t\ttry {\n\t\t\t\t// Enable silent mode to prevent console logs from interfering with JSON response\n\t\t\t\tenableSilentMode();\n\n\t\t\t\tconst report = readComplexityReport(reportPath);\n\n\t\t\t\t// Restore normal logging\n\t\t\t\tdisableSilentMode();\n\n\t\t\t\tif (!report) {\n\t\t\t\t\tlog.warn(`No complexity report found at ${reportPath}`);\n\t\t\t\t\treturn {\n\t\t\t\t\t\tsuccess: false,\n\t\t\t\t\t\terror: {\n\t\t\t\t\t\t\tcode: 'FILE_NOT_FOUND_ERROR',\n\t\t\t\t\t\t\tmessage: `No complexity report found at ${reportPath}. 
Run 'analyze-complexity' first.`\n\t\t\t\t\t\t}\n\t\t\t\t\t};\n\t\t\t\t}\n\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: true,\n\t\t\t\t\tdata: {\n\t\t\t\t\t\treport,\n\t\t\t\t\t\treportPath\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t} catch (error) {\n\t\t\t\t// Make sure to restore normal logging even if there's an error\n\t\t\t\tdisableSilentMode();\n\n\t\t\t\tlog.error(`Error reading complexity report: ${error.message}`);\n\t\t\t\treturn {\n\t\t\t\t\tsuccess: false,\n\t\t\t\t\terror: {\n\t\t\t\t\t\tcode: 'READ_ERROR',\n\t\t\t\t\t\tmessage: error.message\n\t\t\t\t\t}\n\t\t\t\t};\n\t\t\t}\n\t\t};\n\n\t\t// Use the caching utility\n\t\ttry {\n\t\t\tconst result = await coreActionFn();\n\t\t\tlog.info('complexityReportDirect completed');\n\t\t\treturn result;\n\t\t} catch (error) {\n\t\t\t// Ensure silent mode is disabled\n\t\t\tdisableSilentMode();\n\n\t\t\tlog.error(`Unexpected error during complexityReport: ${error.message}`);\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'UNEXPECTED_ERROR',\n\t\t\t\t\tmessage: error.message\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t} catch (error) {\n\t\t// Ensure silent mode is disabled if an outer error occurs\n\t\tdisableSilentMode();\n\n\t\tlog.error(`Error in complexityReportDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'UNEXPECTED_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/tools/list-tags.js", "/**\n * tools/list-tags.js\n * Tool to list all available tags\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { listTagsDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\n\n/**\n * Register the listTags tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerListTagsTool(server) {\n\tserver.addTool({\n\t\tname: 
'list_tags',\n\t\tdescription: 'List all available tags with task counts and metadata',\n\t\tparameters: z.object({\n\t\t\tshowMetadata: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Whether to include metadata in the output (default: false)'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting list-tags with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function\n\t\t\t\tconst result = await listTagsDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tshowMetadata: args.showMetadata,\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error listing tags',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in list-tags tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/scripts/modules/task-manager/find-next-task.js", "import { log } from '../utils.js';\nimport { addComplexityToTask } from '../utils.js';\n\n/**\n * Return the next work 
item:\n * • Prefer an eligible SUBTASK that belongs to any parent task\n * whose own status is `in-progress`.\n * • If no such subtask exists, fall back to the best top-level task\n * (previous behaviour).\n *\n * The function still exports the same name (`findNextTask`) so callers\n * don't need to change. It now always returns an object with\n * ─ id → number (task) or \"parentId.subId\" (subtask)\n * ─ title → string\n * ─ status → string\n * ─ priority → string (\"high\" | \"medium\" | \"low\")\n * ─ dependencies → array (all IDs expressed in the same dotted form)\n * ─ parentId → number (present only when it's a subtask)\n *\n * @param {Object[]} tasks – full array of top-level tasks, each may contain .subtasks[]\n * @param {Object} [complexityReport=null] - Optional complexity report object\n * @returns {Object|null} – next work item or null if nothing is eligible\n */\nfunction findNextTask(tasks, complexityReport = null) {\n\t// ---------- helpers ----------------------------------------------------\n\tconst priorityValues = { high: 3, medium: 2, low: 1 };\n\n\tconst toFullSubId = (parentId, maybeDotId) => {\n\t\t// \"12.3\" -> \"12.3\"\n\t\t// 4 -> \"12.4\" (numeric / short form)\n\t\tif (typeof maybeDotId === 'string' && maybeDotId.includes('.')) {\n\t\t\treturn maybeDotId;\n\t\t}\n\t\treturn `${parentId}.${maybeDotId}`;\n\t};\n\n\t// ---------- build completed-ID set (tasks *and* subtasks) --------------\n\tconst completedIds = new Set();\n\ttasks.forEach((t) => {\n\t\tif (t.status === 'done' || t.status === 'completed') {\n\t\t\tcompletedIds.add(String(t.id));\n\t\t}\n\t\tif (Array.isArray(t.subtasks)) {\n\t\t\tt.subtasks.forEach((st) => {\n\t\t\t\tif (st.status === 'done' || st.status === 'completed') {\n\t\t\t\t\tcompletedIds.add(`${t.id}.${st.id}`);\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t});\n\n\t// ---------- 1) look for eligible subtasks ------------------------------\n\tconst candidateSubtasks = [];\n\n\ttasks\n\t\t.filter((t) => t.status === 
'in-progress' && Array.isArray(t.subtasks))\n\t\t.forEach((parent) => {\n\t\t\tparent.subtasks.forEach((st) => {\n\t\t\t\tconst stStatus = (st.status || 'pending').toLowerCase();\n\t\t\t\tif (stStatus !== 'pending' && stStatus !== 'in-progress') return;\n\n\t\t\t\tconst fullDeps =\n\t\t\t\t\tst.dependencies?.map((d) => toFullSubId(parent.id, d)) ?? [];\n\n\t\t\t\tconst depsSatisfied =\n\t\t\t\t\tfullDeps.length === 0 ||\n\t\t\t\t\tfullDeps.every((depId) => completedIds.has(String(depId)));\n\n\t\t\t\tif (depsSatisfied) {\n\t\t\t\t\tcandidateSubtasks.push({\n\t\t\t\t\t\tid: `${parent.id}.${st.id}`,\n\t\t\t\t\t\ttitle: st.title || `Subtask ${st.id}`,\n\t\t\t\t\t\tstatus: st.status || 'pending',\n\t\t\t\t\t\tpriority: st.priority || parent.priority || 'medium',\n\t\t\t\t\t\tdependencies: fullDeps,\n\t\t\t\t\t\tparentId: parent.id\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t});\n\t\t});\n\n\tif (candidateSubtasks.length > 0) {\n\t\t// sort by priority → dep-count → parent-id → sub-id\n\t\tcandidateSubtasks.sort((a, b) => {\n\t\t\tconst pa = priorityValues[a.priority] ?? 2;\n\t\t\tconst pb = priorityValues[b.priority] ?? 
2;\n\t\t\tif (pb !== pa) return pb - pa;\n\n\t\t\tif (a.dependencies.length !== b.dependencies.length)\n\t\t\t\treturn a.dependencies.length - b.dependencies.length;\n\n\t\t\t// compare parent then sub-id numerically\n\t\t\tconst [aPar, aSub] = a.id.split('.').map(Number);\n\t\t\tconst [bPar, bSub] = b.id.split('.').map(Number);\n\t\t\tif (aPar !== bPar) return aPar - bPar;\n\t\t\treturn aSub - bSub;\n\t\t});\n\t\tconst nextTask = candidateSubtasks[0];\n\n\t\t// Add complexity to the task before returning\n\t\tif (nextTask && complexityReport) {\n\t\t\taddComplexityToTask(nextTask, complexityReport);\n\t\t}\n\n\t\treturn nextTask;\n\t}\n\n\t// ---------- 2) fall back to top-level tasks (original logic) ------------\n\tconst eligibleTasks = tasks.filter((task) => {\n\t\tconst status = (task.status || 'pending').toLowerCase();\n\t\tif (status !== 'pending' && status !== 'in-progress') return false;\n\t\tconst deps = task.dependencies ?? [];\n\t\treturn deps.every((depId) => completedIds.has(String(depId)));\n\t});\n\n\tif (eligibleTasks.length === 0) return null;\n\n\tconst nextTask = eligibleTasks.sort((a, b) => {\n\t\tconst pa = priorityValues[a.priority || 'medium'] ?? 2;\n\t\tconst pb = priorityValues[b.priority || 'medium'] ?? 2;\n\t\tif (pb !== pa) return pb - pa;\n\n\t\tconst da = (a.dependencies ?? []).length;\n\t\tconst db = (b.dependencies ?? 
[]).length;\n\t\tif (da !== db) return da - db;\n\n\t\treturn a.id - b.id;\n\t})[0];\n\n\t// Add complexity to the task before returning\n\tif (nextTask && complexityReport) {\n\t\taddComplexityToTask(nextTask, complexityReport);\n\t}\n\n\treturn nextTask;\n}\n\nexport default findNextTask;\n"], ["/claude-task-master/scripts/modules/task-manager/remove-subtask.js", "import path from 'path';\nimport { log, readJSON, writeJSON } from '../utils.js';\nimport generateTaskFiles from './generate-task-files.js';\n\n/**\n * Remove a subtask from its parent task\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {string} subtaskId - ID of the subtask to remove in format \"parentId.subtaskId\"\n * @param {boolean} convertToTask - Whether to convert the subtask to a standalone task\n * @param {boolean} generateFiles - Whether to regenerate task files after removing the subtask\n * @param {Object} context - Context object containing projectRoot and tag information\n * @param {string} [context.projectRoot] - Project root path\n * @param {string} [context.tag] - Tag for the task\n * @returns {Object|null} The removed subtask if convertToTask is true, otherwise null\n */\nasync function removeSubtask(\n\ttasksPath,\n\tsubtaskId,\n\tconvertToTask = false,\n\tgenerateFiles = false,\n\tcontext = {}\n) {\n\tconst { projectRoot, tag } = context;\n\ttry {\n\t\tlog('info', `Removing subtask ${subtaskId}...`);\n\n\t\t// Read the existing tasks with proper context\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!data || !data.tasks) {\n\t\t\tthrow new Error(`Invalid or missing tasks file at ${tasksPath}`);\n\t\t}\n\n\t\t// Parse the subtask ID (format: \"parentId.subtaskId\")\n\t\tif (!subtaskId.includes('.')) {\n\t\t\tthrow new Error(\n\t\t\t\t`Invalid subtask ID format: ${subtaskId}. 
Expected format: \"parentId.subtaskId\"`\n\t\t\t);\n\t\t}\n\n\t\tconst [parentIdStr, subtaskIdStr] = subtaskId.split('.');\n\t\tconst parentId = parseInt(parentIdStr, 10);\n\t\tconst subtaskIdNum = parseInt(subtaskIdStr, 10);\n\n\t\t// Find the parent task\n\t\tconst parentTask = data.tasks.find((t) => t.id === parentId);\n\t\tif (!parentTask) {\n\t\t\tthrow new Error(`Parent task with ID ${parentId} not found`);\n\t\t}\n\n\t\t// Check if parent has subtasks\n\t\tif (!parentTask.subtasks || parentTask.subtasks.length === 0) {\n\t\t\tthrow new Error(`Parent task ${parentId} has no subtasks`);\n\t\t}\n\n\t\t// Find the subtask to remove\n\t\tconst subtaskIndex = parentTask.subtasks.findIndex(\n\t\t\t(st) => st.id === subtaskIdNum\n\t\t);\n\t\tif (subtaskIndex === -1) {\n\t\t\tthrow new Error(`Subtask ${subtaskId} not found`);\n\t\t}\n\n\t\t// Get a copy of the subtask before removing it\n\t\tconst removedSubtask = { ...parentTask.subtasks[subtaskIndex] };\n\n\t\t// Remove the subtask from the parent\n\t\tparentTask.subtasks.splice(subtaskIndex, 1);\n\n\t\t// If parent has no more subtasks, remove the subtasks array\n\t\tif (parentTask.subtasks.length === 0) {\n\t\t\tparentTask.subtasks = undefined;\n\t\t}\n\n\t\tlet convertedTask = null;\n\n\t\t// Convert the subtask to a standalone task if requested\n\t\tif (convertToTask) {\n\t\t\tlog('info', `Converting subtask ${subtaskId} to a standalone task...`);\n\n\t\t\t// Find the highest task ID to determine the next ID\n\t\t\tconst highestId = Math.max(...data.tasks.map((t) => t.id));\n\t\t\tconst newTaskId = highestId + 1;\n\n\t\t\t// Create the new task from the subtask\n\t\t\tconvertedTask = {\n\t\t\t\tid: newTaskId,\n\t\t\t\ttitle: removedSubtask.title,\n\t\t\t\tdescription: removedSubtask.description || '',\n\t\t\t\tdetails: removedSubtask.details || '',\n\t\t\t\tstatus: removedSubtask.status || 'pending',\n\t\t\t\tdependencies: removedSubtask.dependencies || [],\n\t\t\t\tpriority: parentTask.priority || 'medium' // 
Inherit priority from parent\n\t\t\t};\n\n\t\t\t// Add the parent task as a dependency if not already present\n\t\t\tif (!convertedTask.dependencies.includes(parentId)) {\n\t\t\t\tconvertedTask.dependencies.push(parentId);\n\t\t\t}\n\n\t\t\t// Add the converted task to the tasks array\n\t\t\tdata.tasks.push(convertedTask);\n\n\t\t\tlog('info', `Created new task ${newTaskId} from subtask ${subtaskId}`);\n\t\t} else {\n\t\t\tlog('info', `Subtask ${subtaskId} deleted`);\n\t\t}\n\n\t\t// Write the updated tasks back to the file with proper context\n\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\n\t\t// Generate task files if requested\n\t\tif (generateFiles) {\n\t\t\tlog('info', 'Regenerating task files...');\n\t\t\tawait generateTaskFiles(tasksPath, path.dirname(tasksPath), context);\n\t\t}\n\n\t\treturn convertedTask;\n\t} catch (error) {\n\t\tlog('error', `Error removing subtask: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\nexport default removeSubtask;\n"], ["/claude-task-master/scripts/modules/sync-readme.js", "import fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport { log, findProjectRoot } from './utils.js';\nimport { getProjectName } from './config-manager.js';\nimport listTasks from './task-manager/list-tasks.js';\n\n/**\n * Creates a basic README structure if one doesn't exist\n * @param {string} projectName - Name of the project\n * @returns {string} - Basic README content\n */\nfunction createBasicReadme(projectName) {\n\treturn `# ${projectName}\n\nThis project is managed using Task Master.\n\n`;\n}\n\n/**\n * Create UTM tracking URL for task-master.dev\n * @param {string} projectRoot - The project root path\n * @returns {string} - UTM tracked URL\n */\nfunction createTaskMasterUrl(projectRoot) {\n\t// Get the actual folder name from the project root path\n\tconst folderName = path.basename(projectRoot);\n\n\t// Clean folder name for UTM (replace spaces/special chars with hyphens)\n\tconst cleanFolderName = 
folderName\n\t\t.toLowerCase()\n\t\t.replace(/[^a-z0-9]/g, '-')\n\t\t.replace(/-+/g, '-')\n\t\t.replace(/^-|-$/g, '');\n\n\tconst utmParams = new URLSearchParams({\n\t\tutm_source: 'github-readme',\n\t\tutm_medium: 'readme-export',\n\t\tutm_campaign: cleanFolderName || 'task-sync',\n\t\tutm_content: 'task-export-link'\n\t});\n\n\treturn `https://task-master.dev?${utmParams.toString()}`;\n}\n\n/**\n * Create the start marker with metadata\n * @param {Object} options - Export options\n * @returns {string} - Formatted start marker\n */\nfunction createStartMarker(options) {\n\tconst { timestamp, withSubtasks, status, projectRoot } = options;\n\n\t// Format status filter text\n\tconst statusText = status\n\t\t? `Status filter: ${status}`\n\t\t: 'Status filter: none';\n\tconst subtasksText = withSubtasks ? 'with subtasks' : 'without subtasks';\n\n\t// Create the export info content\n\tconst exportInfo =\n\t\t`🎯 **Taskmaster Export** - ${timestamp}\\n` +\n\t\t`📋 Export: ${subtasksText} • ${statusText}\\n` +\n\t\t`🔗 Powered by [Task Master](${createTaskMasterUrl(projectRoot)})`;\n\n\t// Create a markdown box using code blocks and emojis to mimic our UI style\n\tconst boxContent =\n\t\t`<!-- TASKMASTER_EXPORT_START -->\\n` +\n\t\t`> ${exportInfo.split('\\n').join('\\n> ')}\\n\\n`;\n\n\treturn boxContent;\n}\n\n/**\n * Create the end marker\n * @returns {string} - Formatted end marker\n */\nfunction createEndMarker() {\n\treturn (\n\t\t`\\n> 📋 **End of Taskmaster Export** - Tasks are synced from your project using the \\`sync-readme\\` command.\\n` +\n\t\t`<!-- TASKMASTER_EXPORT_END -->\\n`\n\t);\n}\n\n/**\n * Syncs the current task list to README.md at the project root\n * @param {string} projectRoot - Path to the project root directory\n * @param {Object} options - Options for syncing\n * @param {boolean} options.withSubtasks - Include subtasks in the output (default: false)\n * @param {string} options.status - Filter by status (e.g., 'pending', 'done')\n * @param 
{string} options.tasksPath - Custom path to tasks.json\n * @returns {boolean} - True if sync was successful, false otherwise\n * TODO: Add tag support - this is not currently supported how we want to handle this - Parthy\n */\nexport async function syncTasksToReadme(projectRoot = null, options = {}) {\n\ttry {\n\t\tconst actualProjectRoot = projectRoot || findProjectRoot() || '.';\n\t\tconst { withSubtasks = false, status, tasksPath, tag } = options;\n\n\t\t// Get current tasks using the list-tasks functionality with markdown-readme format\n\t\tconst tasksOutput = await listTasks(\n\t\t\ttasksPath ||\n\t\t\t\tpath.join(actualProjectRoot, '.taskmaster', 'tasks', 'tasks.json'),\n\t\t\tstatus,\n\t\t\tnull,\n\t\t\twithSubtasks,\n\t\t\t'markdown-readme',\n\t\t\t{ projectRoot, tag }\n\t\t);\n\n\t\tif (!tasksOutput) {\n\t\t\tconsole.log(chalk.red('❌ Failed to generate task output'));\n\t\t\treturn false;\n\t\t}\n\n\t\t// Generate timestamp and metadata\n\t\tconst timestamp =\n\t\t\tnew Date().toISOString().replace('T', ' ').substring(0, 19) + ' UTC';\n\t\tconst projectName = getProjectName(actualProjectRoot);\n\n\t\t// Create the export markers with metadata\n\t\tconst startMarker = createStartMarker({\n\t\t\ttimestamp,\n\t\t\twithSubtasks,\n\t\t\tstatus,\n\t\t\tprojectRoot: actualProjectRoot\n\t\t});\n\n\t\tconst endMarker = createEndMarker();\n\n\t\t// Create the complete task section\n\t\tconst taskSection = startMarker + tasksOutput + endMarker;\n\n\t\t// Read current README content\n\t\tconst readmePath = path.join(actualProjectRoot, 'README.md');\n\t\tlet readmeContent = '';\n\t\ttry {\n\t\t\treadmeContent = fs.readFileSync(readmePath, 'utf8');\n\t\t} catch (err) {\n\t\t\tif (err.code === 'ENOENT') {\n\t\t\t\t// Create basic README if it doesn't exist\n\t\t\t\treadmeContent = createBasicReadme(projectName);\n\t\t\t} else {\n\t\t\t\tthrow err;\n\t\t\t}\n\t\t}\n\n\t\t// Check if export markers exist and replace content between them\n\t\tconst startComment = '<!-- 
TASKMASTER_EXPORT_START -->';\n\t\tconst endComment = '<!-- TASKMASTER_EXPORT_END -->';\n\n\t\tlet updatedContent;\n\t\tconst startIndex = readmeContent.indexOf(startComment);\n\t\tconst endIndex = readmeContent.indexOf(endComment);\n\n\t\tif (startIndex !== -1 && endIndex !== -1) {\n\t\t\t// Replace existing task section\n\t\t\tconst beforeTasks = readmeContent.substring(0, startIndex);\n\t\t\tconst afterTasks = readmeContent.substring(endIndex + endComment.length);\n\t\t\tupdatedContent = beforeTasks + taskSection + afterTasks;\n\t\t} else {\n\t\t\t// Append to end of README\n\t\t\tupdatedContent = readmeContent + '\\n' + taskSection;\n\t\t}\n\n\t\t// Write updated content to README\n\t\tfs.writeFileSync(readmePath, updatedContent, 'utf8');\n\n\t\tconsole.log(chalk.green('✅ Successfully synced tasks to README.md'));\n\t\tconsole.log(\n\t\t\tchalk.cyan(\n\t\t\t\t`📋 Export details: ${withSubtasks ? 'with' : 'without'} subtasks${status ? `, status: ${status}` : ''}`\n\t\t\t)\n\t\t);\n\t\tconsole.log(chalk.gray(`📍 Location: ${readmePath}`));\n\n\t\treturn true;\n\t} catch (error) {\n\t\tconsole.log(chalk.red('❌ Failed to sync tasks to README:'), error.message);\n\t\tlog('error', `README sync error: ${error.message}`);\n\t\treturn false;\n\t}\n}\n\nexport default syncTasksToReadme;\n"], ["/claude-task-master/mcp-server/src/tools/set-task-status.js", "/**\n * tools/setTaskStatus.js\n * Tool to set the status of a task\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport {\n\tsetTaskStatusDirect,\n\tnextTaskDirect\n} from '../core/task-master-core.js';\nimport {\n\tfindTasksPath,\n\tfindComplexityReportPath\n} from '../core/utils/path-utils.js';\nimport { TASK_STATUS_OPTIONS } from '../../../src/constants/task-status.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the setTaskStatus tool with the MCP server\n * @param {Object} server - 
FastMCP server instance\n */\nexport function registerSetTaskStatusTool(server) {\n\tserver.addTool({\n\t\tname: 'set_task_status',\n\t\tdescription: 'Set the status of one or more tasks or subtasks.',\n\t\tparameters: z.object({\n\t\t\tid: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t\"Task ID or subtask ID (e.g., '15', '15.2'). Can be comma-separated to update multiple tasks/subtasks at once.\"\n\t\t\t\t),\n\t\t\tstatus: z\n\t\t\t\t.enum(TASK_STATUS_OPTIONS)\n\t\t\t\t.describe(\n\t\t\t\t\t\"New status to set (e.g., 'pending', 'done', 'in-progress', 'review', 'deferred', 'cancelled'.\"\n\t\t\t\t),\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tcomplexityReport: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the complexity report file (relative to project root or absolute)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Optional tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Setting status of task(s) ${args.id} to: ${args.status} ${\n\t\t\t\t\t\targs.tag ? 
`in tag: ${args.tag}` : 'in current tag'\n\t\t\t\t\t}`\n\t\t\t\t);\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tlet complexityReportPath;\n\t\t\t\ttry {\n\t\t\t\t\tcomplexityReportPath = findComplexityReportPath(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\t\tcomplexityReport: args.complexityReport,\n\t\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t\t},\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding complexity report: ${error.message}`);\n\t\t\t\t}\n\n\t\t\t\tconst result = await setTaskStatusDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tstatus: args.status,\n\t\t\t\t\t\tcomplexityReportPath,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(\n\t\t\t\t\t\t`Successfully updated status for task(s) ${args.id} to \"${args.status}\": ${result.data.message}`\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(\n\t\t\t\t\t\t`Failed to update task status: ${result.error?.message || 'Unknown error'}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error setting task status',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in 
setTaskStatus tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Error setting task status: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/ai-providers/gemini-cli.js", "/**\n * src/ai-providers/gemini-cli.js\n *\n * Implementation for interacting with Gemini models via Gemini CLI\n * using the ai-sdk-provider-gemini-cli package.\n */\n\nimport { generateObject, generateText, streamText } from 'ai';\nimport { parse } from 'jsonc-parser';\nimport { BaseAIProvider } from './base-provider.js';\nimport { log } from '../../scripts/modules/utils.js';\n\nlet createGeminiProvider;\n\nasync function loadGeminiCliModule() {\n\tif (!createGeminiProvider) {\n\t\ttry {\n\t\t\tconst mod = await import('ai-sdk-provider-gemini-cli');\n\t\t\tcreateGeminiProvider = mod.createGeminiProvider;\n\t\t} catch (err) {\n\t\t\tthrow new Error(\n\t\t\t\t\"Gemini CLI SDK is not installed. Please install 'ai-sdk-provider-gemini-cli' to use the gemini-cli provider.\"\n\t\t\t);\n\t\t}\n\t}\n}\n\nexport class GeminiCliProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Gemini CLI';\n\t}\n\n\t/**\n\t * Override validateAuth to handle Gemini CLI authentication options\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateAuth(params) {\n\t\t// Gemini CLI is designed to use pre-configured OAuth authentication\n\t\t// Users choose gemini-cli specifically to leverage their existing\n\t\t// gemini auth login credentials, not to use API keys.\n\t\t// We support API keys for compatibility, but the expected usage\n\t\t// is through CLI authentication (no API key required).\n\t\t// No validation needed - the SDK will handle auth internally\n\t}\n\n\t/**\n\t * Creates and returns a Gemini CLI client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} [params.apiKey] - Optional Gemini API key (rarely used with gemini-cli)\n\t * @param {string} 
[params.baseURL] - Optional custom API endpoint\n\t * @returns {Promise<Function>} Gemini CLI client function\n\t * @throws {Error} If initialization fails\n\t */\n\tasync getClient(params) {\n\t\ttry {\n\t\t\t// Load the Gemini CLI module dynamically\n\t\t\tawait loadGeminiCliModule();\n\t\t\t// Primary use case: Use existing gemini CLI authentication\n\t\t\t// Secondary use case: Direct API key (for compatibility)\n\t\t\tlet authOptions = {};\n\n\t\t\tif (params.apiKey && params.apiKey !== 'gemini-cli-no-key-required') {\n\t\t\t\t// API key provided - use it for compatibility\n\t\t\t\tauthOptions = {\n\t\t\t\t\tauthType: 'api-key',\n\t\t\t\t\tapiKey: params.apiKey\n\t\t\t\t};\n\t\t\t} else {\n\t\t\t\t// Expected case: Use gemini CLI authentication via OAuth\n\t\t\t\tauthOptions = {\n\t\t\t\t\tauthType: 'oauth-personal'\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Add baseURL if provided (for custom endpoints)\n\t\t\tif (params.baseURL) {\n\t\t\t\tauthOptions.baseURL = params.baseURL;\n\t\t\t}\n\n\t\t\t// Create and return the provider\n\t\t\treturn createGeminiProvider(authOptions);\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n\n\t/**\n\t * Extracts system messages from the messages array and returns them separately.\n\t * This is needed because ai-sdk-provider-gemini-cli expects system prompts as a separate parameter.\n\t * @param {Array} messages - Array of message objects\n\t * @param {Object} options - Options for system prompt enhancement\n\t * @param {boolean} options.enforceJsonOutput - Whether to add JSON enforcement to system prompt\n\t * @returns {Object} - {systemPrompt: string|undefined, messages: Array}\n\t */\n\t_extractSystemMessage(messages, options = {}) {\n\t\tif (!messages || !Array.isArray(messages)) {\n\t\t\treturn { systemPrompt: undefined, messages: messages || [] };\n\t\t}\n\n\t\tconst systemMessages = messages.filter((msg) => msg.role === 'system');\n\t\tconst nonSystemMessages = messages.filter((msg) 
=> msg.role !== 'system');\n\n\t\t// Combine multiple system messages if present\n\t\tlet systemPrompt =\n\t\t\tsystemMessages.length > 0\n\t\t\t\t? systemMessages.map((msg) => msg.content).join('\\n\\n')\n\t\t\t\t: undefined;\n\n\t\t// Add Gemini CLI specific JSON enforcement if requested\n\t\tif (options.enforceJsonOutput) {\n\t\t\tconst jsonEnforcement = this._getJsonEnforcementPrompt();\n\t\t\tsystemPrompt = systemPrompt\n\t\t\t\t? `${systemPrompt}\\n\\n${jsonEnforcement}`\n\t\t\t\t: jsonEnforcement;\n\t\t}\n\n\t\treturn { systemPrompt, messages: nonSystemMessages };\n\t}\n\n\t/**\n\t * Gets a Gemini CLI specific system prompt to enforce strict JSON output\n\t * @returns {string} JSON enforcement system prompt\n\t */\n\t_getJsonEnforcementPrompt() {\n\t\treturn `CRITICAL: You MUST respond with ONLY valid JSON. Do not include any explanatory text, markdown formatting, code block markers, or conversational phrases like \"Here is\" or \"Of course\". Your entire response must be parseable JSON that starts with { or [ and ends with } or ]. 
No exceptions.`;\n\t}\n\n\t/**\n\t * Checks if a string is valid JSON\n\t * @param {string} text - Text to validate\n\t * @returns {boolean} True if valid JSON\n\t */\n\t_isValidJson(text) {\n\t\tif (!text || typeof text !== 'string') {\n\t\t\treturn false;\n\t\t}\n\n\t\ttry {\n\t\t\tJSON.parse(text.trim());\n\t\t\treturn true;\n\t\t} catch {\n\t\t\treturn false;\n\t\t}\n\t}\n\n\t/**\n\t * Detects if the user prompt is requesting JSON output\n\t * @param {Array} messages - Array of message objects\n\t * @returns {boolean} True if JSON output is likely expected\n\t */\n\t_detectJsonRequest(messages) {\n\t\tconst userMessages = messages.filter((msg) => msg.role === 'user');\n\t\tconst combinedText = userMessages\n\t\t\t.map((msg) => msg.content)\n\t\t\t.join(' ')\n\t\t\t.toLowerCase();\n\n\t\t// Look for indicators that JSON output is expected\n\t\tconst jsonIndicators = [\n\t\t\t'json',\n\t\t\t'respond only with',\n\t\t\t'return only',\n\t\t\t'output only',\n\t\t\t'format:',\n\t\t\t'structure:',\n\t\t\t'schema:',\n\t\t\t'{\"',\n\t\t\t'[{',\n\t\t\t'subtasks',\n\t\t\t'array',\n\t\t\t'object'\n\t\t];\n\n\t\treturn jsonIndicators.some((indicator) => combinedText.includes(indicator));\n\t}\n\n\t/**\n\t * Simplifies complex prompts for gemini-cli to improve JSON output compliance\n\t * @param {Array} messages - Array of message objects\n\t * @returns {Array} Simplified messages array\n\t */\n\t_simplifyJsonPrompts(messages) {\n\t\t// First, check if this is an expand-task operation by looking at the system message\n\t\tconst systemMsg = messages.find((m) => m.role === 'system');\n\t\tconst isExpandTask =\n\t\t\tsystemMsg &&\n\t\t\tsystemMsg.content.includes(\n\t\t\t\t'You are an AI assistant helping with task breakdown. 
Generate exactly'\n\t\t\t);\n\n\t\tif (!isExpandTask) {\n\t\t\treturn messages; // Not an expand task, return unchanged\n\t\t}\n\n\t\t// Extract subtask count from system message\n\t\tconst subtaskCountMatch = systemMsg.content.match(\n\t\t\t/Generate exactly (\\d+) subtasks/\n\t\t);\n\t\tconst subtaskCount = subtaskCountMatch ? subtaskCountMatch[1] : '10';\n\n\t\tlog(\n\t\t\t'debug',\n\t\t\t`${this.name} detected expand-task operation, simplifying for ${subtaskCount} subtasks`\n\t\t);\n\n\t\treturn messages.map((msg) => {\n\t\t\tif (msg.role !== 'user') {\n\t\t\t\treturn msg;\n\t\t\t}\n\n\t\t\t// For expand-task user messages, create a much simpler, more direct prompt\n\t\t\t// that doesn't depend on specific task content\n\t\t\tconst simplifiedPrompt = `Generate exactly ${subtaskCount} subtasks in the following JSON format.\n\nCRITICAL INSTRUCTION: You must respond with ONLY valid JSON. No explanatory text, no \"Here is\", no \"Of course\", no markdown - just the JSON object.\n\nRequired JSON structure:\n{\n \"subtasks\": [\n {\n \"id\": 1,\n \"title\": \"Specific actionable task title\",\n \"description\": \"Clear task description\",\n \"dependencies\": [],\n \"details\": \"Implementation details and guidance\",\n \"testStrategy\": \"Testing approach\"\n }\n ]\n}\n\nGenerate ${subtaskCount} subtasks based on the original task context. Return ONLY the JSON object.`;\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`${this.name} simplified user prompt for better JSON compliance`\n\t\t\t);\n\t\t\treturn { ...msg, content: simplifiedPrompt };\n\t\t});\n\t}\n\n\t/**\n\t * Extract JSON from Gemini's response using a tolerant parser.\n\t *\n\t * Optimized approach that progressively tries different parsing strategies:\n\t * 1. Direct parsing after cleanup\n\t * 2. Smart boundary detection with single-pass analysis\n\t * 3. 
Limited character-by-character fallback for edge cases\n\t *\n\t * @param {string} text - Raw text which may contain JSON\n\t * @returns {string} A valid JSON string if extraction succeeds, otherwise the original text\n\t */\n\textractJson(text) {\n\t\tif (!text || typeof text !== 'string') {\n\t\t\treturn text;\n\t\t}\n\n\t\tlet content = text.trim();\n\n\t\t// Early exit for very short content\n\t\tif (content.length < 2) {\n\t\t\treturn text;\n\t\t}\n\n\t\t// Strip common wrappers in a single pass\n\t\tcontent = content\n\t\t\t// Remove markdown fences\n\t\t\t.replace(/^.*?```(?:json)?\\s*([\\s\\S]*?)\\s*```.*$/i, '$1')\n\t\t\t// Remove variable declarations\n\t\t\t.replace(/^\\s*(?:const|let|var)\\s+\\w+\\s*=\\s*([\\s\\S]*?)(?:;|\\s*)$/i, '$1')\n\t\t\t// Remove common prefixes\n\t\t\t.replace(/^(?:Here's|The)\\s+(?:the\\s+)?JSON.*?[:]\\s*/i, '')\n\t\t\t.trim();\n\n\t\t// Find the first JSON-like structure\n\t\tconst firstObj = content.indexOf('{');\n\t\tconst firstArr = content.indexOf('[');\n\n\t\tif (firstObj === -1 && firstArr === -1) {\n\t\t\treturn text;\n\t\t}\n\n\t\tconst start =\n\t\t\tfirstArr === -1\n\t\t\t\t? firstObj\n\t\t\t\t: firstObj === -1\n\t\t\t\t\t? 
firstArr\n\t\t\t\t\t: Math.min(firstObj, firstArr);\n\t\tcontent = content.slice(start);\n\n\t\t// Optimized parsing function with error collection\n\t\tconst tryParse = (value) => {\n\t\t\tif (!value || value.length < 2) return undefined;\n\n\t\t\tconst errors = [];\n\t\t\ttry {\n\t\t\t\tconst result = parse(value, errors, {\n\t\t\t\t\tallowTrailingComma: true,\n\t\t\t\t\tallowEmptyContent: false\n\t\t\t\t});\n\t\t\t\tif (errors.length === 0 && result !== undefined) {\n\t\t\t\t\treturn JSON.stringify(result, null, 2);\n\t\t\t\t}\n\t\t\t} catch {\n\t\t\t\t// Parsing failed completely\n\t\t\t}\n\t\t\treturn undefined;\n\t\t};\n\n\t\t// Try parsing the full content first\n\t\tconst fullParse = tryParse(content);\n\t\tif (fullParse !== undefined) {\n\t\t\treturn fullParse;\n\t\t}\n\n\t\t// Smart boundary detection - single pass with optimizations\n\t\tconst openChar = content[0];\n\t\tconst closeChar = openChar === '{' ? '}' : ']';\n\n\t\tlet depth = 0;\n\t\tlet inString = false;\n\t\tlet escapeNext = false;\n\t\tlet lastValidEnd = -1;\n\n\t\t// Single-pass boundary detection with early termination\n\t\tfor (let i = 0; i < content.length && i < 10000; i++) {\n\t\t\t// Limit scan for performance\n\t\t\tconst char = content[i];\n\n\t\t\tif (escapeNext) {\n\t\t\t\tescapeNext = false;\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tif (char === '\\\\') {\n\t\t\t\tescapeNext = true;\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tif (char === '\"') {\n\t\t\t\tinString = !inString;\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tif (inString) continue;\n\n\t\t\tif (char === openChar) {\n\t\t\t\tdepth++;\n\t\t\t} else if (char === closeChar) {\n\t\t\t\tdepth--;\n\t\t\t\tif (depth === 0) {\n\t\t\t\t\tlastValidEnd = i + 1;\n\t\t\t\t\t// Try parsing immediately on first valid boundary\n\t\t\t\t\tconst candidate = content.slice(0, lastValidEnd);\n\t\t\t\t\tconst parsed = tryParse(candidate);\n\t\t\t\t\tif (parsed !== undefined) {\n\t\t\t\t\t\treturn parsed;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// If 
we found valid boundaries but parsing failed, try limited fallback\n\t\tif (lastValidEnd > 0) {\n\t\t\tconst maxAttempts = Math.min(5, Math.floor(lastValidEnd / 100)); // Limit attempts\n\t\t\tfor (let i = 0; i < maxAttempts; i++) {\n\t\t\t\tconst testEnd = Math.max(\n\t\t\t\t\tlastValidEnd - i * 50,\n\t\t\t\t\tMath.floor(lastValidEnd * 0.8)\n\t\t\t\t);\n\t\t\t\tconst candidate = content.slice(0, testEnd);\n\t\t\t\tconst parsed = tryParse(candidate);\n\t\t\t\tif (parsed !== undefined) {\n\t\t\t\t\treturn parsed;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn text;\n\t}\n\n\t/**\n\t * Generates text using Gemini CLI model\n\t * Overrides base implementation to properly handle system messages and enforce JSON output when needed\n\t */\n\tasync generateText(params) {\n\t\ttry {\n\t\t\tthis.validateParams(params);\n\t\t\tthis.validateMessages(params.messages);\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`Generating ${this.name} text with model: ${params.modelId}`\n\t\t\t);\n\n\t\t\t// Detect if JSON output is expected and enforce it for better gemini-cli compatibility\n\t\t\tconst enforceJsonOutput = this._detectJsonRequest(params.messages);\n\n\t\t\t// Debug logging to understand what's happening\n\t\t\tlog('debug', `${this.name} JSON detection analysis:`, {\n\t\t\t\tenforceJsonOutput,\n\t\t\t\tmessageCount: params.messages.length,\n\t\t\t\tmessages: params.messages.map((msg) => ({\n\t\t\t\t\trole: msg.role,\n\t\t\t\t\tcontentPreview: msg.content\n\t\t\t\t\t\t? 
msg.content.substring(0, 200) + '...'\n\t\t\t\t\t\t: 'empty'\n\t\t\t\t}))\n\t\t\t});\n\n\t\t\tif (enforceJsonOutput) {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`${this.name} detected JSON request - applying strict JSON enforcement system prompt`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// For gemini-cli, simplify complex prompts before processing\n\t\t\tlet processedMessages = params.messages;\n\t\t\tif (enforceJsonOutput) {\n\t\t\t\tprocessedMessages = this._simplifyJsonPrompts(params.messages);\n\t\t\t}\n\n\t\t\t// Extract system messages for separate handling with optional JSON enforcement\n\t\t\tconst { systemPrompt, messages } = this._extractSystemMessage(\n\t\t\t\tprocessedMessages,\n\t\t\t\t{ enforceJsonOutput }\n\t\t\t);\n\n\t\t\t// Debug the final system prompt being sent\n\t\t\tlog('debug', `${this.name} final system prompt:`, {\n\t\t\t\tsystemPromptLength: systemPrompt ? systemPrompt.length : 0,\n\t\t\t\tsystemPromptPreview: systemPrompt\n\t\t\t\t\t? systemPrompt.substring(0, 300) + '...'\n\t\t\t\t\t: 'none',\n\t\t\t\tfinalMessageCount: messages.length\n\t\t\t});\n\n\t\t\tconst client = await this.getClient(params);\n\t\t\tconst result = await generateText({\n\t\t\t\tmodel: client(params.modelId),\n\t\t\t\tsystem: systemPrompt,\n\t\t\t\tmessages: messages,\n\t\t\t\tmaxTokens: params.maxTokens,\n\t\t\t\ttemperature: params.temperature\n\t\t\t});\n\n\t\t\t// If we detected a JSON request and gemini-cli returned conversational text,\n\t\t\t// attempt to extract JSON from the response\n\t\t\tlet finalText = result.text;\n\t\t\tif (enforceJsonOutput && result.text && !this._isValidJson(result.text)) {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`${this.name} response appears conversational, attempting JSON extraction`\n\t\t\t\t);\n\n\t\t\t\t// Log first 1000 chars of the response to see what Gemini actually returned\n\t\t\t\tlog('debug', `${this.name} raw response preview:`, {\n\t\t\t\t\tresponseLength: result.text.length,\n\t\t\t\t\tresponseStart: 
result.text.substring(0, 1000)\n\t\t\t\t});\n\n\t\t\t\tconst extractedJson = this.extractJson(result.text);\n\t\t\t\tif (this._isValidJson(extractedJson)) {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'debug',\n\t\t\t\t\t\t`${this.name} successfully extracted JSON from conversational response`\n\t\t\t\t\t);\n\t\t\t\t\tfinalText = extractedJson;\n\t\t\t\t} else {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'debug',\n\t\t\t\t\t\t`${this.name} JSON extraction failed, returning original response`\n\t\t\t\t\t);\n\n\t\t\t\t\t// Log what extraction returned to debug why it failed\n\t\t\t\t\tlog('debug', `${this.name} extraction result preview:`, {\n\t\t\t\t\t\textractedLength: extractedJson ? extractedJson.length : 0,\n\t\t\t\t\t\textractedStart: extractedJson\n\t\t\t\t\t\t\t? extractedJson.substring(0, 500)\n\t\t\t\t\t\t\t: 'null'\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`${this.name} generateText completed successfully for model: ${params.modelId}`\n\t\t\t);\n\n\t\t\treturn {\n\t\t\t\ttext: finalText,\n\t\t\t\tusage: {\n\t\t\t\t\tinputTokens: result.usage?.promptTokens,\n\t\t\t\t\toutputTokens: result.usage?.completionTokens,\n\t\t\t\t\ttotalTokens: result.usage?.totalTokens\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tthis.handleError('text generation', error);\n\t\t}\n\t}\n\n\t/**\n\t * Streams text using Gemini CLI model\n\t * Overrides base implementation to properly handle system messages and enforce JSON output when needed\n\t */\n\tasync streamText(params) {\n\t\ttry {\n\t\t\tthis.validateParams(params);\n\t\t\tthis.validateMessages(params.messages);\n\n\t\t\tlog('debug', `Streaming ${this.name} text with model: ${params.modelId}`);\n\n\t\t\t// Detect if JSON output is expected and enforce it for better gemini-cli compatibility\n\t\t\tconst enforceJsonOutput = this._detectJsonRequest(params.messages);\n\n\t\t\t// Debug logging to understand what's happening\n\t\t\tlog('debug', `${this.name} JSON detection analysis:`, 
{\n\t\t\t\tenforceJsonOutput,\n\t\t\t\tmessageCount: params.messages.length,\n\t\t\t\tmessages: params.messages.map((msg) => ({\n\t\t\t\t\trole: msg.role,\n\t\t\t\t\tcontentPreview: msg.content\n\t\t\t\t\t\t? msg.content.substring(0, 200) + '...'\n\t\t\t\t\t\t: 'empty'\n\t\t\t\t}))\n\t\t\t});\n\n\t\t\tif (enforceJsonOutput) {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`${this.name} detected JSON request - applying strict JSON enforcement system prompt`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Extract system messages for separate handling with optional JSON enforcement\n\t\t\tconst { systemPrompt, messages } = this._extractSystemMessage(\n\t\t\t\tparams.messages,\n\t\t\t\t{ enforceJsonOutput }\n\t\t\t);\n\n\t\t\tconst client = await this.getClient(params);\n\t\t\tconst stream = await streamText({\n\t\t\t\tmodel: client(params.modelId),\n\t\t\t\tsystem: systemPrompt,\n\t\t\t\tmessages: messages,\n\t\t\t\tmaxTokens: params.maxTokens,\n\t\t\t\ttemperature: params.temperature\n\t\t\t});\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`${this.name} streamText initiated successfully for model: ${params.modelId}`\n\t\t\t);\n\n\t\t\t// Note: For streaming, we can't intercept and modify the response in real-time\n\t\t\t// The JSON extraction would need to happen on the consuming side\n\t\t\treturn stream;\n\t\t} catch (error) {\n\t\t\tthis.handleError('text streaming', error);\n\t\t}\n\t}\n\n\t/**\n\t * Generates a structured object using Gemini CLI model\n\t * Overrides base implementation to handle Gemini-specific JSON formatting issues and system messages\n\t */\n\tasync generateObject(params) {\n\t\ttry {\n\t\t\t// First try the standard generateObject from base class\n\t\t\treturn await super.generateObject(params);\n\t\t} catch (error) {\n\t\t\t// If it's a JSON parsing error, try to extract and parse JSON manually\n\t\t\tif (error.message?.includes('JSON') || error.message?.includes('parse')) {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`Gemini CLI generateObject failed with 
parsing error, attempting manual extraction`\n\t\t\t\t);\n\n\t\t\t\ttry {\n\t\t\t\t\t// Validate params first\n\t\t\t\t\tthis.validateParams(params);\n\t\t\t\t\tthis.validateMessages(params.messages);\n\n\t\t\t\t\tif (!params.schema) {\n\t\t\t\t\t\tthrow new Error('Schema is required for object generation');\n\t\t\t\t\t}\n\t\t\t\t\tif (!params.objectName) {\n\t\t\t\t\t\tthrow new Error('Object name is required for object generation');\n\t\t\t\t\t}\n\n\t\t\t\t\t// Extract system messages for separate handling with JSON enforcement\n\t\t\t\t\tconst { systemPrompt, messages } = this._extractSystemMessage(\n\t\t\t\t\t\tparams.messages,\n\t\t\t\t\t\t{ enforceJsonOutput: true }\n\t\t\t\t\t);\n\n\t\t\t\t\t// Call generateObject directly with our client\n\t\t\t\t\tconst client = await this.getClient(params);\n\t\t\t\t\tconst result = await generateObject({\n\t\t\t\t\t\tmodel: client(params.modelId),\n\t\t\t\t\t\tsystem: systemPrompt,\n\t\t\t\t\t\tmessages: messages,\n\t\t\t\t\t\tschema: params.schema,\n\t\t\t\t\t\tmode: 'json', // Use json mode instead of auto for Gemini\n\t\t\t\t\t\tmaxTokens: params.maxTokens,\n\t\t\t\t\t\ttemperature: params.temperature\n\t\t\t\t\t});\n\n\t\t\t\t\t// If we get rawResponse text, try to extract JSON from it\n\t\t\t\t\tif (result.rawResponse?.text && !result.object) {\n\t\t\t\t\t\tconst extractedJson = this.extractJson(result.rawResponse.text);\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tresult.object = JSON.parse(extractedJson);\n\t\t\t\t\t\t} catch (parseError) {\n\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t'error',\n\t\t\t\t\t\t\t\t`Failed to parse extracted JSON: ${parseError.message}`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t'debug',\n\t\t\t\t\t\t\t\t`Extracted JSON: ${extractedJson.substring(0, 500)}...`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t\t`Gemini CLI returned invalid JSON that could not be parsed: ${parseError.message}`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn 
{\n\t\t\t\t\t\tobject: result.object,\n\t\t\t\t\t\tusage: {\n\t\t\t\t\t\t\tinputTokens: result.usage?.promptTokens,\n\t\t\t\t\t\t\toutputTokens: result.usage?.completionTokens,\n\t\t\t\t\t\t\ttotalTokens: result.usage?.totalTokens\n\t\t\t\t\t\t}\n\t\t\t\t\t};\n\t\t\t\t} catch (retryError) {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'error',\n\t\t\t\t\t\t`Gemini CLI manual JSON extraction failed: ${retryError.message}`\n\t\t\t\t\t);\n\t\t\t\t\t// Re-throw the original error with more context\n\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t`${this.name} failed to generate valid JSON object: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// For non-parsing errors, just re-throw\n\t\t\tthrow error;\n\t\t}\n\t}\n\n\tgetRequiredApiKeyName() {\n\t\treturn 'GEMINI_API_KEY';\n\t}\n\n\tisRequiredApiKey() {\n\t\treturn false;\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/custom-sdk/claude-code/language-model.js", "/**\n * @fileoverview Claude Code Language Model implementation\n */\n\nimport { NoSuchModelError } from '@ai-sdk/provider';\nimport { generateId } from '@ai-sdk/provider-utils';\nimport { convertToClaudeCodeMessages } from './message-converter.js';\nimport { extractJson } from './json-extractor.js';\nimport { createAPICallError, createAuthenticationError } from './errors.js';\n\nlet query;\nlet AbortError;\n\nasync function loadClaudeCodeModule() {\n\tif (!query || !AbortError) {\n\t\ttry {\n\t\t\tconst mod = await import('@anthropic-ai/claude-code');\n\t\t\tquery = mod.query;\n\t\t\tAbortError = mod.AbortError;\n\t\t} catch (err) {\n\t\t\tthrow new Error(\n\t\t\t\t\"Claude Code SDK is not installed. 
Please install '@anthropic-ai/claude-code' to use the claude-code provider.\"\n\t\t\t);\n\t\t}\n\t}\n}\n\n/**\n * @typedef {import('./types.js').ClaudeCodeSettings} ClaudeCodeSettings\n * @typedef {import('./types.js').ClaudeCodeModelId} ClaudeCodeModelId\n * @typedef {import('./types.js').ClaudeCodeLanguageModelOptions} ClaudeCodeLanguageModelOptions\n */\n\nconst modelMap = {\n\topus: 'opus',\n\tsonnet: 'sonnet'\n};\n\nexport class ClaudeCodeLanguageModel {\n\tspecificationVersion = 'v1';\n\tdefaultObjectGenerationMode = 'json';\n\tsupportsImageUrls = false;\n\tsupportsStructuredOutputs = false;\n\n\t/** @type {ClaudeCodeModelId} */\n\tmodelId;\n\n\t/** @type {ClaudeCodeSettings} */\n\tsettings;\n\n\t/** @type {string|undefined} */\n\tsessionId;\n\n\t/**\n\t * @param {ClaudeCodeLanguageModelOptions} options\n\t */\n\tconstructor(options) {\n\t\tthis.modelId = options.id;\n\t\tthis.settings = options.settings ?? {};\n\n\t\t// Validate model ID format\n\t\tif (\n\t\t\t!this.modelId ||\n\t\t\ttypeof this.modelId !== 'string' ||\n\t\t\tthis.modelId.trim() === ''\n\t\t) {\n\t\t\tthrow new NoSuchModelError({\n\t\t\t\tmodelId: this.modelId,\n\t\t\t\tmodelType: 'languageModel'\n\t\t\t});\n\t\t}\n\t}\n\n\tget provider() {\n\t\treturn 'claude-code';\n\t}\n\n\t/**\n\t * Get the model name for Claude Code CLI\n\t * @returns {string}\n\t */\n\tgetModel() {\n\t\tconst mapped = modelMap[this.modelId];\n\t\treturn mapped ?? 
this.modelId;\n\t}\n\n\t/**\n\t * Generate unsupported parameter warnings\n\t * @param {Object} options - Generation options\n\t * @returns {Array} Warnings array\n\t */\n\tgenerateUnsupportedWarnings(options) {\n\t\tconst warnings = [];\n\t\tconst unsupportedParams = [];\n\n\t\t// Check for unsupported parameters\n\t\tif (options.temperature !== undefined)\n\t\t\tunsupportedParams.push('temperature');\n\t\tif (options.maxTokens !== undefined) unsupportedParams.push('maxTokens');\n\t\tif (options.topP !== undefined) unsupportedParams.push('topP');\n\t\tif (options.topK !== undefined) unsupportedParams.push('topK');\n\t\tif (options.presencePenalty !== undefined)\n\t\t\tunsupportedParams.push('presencePenalty');\n\t\tif (options.frequencyPenalty !== undefined)\n\t\t\tunsupportedParams.push('frequencyPenalty');\n\t\tif (options.stopSequences !== undefined && options.stopSequences.length > 0)\n\t\t\tunsupportedParams.push('stopSequences');\n\t\tif (options.seed !== undefined) unsupportedParams.push('seed');\n\n\t\tif (unsupportedParams.length > 0) {\n\t\t\t// Add a warning for each unsupported parameter\n\t\t\tfor (const param of unsupportedParams) {\n\t\t\t\twarnings.push({\n\t\t\t\t\ttype: 'unsupported-setting',\n\t\t\t\t\tsetting: param,\n\t\t\t\t\tdetails: `Claude Code CLI does not support the ${param} parameter. 
It will be ignored.`\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\treturn warnings;\n\t}\n\n\t/**\n\t * Generate text using Claude Code\n\t * @param {Object} options - Generation options\n\t * @returns {Promise<Object>}\n\t */\n\tasync doGenerate(options) {\n\t\tawait loadClaudeCodeModule();\n\t\tconst { messagesPrompt } = convertToClaudeCodeMessages(\n\t\t\toptions.prompt,\n\t\t\toptions.mode\n\t\t);\n\n\t\tconst abortController = new AbortController();\n\t\tif (options.abortSignal) {\n\t\t\toptions.abortSignal.addEventListener('abort', () =>\n\t\t\t\tabortController.abort()\n\t\t\t);\n\t\t}\n\n\t\tconst queryOptions = {\n\t\t\tmodel: this.getModel(),\n\t\t\tabortController,\n\t\t\tresume: this.sessionId,\n\t\t\tpathToClaudeCodeExecutable: this.settings.pathToClaudeCodeExecutable,\n\t\t\tcustomSystemPrompt: this.settings.customSystemPrompt,\n\t\t\tappendSystemPrompt: this.settings.appendSystemPrompt,\n\t\t\tmaxTurns: this.settings.maxTurns,\n\t\t\tmaxThinkingTokens: this.settings.maxThinkingTokens,\n\t\t\tcwd: this.settings.cwd,\n\t\t\texecutable: this.settings.executable,\n\t\t\texecutableArgs: this.settings.executableArgs,\n\t\t\tpermissionMode: this.settings.permissionMode,\n\t\t\tpermissionPromptToolName: this.settings.permissionPromptToolName,\n\t\t\tcontinue: this.settings.continue,\n\t\t\tallowedTools: this.settings.allowedTools,\n\t\t\tdisallowedTools: this.settings.disallowedTools,\n\t\t\tmcpServers: this.settings.mcpServers\n\t\t};\n\n\t\tlet text = '';\n\t\tlet usage = { promptTokens: 0, completionTokens: 0 };\n\t\tlet finishReason = 'stop';\n\t\tlet costUsd;\n\t\tlet durationMs;\n\t\tlet rawUsage;\n\t\tconst warnings = this.generateUnsupportedWarnings(options);\n\n\t\ttry {\n\t\t\tconst response = query({\n\t\t\t\tprompt: messagesPrompt,\n\t\t\t\toptions: queryOptions\n\t\t\t});\n\n\t\t\tfor await (const message of response) {\n\t\t\t\tif (message.type === 'assistant') {\n\t\t\t\t\ttext += message.message.content\n\t\t\t\t\t\t.map((c) => (c.type === 'text' ? 
c.text : ''))\n\t\t\t\t\t\t.join('');\n\t\t\t\t} else if (message.type === 'result') {\n\t\t\t\t\tthis.sessionId = message.session_id;\n\t\t\t\t\tcostUsd = message.total_cost_usd;\n\t\t\t\t\tdurationMs = message.duration_ms;\n\n\t\t\t\t\tif ('usage' in message) {\n\t\t\t\t\t\trawUsage = message.usage;\n\t\t\t\t\t\tusage = {\n\t\t\t\t\t\t\tpromptTokens:\n\t\t\t\t\t\t\t\t(message.usage.cache_creation_input_tokens ?? 0) +\n\t\t\t\t\t\t\t\t(message.usage.cache_read_input_tokens ?? 0) +\n\t\t\t\t\t\t\t\t(message.usage.input_tokens ?? 0),\n\t\t\t\t\t\t\tcompletionTokens: message.usage.output_tokens ?? 0\n\t\t\t\t\t\t};\n\t\t\t\t\t}\n\n\t\t\t\t\tif (message.subtype === 'error_max_turns') {\n\t\t\t\t\t\tfinishReason = 'length';\n\t\t\t\t\t} else if (message.subtype === 'error_during_execution') {\n\t\t\t\t\t\tfinishReason = 'error';\n\t\t\t\t\t}\n\t\t\t\t} else if (message.type === 'system' && message.subtype === 'init') {\n\t\t\t\t\tthis.sessionId = message.session_id;\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// -------------------------------------------------------------\n\t\t\t// Work-around for Claude-Code CLI/SDK JSON truncation bug (#913)\n\t\t\t// -------------------------------------------------------------\n\t\t\t// If the SDK throws a JSON SyntaxError *but* we already hold some\n\t\t\t// buffered text, assume the response was truncated by the CLI.\n\t\t\t// We keep the accumulated text, mark the finish reason, push a\n\t\t\t// provider-warning and *skip* the normal error handling so Task\n\t\t\t// Master can continue processing.\n\t\t\tconst isJsonTruncation =\n\t\t\t\terror instanceof SyntaxError &&\n\t\t\t\t/JSON/i.test(error.message || '') &&\n\t\t\t\t(error.message.includes('position') ||\n\t\t\t\t\terror.message.includes('Unexpected end'));\n\t\t\tif (isJsonTruncation && text && text.length > 0) {\n\t\t\t\twarnings.push({\n\t\t\t\t\ttype: 'provider-warning',\n\t\t\t\t\tdetails:\n\t\t\t\t\t\t'Claude Code SDK emitted a JSON parse error but Task 
Master recovered buffered text (possible CLI truncation).'\n\t\t\t\t});\n\t\t\t\tfinishReason = 'truncated';\n\t\t\t\t// Skip re-throwing: fall through so the caller receives usable data\n\t\t\t} else {\n\t\t\t\tif (error instanceof AbortError) {\n\t\t\t\t\tthrow options.abortSignal?.aborted\n\t\t\t\t\t\t? options.abortSignal.reason\n\t\t\t\t\t\t: error;\n\t\t\t\t}\n\n\t\t\t\t// Check for authentication errors\n\t\t\t\tif (\n\t\t\t\t\terror.message?.includes('not logged in') ||\n\t\t\t\t\terror.message?.includes('authentication') ||\n\t\t\t\t\terror.exitCode === 401\n\t\t\t\t) {\n\t\t\t\t\tthrow createAuthenticationError({\n\t\t\t\t\t\tmessage:\n\t\t\t\t\t\t\terror.message ||\n\t\t\t\t\t\t\t'Authentication failed. Please ensure Claude Code CLI is properly authenticated.'\n\t\t\t\t\t});\n\t\t\t\t}\n\n\t\t\t\t// Wrap other errors with API call error\n\t\t\t\tthrow createAPICallError({\n\t\t\t\t\tmessage: error.message || 'Claude Code CLI error',\n\t\t\t\t\tcode: error.code,\n\t\t\t\t\texitCode: error.exitCode,\n\t\t\t\t\tstderr: error.stderr,\n\t\t\t\t\tpromptExcerpt: messagesPrompt.substring(0, 200),\n\t\t\t\t\tisRetryable: error.code === 'ENOENT' || error.code === 'ECONNREFUSED'\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\n\t\t// Extract JSON if in object-json mode\n\t\tif (options.mode?.type === 'object-json' && text) {\n\t\t\ttext = extractJson(text);\n\t\t}\n\n\t\treturn {\n\t\t\ttext: text || undefined,\n\t\t\tusage,\n\t\t\tfinishReason,\n\t\t\trawCall: {\n\t\t\t\trawPrompt: messagesPrompt,\n\t\t\t\trawSettings: queryOptions\n\t\t\t},\n\t\t\twarnings: warnings.length > 0 ? 
warnings : undefined,\n\t\t\tresponse: {\n\t\t\t\tid: generateId(),\n\t\t\t\ttimestamp: new Date(),\n\t\t\t\tmodelId: this.modelId\n\t\t\t},\n\t\t\trequest: {\n\t\t\t\tbody: messagesPrompt\n\t\t\t},\n\t\t\tproviderMetadata: {\n\t\t\t\t'claude-code': {\n\t\t\t\t\t...(this.sessionId !== undefined && { sessionId: this.sessionId }),\n\t\t\t\t\t...(costUsd !== undefined && { costUsd }),\n\t\t\t\t\t...(durationMs !== undefined && { durationMs }),\n\t\t\t\t\t...(rawUsage !== undefined && { rawUsage })\n\t\t\t\t}\n\t\t\t}\n\t\t};\n\t}\n\n\t/**\n\t * Stream text using Claude Code\n\t * @param {Object} options - Stream options\n\t * @returns {Promise<Object>}\n\t */\n\tasync doStream(options) {\n\t\tawait loadClaudeCodeModule();\n\t\tconst { messagesPrompt } = convertToClaudeCodeMessages(\n\t\t\toptions.prompt,\n\t\t\toptions.mode\n\t\t);\n\n\t\tconst abortController = new AbortController();\n\t\tif (options.abortSignal) {\n\t\t\toptions.abortSignal.addEventListener('abort', () =>\n\t\t\t\tabortController.abort()\n\t\t\t);\n\t\t}\n\n\t\tconst queryOptions = {\n\t\t\tmodel: this.getModel(),\n\t\t\tabortController,\n\t\t\tresume: this.sessionId,\n\t\t\tpathToClaudeCodeExecutable: this.settings.pathToClaudeCodeExecutable,\n\t\t\tcustomSystemPrompt: this.settings.customSystemPrompt,\n\t\t\tappendSystemPrompt: this.settings.appendSystemPrompt,\n\t\t\tmaxTurns: this.settings.maxTurns,\n\t\t\tmaxThinkingTokens: this.settings.maxThinkingTokens,\n\t\t\tcwd: this.settings.cwd,\n\t\t\texecutable: this.settings.executable,\n\t\t\texecutableArgs: this.settings.executableArgs,\n\t\t\tpermissionMode: this.settings.permissionMode,\n\t\t\tpermissionPromptToolName: this.settings.permissionPromptToolName,\n\t\t\tcontinue: this.settings.continue,\n\t\t\tallowedTools: this.settings.allowedTools,\n\t\t\tdisallowedTools: this.settings.disallowedTools,\n\t\t\tmcpServers: this.settings.mcpServers\n\t\t};\n\n\t\tconst warnings = this.generateUnsupportedWarnings(options);\n\n\t\tconst stream = new 
ReadableStream({\n\t\t\tstart: async (controller) => {\n\t\t\t\ttry {\n\t\t\t\t\tconst response = query({\n\t\t\t\t\t\tprompt: messagesPrompt,\n\t\t\t\t\t\toptions: queryOptions\n\t\t\t\t\t});\n\n\t\t\t\t\tlet usage = { promptTokens: 0, completionTokens: 0 };\n\t\t\t\t\tlet accumulatedText = '';\n\n\t\t\t\t\tfor await (const message of response) {\n\t\t\t\t\t\tif (message.type === 'assistant') {\n\t\t\t\t\t\t\tconst text = message.message.content\n\t\t\t\t\t\t\t\t.map((c) => (c.type === 'text' ? c.text : ''))\n\t\t\t\t\t\t\t\t.join('');\n\n\t\t\t\t\t\t\tif (text) {\n\t\t\t\t\t\t\t\taccumulatedText += text;\n\n\t\t\t\t\t\t\t\t// In object-json mode, we need to accumulate the full text\n\t\t\t\t\t\t\t\t// and extract JSON at the end, so don't stream individual deltas\n\t\t\t\t\t\t\t\tif (options.mode?.type !== 'object-json') {\n\t\t\t\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\t\t\t\ttype: 'text-delta',\n\t\t\t\t\t\t\t\t\t\ttextDelta: text\n\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else if (message.type === 'result') {\n\t\t\t\t\t\t\tlet rawUsage;\n\t\t\t\t\t\t\tif ('usage' in message) {\n\t\t\t\t\t\t\t\trawUsage = message.usage;\n\t\t\t\t\t\t\t\tusage = {\n\t\t\t\t\t\t\t\t\tpromptTokens:\n\t\t\t\t\t\t\t\t\t\t(message.usage.cache_creation_input_tokens ?? 0) +\n\t\t\t\t\t\t\t\t\t\t(message.usage.cache_read_input_tokens ?? 0) +\n\t\t\t\t\t\t\t\t\t\t(message.usage.input_tokens ?? 0),\n\t\t\t\t\t\t\t\t\tcompletionTokens: message.usage.output_tokens ?? 
0\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tlet finishReason = 'stop';\n\t\t\t\t\t\t\tif (message.subtype === 'error_max_turns') {\n\t\t\t\t\t\t\t\tfinishReason = 'length';\n\t\t\t\t\t\t\t} else if (message.subtype === 'error_during_execution') {\n\t\t\t\t\t\t\t\tfinishReason = 'error';\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// Store session ID in the model instance\n\t\t\t\t\t\t\tthis.sessionId = message.session_id;\n\n\t\t\t\t\t\t\t// In object-json mode, extract JSON and send the full text at once\n\t\t\t\t\t\t\tif (options.mode?.type === 'object-json' && accumulatedText) {\n\t\t\t\t\t\t\t\tconst extractedJson = extractJson(accumulatedText);\n\t\t\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\t\t\ttype: 'text-delta',\n\t\t\t\t\t\t\t\t\ttextDelta: extractedJson\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\t\ttype: 'finish',\n\t\t\t\t\t\t\t\tfinishReason,\n\t\t\t\t\t\t\t\tusage,\n\t\t\t\t\t\t\t\tproviderMetadata: {\n\t\t\t\t\t\t\t\t\t'claude-code': {\n\t\t\t\t\t\t\t\t\t\tsessionId: message.session_id,\n\t\t\t\t\t\t\t\t\t\t...(message.total_cost_usd !== undefined && {\n\t\t\t\t\t\t\t\t\t\t\tcostUsd: message.total_cost_usd\n\t\t\t\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\t\t\t\t...(message.duration_ms !== undefined && {\n\t\t\t\t\t\t\t\t\t\t\tdurationMs: message.duration_ms\n\t\t\t\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\t\t\t\t...(rawUsage !== undefined && { rawUsage })\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t} else if (\n\t\t\t\t\t\t\tmessage.type === 'system' &&\n\t\t\t\t\t\t\tmessage.subtype === 'init'\n\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t// Store session ID for future use\n\t\t\t\t\t\t\tthis.sessionId = message.session_id;\n\n\t\t\t\t\t\t\t// Emit response metadata when session is initialized\n\t\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\t\ttype: 'response-metadata',\n\t\t\t\t\t\t\t\tid: message.session_id,\n\t\t\t\t\t\t\t\ttimestamp: new Date(),\n\t\t\t\t\t\t\t\tmodelId: 
this.modelId\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// -------------------------------------------------------------\n\t\t\t\t\t// Work-around for Claude-Code CLI/SDK JSON truncation bug (#913)\n\t\t\t\t\t// -------------------------------------------------------------\n\t\t\t\t\t// If we hit the SDK JSON SyntaxError but have buffered text, finalize\n\t\t\t\t\t// the stream gracefully instead of emitting an error.\n\t\t\t\t\tconst isJsonTruncation =\n\t\t\t\t\t\terror instanceof SyntaxError &&\n\t\t\t\t\t\t/JSON/i.test(error.message || '') &&\n\t\t\t\t\t\t(error.message.includes('position') ||\n\t\t\t\t\t\t\terror.message.includes('Unexpected end'));\n\n\t\t\t\t\tif (\n\t\t\t\t\t\tisJsonTruncation &&\n\t\t\t\t\t\taccumulatedText &&\n\t\t\t\t\t\taccumulatedText.length > 0\n\t\t\t\t\t) {\n\t\t\t\t\t\t// Prepare final text payload\n\t\t\t\t\t\tconst finalText =\n\t\t\t\t\t\t\toptions.mode?.type === 'object-json'\n\t\t\t\t\t\t\t\t? extractJson(accumulatedText)\n\t\t\t\t\t\t\t\t: accumulatedText;\n\n\t\t\t\t\t\t// Emit any remaining text\n\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\ttype: 'text-delta',\n\t\t\t\t\t\t\ttextDelta: finalText\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\t// Emit finish with truncated reason and warning\n\t\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\t\ttype: 'finish',\n\t\t\t\t\t\t\tfinishReason: 'truncated',\n\t\t\t\t\t\t\tusage,\n\t\t\t\t\t\t\tproviderMetadata: { 'claude-code': { truncated: true } },\n\t\t\t\t\t\t\twarnings: [\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\ttype: 'provider-warning',\n\t\t\t\t\t\t\t\t\tdetails:\n\t\t\t\t\t\t\t\t\t\t'Claude Code SDK JSON truncation detected; stream recovered.'\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t]\n\t\t\t\t\t\t});\n\n\t\t\t\t\t\tcontroller.close();\n\t\t\t\t\t\treturn; // Skip normal error path\n\t\t\t\t\t}\n\n\t\t\t\t\tcontroller.close();\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlet errorToEmit;\n\n\t\t\t\t\tif (error instanceof AbortError) {\n\t\t\t\t\t\terrorToEmit = 
options.abortSignal?.aborted\n\t\t\t\t\t\t\t? options.abortSignal.reason\n\t\t\t\t\t\t\t: error;\n\t\t\t\t\t} else if (\n\t\t\t\t\t\terror.message?.includes('not logged in') ||\n\t\t\t\t\t\terror.message?.includes('authentication') ||\n\t\t\t\t\t\terror.exitCode === 401\n\t\t\t\t\t) {\n\t\t\t\t\t\terrorToEmit = createAuthenticationError({\n\t\t\t\t\t\t\tmessage:\n\t\t\t\t\t\t\t\terror.message ||\n\t\t\t\t\t\t\t\t'Authentication failed. Please ensure Claude Code CLI is properly authenticated.'\n\t\t\t\t\t\t});\n\t\t\t\t\t} else {\n\t\t\t\t\t\terrorToEmit = createAPICallError({\n\t\t\t\t\t\t\tmessage: error.message || 'Claude Code CLI error',\n\t\t\t\t\t\t\tcode: error.code,\n\t\t\t\t\t\t\texitCode: error.exitCode,\n\t\t\t\t\t\t\tstderr: error.stderr,\n\t\t\t\t\t\t\tpromptExcerpt: messagesPrompt.substring(0, 200),\n\t\t\t\t\t\t\tisRetryable:\n\t\t\t\t\t\t\t\terror.code === 'ENOENT' || error.code === 'ECONNREFUSED'\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\n\t\t\t\t\t// Emit error as a stream part\n\t\t\t\t\tcontroller.enqueue({\n\t\t\t\t\t\ttype: 'error',\n\t\t\t\t\t\terror: errorToEmit\n\t\t\t\t\t});\n\n\t\t\t\t\tcontroller.close();\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\n\t\treturn {\n\t\t\tstream,\n\t\t\trawCall: {\n\t\t\t\trawPrompt: messagesPrompt,\n\t\t\t\trawSettings: queryOptions\n\t\t\t},\n\t\t\twarnings: warnings.length > 0 ? 
warnings : undefined,\n\t\t\trequest: {\n\t\t\t\tbody: messagesPrompt\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/scripts/modules/task-manager/add-subtask.js", "import path from 'path';\n\nimport { log, readJSON, writeJSON, getCurrentTag } from '../utils.js';\nimport { isTaskDependentOn } from '../task-manager.js';\nimport generateTaskFiles from './generate-task-files.js';\n\n/**\n * Add a subtask to a parent task\n * @param {string} tasksPath - Path to the tasks.json file\n * @param {number|string} parentId - ID of the parent task\n * @param {number|string|null} existingTaskId - ID of an existing task to convert to subtask (optional)\n * @param {Object} newSubtaskData - Data for creating a new subtask (used if existingTaskId is null)\n * @param {boolean} generateFiles - Whether to regenerate task files after adding the subtask\n * @param {Object} context - Context object containing projectRoot and tag information\n * @param {string} context.projectRoot - Project root path\n * @param {string} context.tag - Tag for the task\n * @returns {Object} The newly created or converted subtask\n */\nasync function addSubtask(\n\ttasksPath,\n\tparentId,\n\texistingTaskId = null,\n\tnewSubtaskData = null,\n\tgenerateFiles = false,\n\tcontext = {}\n) {\n\tconst { projectRoot, tag } = context;\n\ttry {\n\t\tlog('info', `Adding subtask to parent task ${parentId}...`);\n\n\t\t// Read the existing tasks with proper context\n\t\tconst data = readJSON(tasksPath, projectRoot, tag);\n\t\tif (!data || !data.tasks) {\n\t\t\tthrow new Error(`Invalid or missing tasks file at ${tasksPath}`);\n\t\t}\n\n\t\t// Convert parent ID to number\n\t\tconst parentIdNum = parseInt(parentId, 10);\n\n\t\t// Find the parent task\n\t\tconst parentTask = data.tasks.find((t) => t.id === parentIdNum);\n\t\tif (!parentTask) {\n\t\t\tthrow new Error(`Parent task with ID ${parentIdNum} not found`);\n\t\t}\n\n\t\t// Initialize subtasks array if it doesn't exist\n\t\tif (!parentTask.subtasks) 
{\n\t\t\tparentTask.subtasks = [];\n\t\t}\n\n\t\tlet newSubtask;\n\n\t\t// Case 1: Convert an existing task to a subtask\n\t\tif (existingTaskId !== null) {\n\t\t\tconst existingTaskIdNum = parseInt(existingTaskId, 10);\n\n\t\t\t// Find the existing task\n\t\t\tconst existingTaskIndex = data.tasks.findIndex(\n\t\t\t\t(t) => t.id === existingTaskIdNum\n\t\t\t);\n\t\t\tif (existingTaskIndex === -1) {\n\t\t\t\tthrow new Error(`Task with ID ${existingTaskIdNum} not found`);\n\t\t\t}\n\n\t\t\tconst existingTask = data.tasks[existingTaskIndex];\n\n\t\t\t// Check if task is already a subtask\n\t\t\tif (existingTask.parentTaskId) {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`Task ${existingTaskIdNum} is already a subtask of task ${existingTask.parentTaskId}`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Check for circular dependency\n\t\t\tif (existingTaskIdNum === parentIdNum) {\n\t\t\t\tthrow new Error(`Cannot make a task a subtask of itself`);\n\t\t\t}\n\n\t\t\t// Check if parent task is a subtask of the task we're converting\n\t\t\t// This would create a circular dependency\n\t\t\tif (isTaskDependentOn(data.tasks, parentTask, existingTaskIdNum)) {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`Cannot create circular dependency: task ${parentIdNum} is already a subtask or dependent of task ${existingTaskIdNum}`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Find the highest subtask ID to determine the next ID\n\t\t\tconst highestSubtaskId =\n\t\t\t\tparentTask.subtasks.length > 0\n\t\t\t\t\t? 
Math.max(...parentTask.subtasks.map((st) => st.id))\n\t\t\t\t\t: 0;\n\t\t\tconst newSubtaskId = highestSubtaskId + 1;\n\n\t\t\t// Clone the existing task to be converted to a subtask\n\t\t\tnewSubtask = {\n\t\t\t\t...existingTask,\n\t\t\t\tid: newSubtaskId,\n\t\t\t\tparentTaskId: parentIdNum\n\t\t\t};\n\n\t\t\t// Add to parent's subtasks\n\t\t\tparentTask.subtasks.push(newSubtask);\n\n\t\t\t// Remove the task from the main tasks array\n\t\t\tdata.tasks.splice(existingTaskIndex, 1);\n\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t`Converted task ${existingTaskIdNum} to subtask ${parentIdNum}.${newSubtaskId}`\n\t\t\t);\n\t\t}\n\t\t// Case 2: Create a new subtask\n\t\telse if (newSubtaskData) {\n\t\t\t// Find the highest subtask ID to determine the next ID\n\t\t\tconst highestSubtaskId =\n\t\t\t\tparentTask.subtasks.length > 0\n\t\t\t\t\t? Math.max(...parentTask.subtasks.map((st) => st.id))\n\t\t\t\t\t: 0;\n\t\t\tconst newSubtaskId = highestSubtaskId + 1;\n\n\t\t\t// Create the new subtask object\n\t\t\tnewSubtask = {\n\t\t\t\tid: newSubtaskId,\n\t\t\t\ttitle: newSubtaskData.title,\n\t\t\t\tdescription: newSubtaskData.description || '',\n\t\t\t\tdetails: newSubtaskData.details || '',\n\t\t\t\tstatus: newSubtaskData.status || 'pending',\n\t\t\t\tdependencies: newSubtaskData.dependencies || [],\n\t\t\t\tparentTaskId: parentIdNum\n\t\t\t};\n\n\t\t\t// Add to parent's subtasks\n\t\t\tparentTask.subtasks.push(newSubtask);\n\n\t\t\tlog('info', `Created new subtask ${parentIdNum}.${newSubtaskId}`);\n\t\t} else {\n\t\t\tthrow new Error(\n\t\t\t\t'Either existingTaskId or newSubtaskData must be provided'\n\t\t\t);\n\t\t}\n\n\t\t// Write the updated tasks back to the file with proper context\n\t\twriteJSON(tasksPath, data, projectRoot, tag);\n\n\t\t// Generate task files if requested\n\t\tif (generateFiles) {\n\t\t\tlog('info', 'Regenerating task files...');\n\t\t\tawait generateTaskFiles(tasksPath, path.dirname(tasksPath), context);\n\t\t}\n\n\t\treturn newSubtask;\n\t} catch (error) 
{\n\t\tlog('error', `Error adding subtask: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\nexport default addSubtask;\n"], ["/claude-task-master/mcp-server/src/tools/get-tasks.js", "/**\n * tools/get-tasks.js\n * Tool to get all tasks from Task Master\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { listTasksDirect } from '../core/task-master-core.js';\nimport {\n\tresolveTasksPath,\n\tresolveComplexityReportPath\n} from '../core/utils/path-utils.js';\n\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the getTasks tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerListTasksTool(server) {\n\tserver.addTool({\n\t\tname: 'get_tasks',\n\t\tdescription:\n\t\t\t'Get all tasks from Task Master, optionally filtering by status and including subtasks.',\n\t\tparameters: z.object({\n\t\t\tstatus: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t\"Filter tasks by status (e.g., 'pending', 'done') or multiple statuses separated by commas (e.g., 'blocked,deferred')\"\n\t\t\t\t),\n\t\t\twithSubtasks: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Include subtasks nested within their parent tasks in the response'\n\t\t\t\t),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the tasks file (relative to project root or absolute)'\n\t\t\t\t),\n\t\t\tcomplexityReport: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the complexity report file (relative to project root or absolute)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Getting tasks with filters: ${JSON.stringify(args)}`);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\t// Resolve the path to tasks.json using new path utilities\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = resolveTasksPath(args, log);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Resolve the path to complexity report\n\t\t\t\tlet complexityReportPath;\n\t\t\t\ttry {\n\t\t\t\t\tcomplexityReportPath = resolveComplexityReportPath(\n\t\t\t\t\t\t{ ...args, tag: resolvedTag },\n\t\t\t\t\t\tsession\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding complexity report: ${error.message}`);\n\t\t\t\t\t// This is optional, so we don't fail the operation\n\t\t\t\t\tcomplexityReportPath = null;\n\t\t\t\t}\n\n\t\t\t\tconst result = await listTasksDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tstatus: args.status,\n\t\t\t\t\t\twithSubtasks: args.withSubtasks,\n\t\t\t\t\t\treportPath: complexityReportPath,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tlog.info(\n\t\t\t\t\t`Retrieved ${result.success ? 
result.data?.tasks?.length || 0 : 0} tasks`\n\t\t\t\t);\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error getting tasks',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error getting tasks: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n\n// We no longer need the formatTasksResponse function as we're returning raw JSON data\n"], ["/claude-task-master/mcp-server/src/tools/move-task.js", "/**\n * tools/move-task.js\n * Tool for moving tasks or subtasks to a new position\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { moveTaskDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the moveTask tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerMoveTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'move_task',\n\t\tdescription: 'Move a task or subtask to a new position',\n\t\tparameters: z.object({\n\t\t\tfrom: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'ID of the task/subtask to move (e.g., \"5\" or \"5.2\"). Can be comma-separated to move multiple tasks (e.g., \"5,6,7\")'\n\t\t\t\t),\n\t\t\tto: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'ID of the destination (e.g., \"7\" or \"7.3\"). 
Must match the number of source IDs if comma-separated'\n\t\t\t\t),\n\t\t\tfile: z.string().optional().describe('Custom path to tasks.json file'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'Root directory of the project (typically derived from session)'\n\t\t\t\t),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\t// Find tasks.json path if not provided\n\t\t\t\tlet tasksJsonPath = args.file;\n\n\t\t\t\tif (!tasksJsonPath) {\n\t\t\t\t\ttasksJsonPath = findTasksPath(args, log);\n\t\t\t\t}\n\n\t\t\t\t// Parse comma-separated IDs\n\t\t\t\tconst fromIds = args.from.split(',').map((id) => id.trim());\n\t\t\t\tconst toIds = args.to.split(',').map((id) => id.trim());\n\n\t\t\t\t// Validate matching IDs count\n\t\t\t\tif (fromIds.length !== toIds.length) {\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t'The number of source and destination IDs must match',\n\t\t\t\t\t\t'MISMATCHED_ID_COUNT'\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// If moving multiple tasks\n\t\t\t\tif (fromIds.length > 1) {\n\t\t\t\t\tconst results = [];\n\t\t\t\t\t// Move tasks one by one, only generate files on the last move\n\t\t\t\t\tfor (let i = 0; i < fromIds.length; i++) {\n\t\t\t\t\t\tconst fromId = fromIds[i];\n\t\t\t\t\t\tconst toId = toIds[i];\n\n\t\t\t\t\t\t// Skip if source and destination are the same\n\t\t\t\t\t\tif (fromId === toId) {\n\t\t\t\t\t\t\tlog.info(`Skipping ${fromId} -> ${toId} (same ID)`);\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tconst shouldGenerateFiles = i === fromIds.length - 1;\n\t\t\t\t\t\tconst result = await moveTaskDirect(\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tsourceId: fromId,\n\t\t\t\t\t\t\t\tdestinationId: toId,\n\t\t\t\t\t\t\t\ttasksJsonPath,\n\t\t\t\t\t\t\t\tprojectRoot: 
args.projectRoot,\n\t\t\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tlog,\n\t\t\t\t\t\t\t{ session }\n\t\t\t\t\t\t);\n\n\t\t\t\t\t\tif (!result.success) {\n\t\t\t\t\t\t\tlog.error(\n\t\t\t\t\t\t\t\t`Failed to move ${fromId} to ${toId}: ${result.error.message}`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tresults.push(result.data);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn handleApiResult(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tsuccess: true,\n\t\t\t\t\t\t\tdata: {\n\t\t\t\t\t\t\t\tmoves: results,\n\t\t\t\t\t\t\t\tmessage: `Successfully moved ${results.length} tasks`\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t},\n\t\t\t\t\t\tlog,\n\t\t\t\t\t\t'Error moving multiple tasks',\n\t\t\t\t\t\tundefined,\n\t\t\t\t\t\targs.projectRoot\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\t// Moving a single task\n\t\t\t\t\treturn handleApiResult(\n\t\t\t\t\t\tawait moveTaskDirect(\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tsourceId: args.from,\n\t\t\t\t\t\t\t\tdestinationId: args.to,\n\t\t\t\t\t\t\t\ttasksJsonPath,\n\t\t\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tlog,\n\t\t\t\t\t\t\t{ session }\n\t\t\t\t\t\t),\n\t\t\t\t\t\tlog,\n\t\t\t\t\t\t'Error moving task',\n\t\t\t\t\t\tundefined,\n\t\t\t\t\t\targs.projectRoot\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} catch (error) {\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Failed to move task: ${error.message}`,\n\t\t\t\t\t'MOVE_TASK_ERROR'\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/scripts/init.js", "/**\n * Task Master\n * Copyright (c) 2025 Eyal Toledano, Ralph Khreish\n *\n * This software is licensed under the MIT License with Commons Clause.\n * You may use this software for any purpose, including commercial applications,\n * and modify and redistribute it freely, subject to the following restrictions:\n *\n * 1. You may not sell this software or offer it as a service.\n * 2. The origin of this software must not be misrepresented.\n * 3. 
Altered source versions must be plainly marked as such.\n *\n * For the full license text, see the LICENSE file in the root directory.\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport readline from 'readline';\nimport { fileURLToPath } from 'url';\nimport { dirname } from 'path';\nimport chalk from 'chalk';\nimport figlet from 'figlet';\nimport boxen from 'boxen';\nimport gradient from 'gradient-string';\nimport { isSilentMode } from './modules/utils.js';\nimport { insideGitWorkTree } from './modules/utils/git-utils.js';\nimport { manageGitignoreFile } from '../src/utils/manage-gitignore.js';\nimport { RULE_PROFILES } from '../src/constants/profiles.js';\nimport {\n\tconvertAllRulesToProfileRules,\n\tgetRulesProfile\n} from '../src/utils/rule-transformer.js';\nimport { updateConfigMaxTokens } from './modules/update-config-tokens.js';\n\nimport { execSync } from 'child_process';\nimport {\n\tEXAMPLE_PRD_FILE,\n\tTASKMASTER_CONFIG_FILE,\n\tTASKMASTER_TEMPLATES_DIR,\n\tTASKMASTER_DIR,\n\tTASKMASTER_TASKS_DIR,\n\tTASKMASTER_DOCS_DIR,\n\tTASKMASTER_REPORTS_DIR,\n\tTASKMASTER_STATE_FILE,\n\tENV_EXAMPLE_FILE,\n\tGITIGNORE_FILE\n} from '../src/constants/paths.js';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = dirname(__filename);\n\n// Define log levels\nconst LOG_LEVELS = {\n\tdebug: 0,\n\tinfo: 1,\n\twarn: 2,\n\terror: 3,\n\tsuccess: 4\n};\n\n// Determine log level from environment variable or default to 'info'\nconst LOG_LEVEL = process.env.TASKMASTER_LOG_LEVEL\n\t? 
LOG_LEVELS[process.env.TASKMASTER_LOG_LEVEL.toLowerCase()]\n\t: LOG_LEVELS.info; // Default to info\n\n// Create a color gradient for the banner\nconst coolGradient = gradient(['#00b4d8', '#0077b6', '#03045e']);\nconst warmGradient = gradient(['#fb8b24', '#e36414', '#9a031e']);\n\n// Display a fancy banner\nfunction displayBanner() {\n\tif (isSilentMode()) return;\n\n\tconsole.clear();\n\tconst bannerText = figlet.textSync('Task Master AI', {\n\t\tfont: 'Standard',\n\t\thorizontalLayout: 'default',\n\t\tverticalLayout: 'default'\n\t});\n\n\tconsole.log(coolGradient(bannerText));\n\n\t// Add creator credit line below the banner\n\tconsole.log(\n\t\tchalk.dim('by ') + chalk.cyan.underline('https://x.com/eyaltoledano')\n\t);\n\n\tconsole.log(\n\t\tboxen(chalk.white(`${chalk.bold('Initializing')} your new project`), {\n\t\t\tpadding: 1,\n\t\t\tmargin: { top: 0, bottom: 1 },\n\t\t\tborderStyle: 'round',\n\t\t\tborderColor: 'cyan'\n\t\t})\n\t);\n}\n\n// Logging function with icons and colors\nfunction log(level, ...args) {\n\tconst icons = {\n\t\tdebug: chalk.gray('🔍'),\n\t\tinfo: chalk.blue('ℹ️'),\n\t\twarn: chalk.yellow('⚠️'),\n\t\terror: chalk.red('❌'),\n\t\tsuccess: chalk.green('✅')\n\t};\n\n\tif (LOG_LEVELS[level] >= LOG_LEVEL) {\n\t\tconst icon = icons[level] || '';\n\n\t\t// Only output to console if not in silent mode\n\t\tif (!isSilentMode()) {\n\t\t\tif (level === 'error') {\n\t\t\t\tconsole.error(icon, chalk.red(...args));\n\t\t\t} else if (level === 'warn') {\n\t\t\t\tconsole.warn(icon, chalk.yellow(...args));\n\t\t\t} else if (level === 'success') {\n\t\t\t\tconsole.log(icon, chalk.green(...args));\n\t\t\t} else if (level === 'info') {\n\t\t\t\tconsole.log(icon, chalk.blue(...args));\n\t\t\t} else {\n\t\t\t\tconsole.log(icon, ...args);\n\t\t\t}\n\t\t}\n\t}\n\n\t// Write to debug log if DEBUG=true\n\tif (process.env.DEBUG === 'true') {\n\t\tconst logMessage = `[${level.toUpperCase()}] ${args.join(' ')}\\n`;\n\t\tfs.appendFileSync('init-debug.log', 
logMessage);\n\t}\n}\n\n// Function to create directory if it doesn't exist\nfunction ensureDirectoryExists(dirPath) {\n\tif (!fs.existsSync(dirPath)) {\n\t\tfs.mkdirSync(dirPath, { recursive: true });\n\t\tlog('info', `Created directory: ${dirPath}`);\n\t}\n}\n\n// Function to add shell aliases to the user's shell configuration\nfunction addShellAliases() {\n\tconst homeDir = process.env.HOME || process.env.USERPROFILE;\n\tlet shellConfigFile;\n\n\t// Determine which shell config file to use\n\tif (process.env.SHELL?.includes('zsh')) {\n\t\tshellConfigFile = path.join(homeDir, '.zshrc');\n\t} else if (process.env.SHELL?.includes('bash')) {\n\t\tshellConfigFile = path.join(homeDir, '.bashrc');\n\t} else {\n\t\tlog('warn', 'Could not determine shell type. Aliases not added.');\n\t\treturn false;\n\t}\n\n\ttry {\n\t\t// Check if file exists\n\t\tif (!fs.existsSync(shellConfigFile)) {\n\t\t\tlog(\n\t\t\t\t'warn',\n\t\t\t\t`Shell config file ${shellConfigFile} not found. Aliases not added.`\n\t\t\t);\n\t\t\treturn false;\n\t\t}\n\n\t\t// Check if aliases already exist\n\t\tconst configContent = fs.readFileSync(shellConfigFile, 'utf8');\n\t\tif (configContent.includes(\"alias tm='task-master'\")) {\n\t\t\tlog('info', 'Task Master aliases already exist in shell config.');\n\t\t\treturn true;\n\t\t}\n\n\t\t// Add aliases to the shell config file\n\t\tconst aliasBlock = `\n# Task Master aliases added on ${new Date().toLocaleDateString()}\nalias tm='task-master'\nalias taskmaster='task-master'\n`;\n\n\t\tfs.appendFileSync(shellConfigFile, aliasBlock);\n\t\tlog('success', `Added Task Master aliases to ${shellConfigFile}`);\n\t\tlog(\n\t\t\t'info',\n\t\t\t`To use the aliases in your current terminal, run: source ${shellConfigFile}`\n\t\t);\n\n\t\treturn true;\n\t} catch (error) {\n\t\tlog('error', `Failed to add aliases: ${error.message}`);\n\t\treturn false;\n\t}\n}\n\n// Function to create initial state.json file for tag management\nfunction 
createInitialStateFile(targetDir) {\n\tconst stateFilePath = path.join(targetDir, TASKMASTER_STATE_FILE);\n\n\t// Check if state.json already exists\n\tif (fs.existsSync(stateFilePath)) {\n\t\tlog('info', 'State file already exists, preserving current configuration');\n\t\treturn;\n\t}\n\n\t// Create initial state configuration\n\tconst initialState = {\n\t\tcurrentTag: 'master',\n\t\tlastSwitched: new Date().toISOString(),\n\t\tbranchTagMapping: {},\n\t\tmigrationNoticeShown: false\n\t};\n\n\ttry {\n\t\tfs.writeFileSync(stateFilePath, JSON.stringify(initialState, null, 2));\n\t\tlog('success', `Created initial state file: ${stateFilePath}`);\n\t\tlog('info', 'Default tag set to \"master\" for task organization');\n\t} catch (error) {\n\t\tlog('error', `Failed to create state file: ${error.message}`);\n\t}\n}\n\n// Function to copy a file from the package to the target directory\nfunction copyTemplateFile(templateName, targetPath, replacements = {}) {\n\t// Get the file content from the appropriate source directory\n\tlet sourcePath;\n\n\t// Map template names to their actual source paths\n\tswitch (templateName) {\n\t\t// case 'scripts_README.md':\n\t\t// \tsourcePath = path.join(__dirname, '..', 'assets', 'scripts_README.md');\n\t\t// \tbreak;\n\t\t// case 'README-task-master.md':\n\t\t// \tsourcePath = path.join(__dirname, '..', 'README-task-master.md');\n\t\t// \tbreak;\n\t\tdefault:\n\t\t\t// For other files like env.example, gitignore, etc. 
that don't have direct equivalents\n\t\t\tsourcePath = path.join(__dirname, '..', 'assets', templateName);\n\t}\n\n\t// Check if the source file exists\n\tif (!fs.existsSync(sourcePath)) {\n\t\t// Fall back to templates directory for files that might not have been moved yet\n\t\tsourcePath = path.join(__dirname, '..', 'assets', templateName);\n\t\tif (!fs.existsSync(sourcePath)) {\n\t\t\tlog('error', `Source file not found: ${sourcePath}`);\n\t\t\treturn;\n\t\t}\n\t}\n\n\tlet content = fs.readFileSync(sourcePath, 'utf8');\n\n\t// Replace placeholders with actual values\n\tObject.entries(replacements).forEach(([key, value]) => {\n\t\tconst regex = new RegExp(`\\\\{\\\\{${key}\\\\}\\\\}`, 'g');\n\t\tcontent = content.replace(regex, value);\n\t});\n\n\t// Handle special files that should be merged instead of overwritten\n\tif (fs.existsSync(targetPath)) {\n\t\tconst filename = path.basename(targetPath);\n\n\t\t// Handle .gitignore - append lines that don't exist\n\t\tif (filename === '.gitignore') {\n\t\t\tlog('info', `${targetPath} already exists, merging content...`);\n\t\t\tconst existingContent = fs.readFileSync(targetPath, 'utf8');\n\t\t\tconst existingLines = new Set(\n\t\t\t\texistingContent.split('\\n').map((line) => line.trim())\n\t\t\t);\n\t\t\tconst newLines = content\n\t\t\t\t.split('\\n')\n\t\t\t\t.filter((line) => !existingLines.has(line.trim()));\n\n\t\t\tif (newLines.length > 0) {\n\t\t\t\t// Add a comment to separate the original content from our additions\n\t\t\t\tconst updatedContent = `${existingContent.trim()}\\n\\n# Added by Task Master AI\\n${newLines.join('\\n')}`;\n\t\t\t\tfs.writeFileSync(targetPath, updatedContent);\n\t\t\t\tlog('success', `Updated ${targetPath} with additional entries`);\n\t\t\t} else {\n\t\t\t\tlog('info', `No new content to add to ${targetPath}`);\n\t\t\t}\n\t\t\treturn;\n\t\t}\n\n\t\t// Handle README.md - offer to preserve or create a different file\n\t\tif (filename === 'README-task-master.md') {\n\t\t\tlog('info', 
`${targetPath} already exists`);\n\t\t\t// Create a separate README file specifically for this project\n\t\t\tconst taskMasterReadmePath = path.join(\n\t\t\t\tpath.dirname(targetPath),\n\t\t\t\t'README-task-master.md'\n\t\t\t);\n\t\t\tfs.writeFileSync(taskMasterReadmePath, content);\n\t\t\tlog(\n\t\t\t\t'success',\n\t\t\t\t`Created ${taskMasterReadmePath} (preserved original README-task-master.md)`\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\t// For other files, warn and prompt before overwriting\n\t\tlog('warn', `${targetPath} already exists, skipping.`);\n\t\treturn;\n\t}\n\n\t// If the file doesn't exist, create it normally\n\tfs.writeFileSync(targetPath, content);\n\tlog('info', `Created file: ${targetPath}`);\n}\n\n// Main function to initialize a new project\nasync function initializeProject(options = {}) {\n\t// Receives options as argument\n\t// Only display banner if not in silent mode\n\tif (!isSilentMode()) {\n\t\tdisplayBanner();\n\t}\n\n\t// Debug logging only if not in silent mode\n\t// if (!isSilentMode()) {\n\t// \tconsole.log('===== DEBUG: INITIALIZE PROJECT OPTIONS RECEIVED =====');\n\t// \tconsole.log('Full options object:', JSON.stringify(options));\n\t// \tconsole.log('options.yes:', options.yes);\n\t// \tconsole.log('==================================================');\n\t// }\n\n\t// Handle boolean aliases flags\n\tif (options.aliases === true) {\n\t\toptions.addAliases = true; // --aliases flag provided\n\t} else if (options.aliases === false) {\n\t\toptions.addAliases = false; // --no-aliases flag provided\n\t}\n\t// If options.aliases and options.noAliases are undefined, we'll prompt for it\n\n\t// Handle boolean git flags\n\tif (options.git === true) {\n\t\toptions.initGit = true; // --git flag provided\n\t} else if (options.git === false) {\n\t\toptions.initGit = false; // --no-git flag provided\n\t}\n\t// If options.git and options.noGit are undefined, we'll prompt for it\n\n\t// Handle boolean gitTasks flags\n\tif (options.gitTasks === 
true) {\n\t\toptions.storeTasksInGit = true; // --git-tasks flag provided\n\t} else if (options.gitTasks === false) {\n\t\toptions.storeTasksInGit = false; // --no-git-tasks flag provided\n\t}\n\t// If options.gitTasks and options.noGitTasks are undefined, we'll prompt for it\n\n\tconst skipPrompts = options.yes || (options.name && options.description);\n\n\t// if (!isSilentMode()) {\n\t// \tconsole.log('Skip prompts determined:', skipPrompts);\n\t// }\n\n\tlet selectedRuleProfiles;\n\tif (options.rulesExplicitlyProvided) {\n\t\t// If --rules flag was used, always respect it.\n\t\tlog(\n\t\t\t'info',\n\t\t\t`Using rule profiles provided via command line: ${options.rules.join(', ')}`\n\t\t);\n\t\tselectedRuleProfiles = options.rules;\n\t} else if (skipPrompts) {\n\t\t// If non-interactive (e.g., --yes) and no rules specified, default to ALL.\n\t\tlog(\n\t\t\t'info',\n\t\t\t`No rules specified in non-interactive mode, defaulting to all profiles.`\n\t\t);\n\t\tselectedRuleProfiles = RULE_PROFILES;\n\t} else {\n\t\t// If interactive and no rules specified, default to NONE.\n\t\t// The 'rules --setup' wizard will handle selection.\n\t\tlog(\n\t\t\t'info',\n\t\t\t'No rules specified; interactive setup will be launched to select profiles.'\n\t\t);\n\t\tselectedRuleProfiles = [];\n\t}\n\n\tif (skipPrompts) {\n\t\tif (!isSilentMode()) {\n\t\t\tconsole.log('SKIPPING PROMPTS - Using defaults or provided values');\n\t\t}\n\n\t\t// Use provided options or defaults\n\t\tconst projectName = options.name || 'task-master-project';\n\t\tconst projectDescription =\n\t\t\toptions.description || 'A project managed with Task Master AI';\n\t\tconst projectVersion = options.version || '0.1.0';\n\t\tconst authorName = options.author || 'Vibe coder';\n\t\tconst dryRun = options.dryRun || false;\n\t\tconst addAliases =\n\t\t\toptions.addAliases !== undefined ? options.addAliases : true; // Default to true if not specified\n\t\tconst initGit = options.initGit !== undefined ? 
options.initGit : true; // Default to true if not specified\n\t\tconst storeTasksInGit =\n\t\t\toptions.storeTasksInGit !== undefined ? options.storeTasksInGit : true; // Default to true if not specified\n\n\t\tif (dryRun) {\n\t\t\tlog('info', 'DRY RUN MODE: No files will be modified');\n\t\t\tlog('info', 'Would initialize Task Master project');\n\t\t\tlog('info', 'Would create/update necessary project files');\n\n\t\t\t// Show flag-specific behavior\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t`${addAliases ? 'Would add shell aliases (tm, taskmaster)' : 'Would skip shell aliases'}`\n\t\t\t);\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t`${initGit ? 'Would initialize Git repository' : 'Would skip Git initialization'}`\n\t\t\t);\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t`${storeTasksInGit ? 'Would store tasks in Git' : 'Would exclude tasks from Git'}`\n\t\t\t);\n\n\t\t\treturn {\n\t\t\t\tdryRun: true\n\t\t\t};\n\t\t}\n\n\t\tcreateProjectStructure(\n\t\t\taddAliases,\n\t\t\tinitGit,\n\t\t\tstoreTasksInGit,\n\t\t\tdryRun,\n\t\t\toptions,\n\t\t\tselectedRuleProfiles\n\t\t);\n\t} else {\n\t\t// Interactive logic\n\t\tlog('info', 'Required options not provided, proceeding with prompts.');\n\n\t\ttry {\n\t\t\tconst rl = readline.createInterface({\n\t\t\t\tinput: process.stdin,\n\t\t\t\toutput: process.stdout\n\t\t\t});\n\t\t\t// Prompt for shell aliases (skip if --aliases or --no-aliases flag was provided)\n\t\t\tlet addAliasesPrompted = true; // Default to true\n\t\t\tif (options.addAliases !== undefined) {\n\t\t\t\taddAliasesPrompted = options.addAliases; // Use flag value if provided\n\t\t\t} else {\n\t\t\t\tconst addAliasesInput = await promptQuestion(\n\t\t\t\t\trl,\n\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t'Add shell aliases for task-master? 
This lets you type \"tm\" instead of \"task-master\" (Y/n): '\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\taddAliasesPrompted = addAliasesInput.trim().toLowerCase() !== 'n';\n\t\t\t}\n\n\t\t\t// Prompt for Git initialization (skip if --git or --no-git flag was provided)\n\t\t\tlet initGitPrompted = true; // Default to true\n\t\t\tif (options.initGit !== undefined) {\n\t\t\t\tinitGitPrompted = options.initGit; // Use flag value if provided\n\t\t\t} else {\n\t\t\t\tconst gitInitInput = await promptQuestion(\n\t\t\t\t\trl,\n\t\t\t\t\tchalk.cyan('Initialize a Git repository in project root? (Y/n): ')\n\t\t\t\t);\n\t\t\t\tinitGitPrompted = gitInitInput.trim().toLowerCase() !== 'n';\n\t\t\t}\n\n\t\t\t// Prompt for Git tasks storage (skip if --git-tasks or --no-git-tasks flag was provided)\n\t\t\tlet storeGitPrompted = true; // Default to true\n\t\t\tif (options.storeTasksInGit !== undefined) {\n\t\t\t\tstoreGitPrompted = options.storeTasksInGit; // Use flag value if provided\n\t\t\t} else {\n\t\t\t\tconst gitTasksInput = await promptQuestion(\n\t\t\t\t\trl,\n\t\t\t\t\tchalk.cyan(\n\t\t\t\t\t\t'Store tasks in Git (tasks.json and tasks/ directory)? (Y/n): '\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tstoreGitPrompted = gitTasksInput.trim().toLowerCase() !== 'n';\n\t\t\t}\n\n\t\t\t// Confirm settings...\n\t\t\tconsole.log('\\nTask Master Project settings:');\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue(\n\t\t\t\t\t'Add shell aliases (so you can use \"tm\" instead of \"task-master\"):'\n\t\t\t\t),\n\t\t\t\tchalk.white(addAliasesPrompted ? 'Yes' : 'No')\n\t\t\t);\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue('Initialize Git repository in project root:'),\n\t\t\t\tchalk.white(initGitPrompted ? 'Yes' : 'No')\n\t\t\t);\n\t\t\tconsole.log(\n\t\t\t\tchalk.blue('Store tasks in Git (tasks.json and tasks/ directory):'),\n\t\t\t\tchalk.white(storeGitPrompted ? 
'Yes' : 'No')\n\t\t\t);\n\n\t\t\tconst confirmInput = await promptQuestion(\n\t\t\t\trl,\n\t\t\t\tchalk.yellow('\\nDo you want to continue with these settings? (Y/n): ')\n\t\t\t);\n\t\t\tconst shouldContinue = confirmInput.trim().toLowerCase() !== 'n';\n\n\t\t\tif (!shouldContinue) {\n\t\t\t\trl.close();\n\t\t\t\tlog('info', 'Project initialization cancelled by user');\n\t\t\t\tprocess.exit(0);\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Only run interactive rules if rules flag not provided via command line\n\t\t\tif (options.rulesExplicitlyProvided) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`Using rule profiles provided via command line: ${selectedRuleProfiles.join(', ')}`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst dryRun = options.dryRun || false;\n\n\t\t\tif (dryRun) {\n\t\t\t\tlog('info', 'DRY RUN MODE: No files will be modified');\n\t\t\t\tlog('info', 'Would initialize Task Master project');\n\t\t\t\tlog('info', 'Would create/update necessary project files');\n\n\t\t\t\t// Show flag-specific behavior\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`${addAliasesPrompted ? 'Would add shell aliases (tm, taskmaster)' : 'Would skip shell aliases'}`\n\t\t\t\t);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`${initGitPrompted ? 'Would initialize Git repository' : 'Would skip Git initialization'}`\n\t\t\t\t);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`${storeGitPrompted ? 
'Would store tasks in Git' : 'Would exclude tasks from Git'}`\n\t\t\t\t);\n\n\t\t\t\treturn {\n\t\t\t\t\tdryRun: true\n\t\t\t\t};\n\t\t\t}\n\n\t\t\t// Create structure using only necessary values\n\t\t\tcreateProjectStructure(\n\t\t\t\taddAliasesPrompted,\n\t\t\t\tinitGitPrompted,\n\t\t\t\tstoreGitPrompted,\n\t\t\t\tdryRun,\n\t\t\t\toptions,\n\t\t\t\tselectedRuleProfiles\n\t\t\t);\n\t\t\trl.close();\n\t\t} catch (error) {\n\t\t\tif (rl) {\n\t\t\t\trl.close();\n\t\t\t}\n\t\t\tlog('error', `Error during initialization process: ${error.message}`);\n\t\t\tprocess.exit(1);\n\t\t}\n\t}\n}\n\n// Helper function to promisify readline question\nfunction promptQuestion(rl, question) {\n\treturn new Promise((resolve) => {\n\t\trl.question(question, (answer) => {\n\t\t\tresolve(answer);\n\t\t});\n\t});\n}\n\n// Function to create the project structure\nfunction createProjectStructure(\n\taddAliases,\n\tinitGit,\n\tstoreTasksInGit,\n\tdryRun,\n\toptions,\n\tselectedRuleProfiles = RULE_PROFILES\n) {\n\tconst targetDir = process.cwd();\n\tlog('info', `Initializing project in ${targetDir}`);\n\n\t// Create NEW .taskmaster directory structure (using constants)\n\tensureDirectoryExists(path.join(targetDir, TASKMASTER_DIR));\n\tensureDirectoryExists(path.join(targetDir, TASKMASTER_TASKS_DIR));\n\tensureDirectoryExists(path.join(targetDir, TASKMASTER_DOCS_DIR));\n\tensureDirectoryExists(path.join(targetDir, TASKMASTER_REPORTS_DIR));\n\tensureDirectoryExists(path.join(targetDir, TASKMASTER_TEMPLATES_DIR));\n\n\t// Create initial state.json file for tag management\n\tcreateInitialStateFile(targetDir);\n\n\t// Copy template files with replacements\n\tconst replacements = {\n\t\tyear: new Date().getFullYear()\n\t};\n\n\t// Helper function to create rule profiles\n\tfunction _processSingleProfile(profileName) {\n\t\tconst profile = getRulesProfile(profileName);\n\t\tif (profile) {\n\t\t\tconvertAllRulesToProfileRules(targetDir, profile);\n\t\t\t// Also triggers MCP config setup (if 
applicable)\n\t\t} else {\n\t\t\tlog('warn', `Unknown rule profile: ${profileName}`);\n\t\t}\n\t}\n\n\t// Copy .env.example\n\tcopyTemplateFile(\n\t\t'env.example',\n\t\tpath.join(targetDir, ENV_EXAMPLE_FILE),\n\t\treplacements\n\t);\n\n\t// Copy config.json with project name to NEW location\n\tcopyTemplateFile(\n\t\t'config.json',\n\t\tpath.join(targetDir, TASKMASTER_CONFIG_FILE),\n\t\t{\n\t\t\t...replacements\n\t\t}\n\t);\n\n\t// Update config.json with correct maxTokens values from supported-models.json\n\tconst configPath = path.join(targetDir, TASKMASTER_CONFIG_FILE);\n\tif (updateConfigMaxTokens(configPath)) {\n\t\tlog('info', 'Updated config with correct maxTokens values');\n\t} else {\n\t\tlog('warn', 'Could not update maxTokens in config');\n\t}\n\n\t// Copy .gitignore with GitTasks preference\n\ttry {\n\t\tconst gitignoreTemplatePath = path.join(\n\t\t\t__dirname,\n\t\t\t'..',\n\t\t\t'assets',\n\t\t\t'gitignore'\n\t\t);\n\t\tconst templateContent = fs.readFileSync(gitignoreTemplatePath, 'utf8');\n\t\tmanageGitignoreFile(\n\t\t\tpath.join(targetDir, GITIGNORE_FILE),\n\t\t\ttemplateContent,\n\t\t\tstoreTasksInGit,\n\t\t\tlog\n\t\t);\n\t} catch (error) {\n\t\tlog('error', `Failed to create .gitignore: ${error.message}`);\n\t}\n\n\t// Copy example_prd.txt to NEW location\n\tcopyTemplateFile('example_prd.txt', path.join(targetDir, EXAMPLE_PRD_FILE));\n\n\t// Initialize git repository if git is available\n\ttry {\n\t\tif (initGit === false) {\n\t\t\tlog('info', 'Git initialization skipped due to --no-git flag.');\n\t\t} else if (initGit === true) {\n\t\t\tif (insideGitWorkTree()) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'Existing Git repository detected – skipping git init despite --git flag.'\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\tlog('info', 'Initializing Git repository due to --git flag...');\n\t\t\t\texecSync('git init', { cwd: targetDir, stdio: 'ignore' });\n\t\t\t\tlog('success', 'Git repository initialized');\n\t\t\t}\n\t\t} else {\n\t\t\t// Default 
behavior when no flag is provided (from interactive prompt)\n\t\t\tif (insideGitWorkTree()) {\n\t\t\t\tlog('info', 'Existing Git repository detected – skipping git init.');\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'No Git repository detected. Initializing one in project root...'\n\t\t\t\t);\n\t\t\t\texecSync('git init', { cwd: targetDir, stdio: 'ignore' });\n\t\t\t\tlog('success', 'Git repository initialized');\n\t\t\t}\n\t\t}\n\t} catch (error) {\n\t\tlog('warn', 'Git not available, skipping repository initialization');\n\t}\n\n\t// Only run the manual transformer if rules were provided via flags.\n\t// The interactive `rules --setup` wizard handles its own installation.\n\tif (options.rulesExplicitlyProvided || options.yes) {\n\t\tlog('info', 'Generating profile rules from command-line flags...');\n\t\tfor (const profileName of selectedRuleProfiles) {\n\t\t\t_processSingleProfile(profileName);\n\t\t}\n\t}\n\n\t// Add shell aliases if requested\n\tif (addAliases) {\n\t\taddShellAliases();\n\t}\n\n\t// Run npm install automatically\n\tconst npmInstallOptions = {\n\t\tcwd: targetDir,\n\t\t// Default to inherit for interactive CLI, change if silent\n\t\tstdio: 'inherit'\n\t};\n\n\tif (isSilentMode()) {\n\t\t// If silent (MCP mode), suppress npm install output\n\t\tnpmInstallOptions.stdio = 'ignore';\n\t\tlog('info', 'Running npm install silently...'); // Log our own message\n\t} else {\n\t\t// Interactive mode, show the boxen message\n\t\tconsole.log(\n\t\t\tboxen(chalk.cyan('Installing dependencies...'), {\n\t\t\t\tpadding: 0.5,\n\t\t\t\tmargin: 0.5,\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tborderColor: 'blue'\n\t\t\t})\n\t\t);\n\t}\n\n\t// === Add Rule Profiles Setup Step ===\n\tif (\n\t\t!isSilentMode() &&\n\t\t!dryRun &&\n\t\t!options?.yes &&\n\t\t!options.rulesExplicitlyProvided\n\t) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.cyan('Configuring Rule Profiles...'), {\n\t\t\t\tpadding: 0.5,\n\t\t\t\tmargin: { top: 1, bottom: 0.5 
},\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tborderColor: 'blue'\n\t\t\t})\n\t\t);\n\t\tlog(\n\t\t\t'info',\n\t\t\t'Running interactive rules setup. Please select which rule profiles to include.'\n\t\t);\n\t\ttry {\n\t\t\t// Correct command confirmed by you.\n\t\t\texecSync('npx task-master rules --setup', {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: targetDir\n\t\t\t});\n\t\t\tlog('success', 'Rule profiles configured.');\n\t\t} catch (error) {\n\t\t\tlog('error', 'Failed to configure rule profiles:', error.message);\n\t\t\tlog('warn', 'You may need to run \"task-master rules --setup\" manually.');\n\t\t}\n\t} else if (isSilentMode() || dryRun || options?.yes) {\n\t\t// This branch can log why setup was skipped, similar to the model setup logic.\n\t\tif (options.rulesExplicitlyProvided) {\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t'Skipping interactive rules setup because --rules flag was used.'\n\t\t\t);\n\t\t} else {\n\t\t\tlog('info', 'Skipping interactive rules setup in non-interactive mode.');\n\t\t}\n\t}\n\t// =====================================\n\n\t// === Add Response Language Step ===\n\tif (!isSilentMode() && !dryRun && !options?.yes) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.cyan('Configuring Response Language...'), {\n\t\t\t\tpadding: 0.5,\n\t\t\t\tmargin: { top: 1, bottom: 0.5 },\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tborderColor: 'blue'\n\t\t\t})\n\t\t);\n\t\tlog(\n\t\t\t'info',\n\t\t\t'Running interactive response language setup. 
Please input your preferred language.'\n\t\t);\n\t\ttry {\n\t\t\texecSync('npx task-master lang --setup', {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: targetDir\n\t\t\t});\n\t\t\tlog('success', 'Response Language configured.');\n\t\t} catch (error) {\n\t\t\tlog('error', 'Failed to configure response language:', error.message);\n\t\t\tlog('warn', 'You may need to run \"task-master lang --setup\" manually.');\n\t\t}\n\t} else if (isSilentMode() && !dryRun) {\n\t\tlog(\n\t\t\t'info',\n\t\t\t'Skipping interactive response language setup in silent (MCP) mode.'\n\t\t);\n\t\tlog(\n\t\t\t'warn',\n\t\t\t'Please configure response language using \"task-master models --set-response-language\" or the \"models\" MCP tool.'\n\t\t);\n\t} else if (dryRun) {\n\t\tlog('info', 'DRY RUN: Skipping interactive response language setup.');\n\t}\n\t// =====================================\n\n\t// === Add Model Configuration Step ===\n\tif (!isSilentMode() && !dryRun && !options?.yes) {\n\t\tconsole.log(\n\t\t\tboxen(chalk.cyan('Configuring AI Models...'), {\n\t\t\t\tpadding: 0.5,\n\t\t\t\tmargin: { top: 1, bottom: 0.5 },\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tborderColor: 'blue'\n\t\t\t})\n\t\t);\n\t\tlog(\n\t\t\t'info',\n\t\t\t'Running interactive model setup. 
Please select your preferred AI models.'\n\t\t);\n\t\ttry {\n\t\t\texecSync('npx task-master models --setup', {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: targetDir\n\t\t\t});\n\t\t\tlog('success', 'AI Models configured.');\n\t\t} catch (error) {\n\t\t\tlog('error', 'Failed to configure AI models:', error.message);\n\t\t\tlog('warn', 'You may need to run \"task-master models --setup\" manually.');\n\t\t}\n\t} else if (isSilentMode() && !dryRun) {\n\t\tlog('info', 'Skipping interactive model setup in silent (MCP) mode.');\n\t\tlog(\n\t\t\t'warn',\n\t\t\t'Please configure AI models using \"task-master models --set-...\" or the \"models\" MCP tool.'\n\t\t);\n\t} else if (dryRun) {\n\t\tlog('info', 'DRY RUN: Skipping interactive model setup.');\n\t} else if (options?.yes) {\n\t\tlog('info', 'Skipping interactive model setup due to --yes flag.');\n\t\tlog(\n\t\t\t'info',\n\t\t\t'Default AI models will be used. You can configure different models later using \"task-master models --setup\" or \"task-master models --set-...\" commands.'\n\t\t);\n\t}\n\t// ====================================\n\n\t// Add shell aliases if requested\n\tif (addAliases && !dryRun) {\n\t\tlog('info', 'Adding shell aliases...');\n\t\tconst aliasResult = addShellAliases();\n\t\tif (aliasResult) {\n\t\t\tlog('success', 'Shell aliases added successfully');\n\t\t}\n\t} else if (addAliases && dryRun) {\n\t\tlog('info', 'DRY RUN: Would add shell aliases (tm, taskmaster)');\n\t}\n\n\t// Display success message\n\tif (!isSilentMode()) {\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\t`${warmGradient.multiline(\n\t\t\t\t\tfiglet.textSync('Success!', { font: 'Standard' })\n\t\t\t\t)}\\n${chalk.green('Project initialized successfully!')}`,\n\t\t\t\t{\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tmargin: 1,\n\t\t\t\t\tborderStyle: 'double',\n\t\t\t\t\tborderColor: 'green'\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n\n\t// Display next steps in a nice box\n\tif (!isSilentMode()) 
{\n\t\tconsole.log(\n\t\t\tboxen(\n\t\t\t\t`${chalk.cyan.bold('Things you should do next:')}\\n\\n${chalk.white('1. ')}${chalk.yellow(\n\t\t\t\t\t'Configure AI models (if needed) and add API keys to `.env`'\n\t\t\t\t)}\\n${chalk.white(' ├─ ')}${chalk.dim('Models: Use `task-master models` commands')}\\n${chalk.white(' └─ ')}${chalk.dim(\n\t\t\t\t\t'Keys: Add provider API keys to .env (or inside the MCP config file i.e. .cursor/mcp.json)'\n\t\t\t\t)}\\n${chalk.white('2. ')}${chalk.yellow(\n\t\t\t\t\t'Discuss your idea with AI and ask for a PRD using example_prd.txt, and save it to scripts/PRD.txt'\n\t\t\t\t)}\\n${chalk.white('3. ')}${chalk.yellow(\n\t\t\t\t\t'Ask Cursor Agent (or run CLI) to parse your PRD and generate initial tasks:'\n\t\t\t\t)}\\n${chalk.white(' └─ ')}${chalk.dim('MCP Tool: ')}${chalk.cyan('parse_prd')}${chalk.dim(' | CLI: ')}${chalk.cyan('task-master parse-prd scripts/prd.txt')}\\n${chalk.white('4. ')}${chalk.yellow(\n\t\t\t\t\t'Ask Cursor to analyze the complexity of the tasks in your PRD using research'\n\t\t\t\t)}\\n${chalk.white(' └─ ')}${chalk.dim('MCP Tool: ')}${chalk.cyan('analyze_project_complexity')}${chalk.dim(' | CLI: ')}${chalk.cyan('task-master analyze-complexity')}\\n${chalk.white('5. ')}${chalk.yellow(\n\t\t\t\t\t'Ask Cursor to expand all of your tasks using the complexity analysis'\n\t\t\t\t)}\\n${chalk.white('6. ')}${chalk.yellow('Ask Cursor to begin working on the next task')}\\n${chalk.white('7. ')}${chalk.yellow(\n\t\t\t\t\t'Add new tasks anytime using the add-task command or MCP tool'\n\t\t\t\t)}\\n${chalk.white('8. ')}${chalk.yellow(\n\t\t\t\t\t'Ask Cursor to set the status of one or many tasks/subtasks at a time. Use the task id from the task lists.'\n\t\t\t\t)}\\n${chalk.white('9. ')}${chalk.yellow(\n\t\t\t\t\t'Ask Cursor to update all tasks from a specific task id based on new learnings or pivots in your project.'\n\t\t\t\t)}\\n${chalk.white('10. 
')}${chalk.green.bold('Ship it!')}\\n\\n${chalk.dim(\n\t\t\t\t\t'* Review the README.md file to learn how to use other commands via Cursor Agent.'\n\t\t\t\t)}\\n${chalk.dim(\n\t\t\t\t\t'* Use the task-master command without arguments to see all available commands.'\n\t\t\t\t)}`,\n\t\t\t\t{\n\t\t\t\t\tpadding: 1,\n\t\t\t\t\tmargin: 1,\n\t\t\t\t\tborderStyle: 'round',\n\t\t\t\t\tborderColor: 'yellow',\n\t\t\t\t\ttitle: 'Getting Started',\n\t\t\t\t\ttitleAlignment: 'center'\n\t\t\t\t}\n\t\t\t)\n\t\t);\n\t}\n}\n\n// Ensure necessary functions are exported\nexport { initializeProject, log };\n"], ["/claude-task-master/mcp-server/src/tools/add-subtask.js", "/**\n * tools/add-subtask.js\n * Tool for adding subtasks to existing tasks\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { addSubtaskDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the addSubtask tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerAddSubtaskTool(server) {\n\tserver.addTool({\n\t\tname: 'add_subtask',\n\t\tdescription: 'Add a subtask to an existing task',\n\t\tparameters: z.object({\n\t\t\tid: z.string().describe('Parent task ID (required)'),\n\t\t\ttaskId: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Existing task ID to convert to subtask'),\n\t\t\ttitle: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Title for the new subtask (when creating a new subtask)'),\n\t\t\tdescription: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Description for the new subtask'),\n\t\t\tdetails: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Implementation details for the new subtask'),\n\t\t\tstatus: 
z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\"Status for the new subtask (default: 'pending')\"),\n\t\t\tdependencies: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Comma-separated list of dependency IDs for the new subtask'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the tasks file (default: tasks/tasks.json)'\n\t\t\t\t),\n\t\t\tskipGenerate: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Skip regenerating task files'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(`Adding subtask with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await addSubtaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\ttaskId: args.taskId,\n\t\t\t\t\t\ttitle: args.title,\n\t\t\t\t\t\tdescription: args.description,\n\t\t\t\t\t\tdetails: args.details,\n\t\t\t\t\t\tstatus: args.status,\n\t\t\t\t\t\tdependencies: args.dependencies,\n\t\t\t\t\t\tskipGenerate: args.skipGenerate,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: 
resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Subtask added successfully: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to add subtask: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error adding subtask',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in addSubtask tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/get-task.js", "/**\n * tools/get-task.js\n * Tool to get task details by ID\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { showTaskDirect } from '../core/task-master-core.js';\nimport {\n\tfindTasksPath,\n\tfindComplexityReportPath\n} from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Custom processor function that removes allTasks from the response\n * @param {Object} data - The data returned from showTaskDirect\n * @returns {Object} - The processed data with allTasks removed\n */\nfunction processTaskResponse(data) {\n\tif (!data) return data;\n\n\t// If we have the expected structure with task and allTasks\n\tif (typeof data === 'object' && data !== null && data.id && data.title) {\n\t\t// If the data itself looks like the task object, return it\n\t\treturn data;\n\t} else if (data.task) {\n\t\treturn data.task;\n\t}\n\n\t// If structure is unexpected, return as is\n\treturn data;\n}\n\n/**\n * Register the get-task tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerShowTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'get_task',\n\t\tdescription: 'Get detailed information about a 
specific task',\n\t\tparameters: z.object({\n\t\t\tid: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'Task ID(s) to get (can be comma-separated for multiple tasks)'\n\t\t\t\t),\n\t\t\tstatus: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\"Filter subtasks by status (e.g., 'pending', 'done')\"),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file relative to project root'),\n\t\t\tcomplexityReport: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the complexity report file (relative to project root or absolute)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the project root directory (Optional, usually from session)'\n\t\t\t\t),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\tconst { id, file, status, projectRoot } = args;\n\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Getting task details for ID: ${id}${status ? 
` (filtering subtasks by status: ${status})` : ''} in root: ${projectRoot}`\n\t\t\t\t);\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\t// Resolve the path to tasks.json using the NORMALIZED projectRoot from args\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: projectRoot, file: file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t\tlog.info(`Resolved tasks path: ${tasksJsonPath}`);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function, passing the normalized projectRoot\n\t\t\t\t// Resolve the path to complexity report\n\t\t\t\tlet complexityReportPath;\n\t\t\t\ttry {\n\t\t\t\t\tcomplexityReportPath = findComplexityReportPath(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\t\t\t\tcomplexityReport: args.complexityReport,\n\t\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t\t},\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding complexity report: ${error.message}`);\n\t\t\t\t}\n\t\t\t\tconst result = await showTaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\treportPath: complexityReportPath,\n\t\t\t\t\t\t// Pass other relevant args\n\t\t\t\t\t\tid: id,\n\t\t\t\t\t\tstatus: status,\n\t\t\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully retrieved task details for ID: ${args.id}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to get task: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\t// Use our custom processor function\n\t\t\t\treturn 
handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error retrieving task details',\n\t\t\t\t\tprocessTaskResponse,\n\t\t\t\t\tprojectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in get-task tool: ${error.message}\\n${error.stack}`);\n\t\t\t\treturn createErrorResponse(`Failed to get task: ${error.message}`);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/response-language.js", "/**\n * response-language.js\n * Direct function for managing response language via MCP\n */\n\nimport { setResponseLanguage } from '../../../../scripts/modules/task-manager.js';\nimport {\n\tenableSilentMode,\n\tdisableSilentMode\n} from '../../../../scripts/modules/utils.js';\nimport { createLogWrapper } from '../../tools/utils.js';\n\nexport async function responseLanguageDirect(args, log, context = {}) {\n\tconst { projectRoot, language } = args;\n\tconst mcpLog = createLogWrapper(log);\n\n\tlog.info(\n\t\t`Executing response-language_direct with args: ${JSON.stringify(args)}`\n\t);\n\tlog.info(`Using project root: ${projectRoot}`);\n\n\ttry {\n\t\tenableSilentMode();\n\t\treturn setResponseLanguage(language, {\n\t\t\tmcpLog,\n\t\t\tprojectRoot\n\t\t});\n\t} catch (error) {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'DIRECT_FUNCTION_ERROR',\n\t\t\t\tmessage: error.message,\n\t\t\t\tdetails: error.stack\n\t\t\t}\n\t\t};\n\t} finally {\n\t\tdisableSilentMode();\n\t}\n}\n"], ["/claude-task-master/test-clean-tags.js", "import fs from 'fs';\nimport {\n\tcreateTag,\n\tlistTags\n} from './scripts/modules/task-manager/tag-management.js';\n\nconsole.log('=== Testing Tag Management with Clean File ===');\n\n// Create a clean test tasks.json file\nconst testTasksPath = './test-tasks.json';\nconst cleanData = {\n\tmaster: {\n\t\ttasks: [\n\t\t\t{ id: 1, title: 'Test Task 1', status: 'pending' },\n\t\t\t{ id: 2, title: 'Test Task 2', status: 'done' }\n\t\t],\n\t\tmetadata: {\n\t\t\tcreated: new 
Date().toISOString(),\n\t\t\tdescription: 'Master tag'\n\t\t}\n\t}\n};\n\n// Write clean test file\nfs.writeFileSync(testTasksPath, JSON.stringify(cleanData, null, 2));\nconsole.log('Created clean test file');\n\ntry {\n\t// Test creating a new tag\n\tconsole.log('\\n--- Testing createTag ---');\n\tawait createTag(\n\t\ttestTasksPath,\n\t\t'test-branch',\n\t\t{ copyFromCurrent: true, description: 'Test branch' },\n\t\t{ projectRoot: process.cwd() },\n\t\t'json'\n\t);\n\n\t// Read the file and check for corruption\n\tconst resultData = JSON.parse(fs.readFileSync(testTasksPath, 'utf8'));\n\tconsole.log('Keys in result file:', Object.keys(resultData));\n\tconsole.log('Has _rawTaggedData in file:', !!resultData._rawTaggedData);\n\n\tif (resultData._rawTaggedData) {\n\t\tconsole.log('❌ CORRUPTION DETECTED: _rawTaggedData found in file!');\n\t} else {\n\t\tconsole.log('✅ SUCCESS: No _rawTaggedData corruption in file');\n\t}\n\n\t// Test listing tags\n\tconsole.log('\\n--- Testing listTags ---');\n\tconst tagList = await listTags(\n\t\ttestTasksPath,\n\t\t{},\n\t\t{ projectRoot: process.cwd() },\n\t\t'json'\n\t);\n\tconsole.log(\n\t\t'Found tags:',\n\t\ttagList.tags.map((t) => t.name)\n\t);\n} catch (error) {\n\tconsole.error('Error during test:', error.message);\n} finally {\n\t// Clean up test file\n\tif (fs.existsSync(testTasksPath)) {\n\t\tfs.unlinkSync(testTasksPath);\n\t\tconsole.log('\\nCleaned up test file');\n\t}\n}\n"], ["/claude-task-master/scripts/modules/config-manager.js", "import fs from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\nimport chalk from 'chalk';\nimport { z } from 'zod';\nimport { AI_COMMAND_NAMES } from '../../src/constants/commands.js';\nimport {\n\tLEGACY_CONFIG_FILE,\n\tTASKMASTER_DIR\n} from '../../src/constants/paths.js';\nimport {\n\tALL_PROVIDERS,\n\tCUSTOM_PROVIDERS,\n\tCUSTOM_PROVIDERS_ARRAY,\n\tVALIDATED_PROVIDERS\n} from '../../src/constants/providers.js';\nimport { findConfigPath } from 
'../../src/utils/path-utils.js';\nimport { findProjectRoot, isEmpty, log, resolveEnvVariable } from './utils.js';\n\n// Calculate __dirname in ESM\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = path.dirname(__filename);\n\n// Load supported models from JSON file using the calculated __dirname\nlet MODEL_MAP;\ntry {\n\tconst supportedModelsRaw = fs.readFileSync(\n\t\tpath.join(__dirname, 'supported-models.json'),\n\t\t'utf-8'\n\t);\n\tMODEL_MAP = JSON.parse(supportedModelsRaw);\n} catch (error) {\n\tconsole.error(\n\t\tchalk.red(\n\t\t\t'FATAL ERROR: Could not load supported-models.json. Please ensure the file exists and is valid JSON.'\n\t\t),\n\t\terror\n\t);\n\tMODEL_MAP = {}; // Default to empty map on error to avoid crashing, though functionality will be limited\n\tprocess.exit(1); // Exit if models can't be loaded\n}\n\n// Default configuration values (used if config file is missing or incomplete)\nconst DEFAULTS = {\n\tmodels: {\n\t\tmain: {\n\t\t\tprovider: 'anthropic',\n\t\t\tmodelId: 'claude-3-7-sonnet-20250219',\n\t\t\tmaxTokens: 64000,\n\t\t\ttemperature: 0.2\n\t\t},\n\t\tresearch: {\n\t\t\tprovider: 'perplexity',\n\t\t\tmodelId: 'sonar-pro',\n\t\t\tmaxTokens: 8700,\n\t\t\ttemperature: 0.1\n\t\t},\n\t\tfallback: {\n\t\t\t// No default fallback provider/model initially\n\t\t\tprovider: 'anthropic',\n\t\t\tmodelId: 'claude-3-5-sonnet',\n\t\t\tmaxTokens: 8192, // Default parameters if fallback IS configured\n\t\t\ttemperature: 0.2\n\t\t}\n\t},\n\tglobal: {\n\t\tlogLevel: 'info',\n\t\tdebug: false,\n\t\tdefaultNumTasks: 10,\n\t\tdefaultSubtasks: 5,\n\t\tdefaultPriority: 'medium',\n\t\tprojectName: 'Task Master',\n\t\tollamaBaseURL: 'http://localhost:11434/api',\n\t\tbedrockBaseURL: 'https://bedrock.us-east-1.amazonaws.com',\n\t\tresponseLanguage: 'English'\n\t},\n\tclaudeCode: {}\n};\n\n// --- Internal Config Loading ---\nlet loadedConfig = null;\nlet loadedConfigRoot = null; // Track which root loaded the config\n\n// Custom Error 
for configuration issues\nclass ConfigurationError extends Error {\n\tconstructor(message) {\n\t\tsuper(message);\n\t\tthis.name = 'ConfigurationError';\n\t}\n}\n\nfunction _loadAndValidateConfig(explicitRoot = null) {\n\tconst defaults = DEFAULTS; // Use the defined defaults\n\tlet rootToUse = explicitRoot;\n\tlet configSource = explicitRoot\n\t\t? `explicit root (${explicitRoot})`\n\t\t: 'defaults (no root provided yet)';\n\n\t// ---> If no explicit root, TRY to find it <---\n\tif (!rootToUse) {\n\t\trootToUse = findProjectRoot();\n\t\tif (rootToUse) {\n\t\t\tconfigSource = `found root (${rootToUse})`;\n\t\t} else {\n\t\t\t// No root found, use current working directory as fallback\n\t\t\t// This prevents infinite loops during initialization\n\t\t\trootToUse = process.cwd();\n\t\t\tconfigSource = `current directory (${rootToUse}) - no project markers found`;\n\t\t}\n\t}\n\t// ---> End find project root logic <---\n\n\t// --- Find configuration file ---\n\tlet configPath = null;\n\tlet config = { ...defaults }; // Start with a deep copy of defaults\n\tlet configExists = false;\n\n\t// During initialization (no project markers), skip config file search entirely\n\tconst hasProjectMarkers =\n\t\tfs.existsSync(path.join(rootToUse, TASKMASTER_DIR)) ||\n\t\tfs.existsSync(path.join(rootToUse, LEGACY_CONFIG_FILE));\n\n\tif (hasProjectMarkers) {\n\t\t// Only try to find config if we have project markers\n\t\t// This prevents the repeated warnings during init\n\t\tconfigPath = findConfigPath(null, { projectRoot: rootToUse });\n\t}\n\n\tif (configPath) {\n\t\tconfigExists = true;\n\t\tconst isLegacy = configPath.endsWith(LEGACY_CONFIG_FILE);\n\n\t\ttry {\n\t\t\tconst rawData = fs.readFileSync(configPath, 'utf-8');\n\t\t\tconst parsedConfig = JSON.parse(rawData);\n\n\t\t\t// Deep merge parsed config onto defaults\n\t\t\tconfig = {\n\t\t\t\tmodels: {\n\t\t\t\t\tmain: { ...defaults.models.main, ...parsedConfig?.models?.main },\n\t\t\t\t\tresearch: 
{\n\t\t\t\t\t\t...defaults.models.research,\n\t\t\t\t\t\t...parsedConfig?.models?.research\n\t\t\t\t\t},\n\t\t\t\t\tfallback:\n\t\t\t\t\t\tparsedConfig?.models?.fallback?.provider &&\n\t\t\t\t\t\tparsedConfig?.models?.fallback?.modelId\n\t\t\t\t\t\t\t? { ...defaults.models.fallback, ...parsedConfig.models.fallback }\n\t\t\t\t\t\t\t: { ...defaults.models.fallback }\n\t\t\t\t},\n\t\t\t\tglobal: { ...defaults.global, ...parsedConfig?.global },\n\t\t\t\tclaudeCode: { ...defaults.claudeCode, ...parsedConfig?.claudeCode }\n\t\t\t};\n\t\t\tconfigSource = `file (${configPath})`; // Update source info\n\n\t\t\t// Issue deprecation warning if using legacy config file\n\t\t\tif (isLegacy) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`⚠️ DEPRECATION WARNING: Found configuration in legacy location '${configPath}'. Please migrate to .taskmaster/config.json. Run 'task-master migrate' to automatically migrate your project.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// --- Validation (Warn if file content is invalid) ---\n\t\t\t// Use log.warn for consistency\n\t\t\tif (!validateProvider(config.models.main.provider)) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`Warning: Invalid main provider \"${config.models.main.provider}\" in ${configPath}. Falling back to default.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconfig.models.main = { ...defaults.models.main };\n\t\t\t}\n\t\t\tif (!validateProvider(config.models.research.provider)) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`Warning: Invalid research provider \"${config.models.research.provider}\" in ${configPath}. 
Falling back to default.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconfig.models.research = { ...defaults.models.research };\n\t\t\t}\n\t\t\tif (\n\t\t\t\tconfig.models.fallback?.provider &&\n\t\t\t\t!validateProvider(config.models.fallback.provider)\n\t\t\t) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`Warning: Invalid fallback provider \"${config.models.fallback.provider}\" in ${configPath}. Fallback model configuration will be ignored.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t\tconfig.models.fallback.provider = undefined;\n\t\t\t\tconfig.models.fallback.modelId = undefined;\n\t\t\t}\n\t\t\tif (config.claudeCode && !isEmpty(config.claudeCode)) {\n\t\t\t\tconfig.claudeCode = validateClaudeCodeSettings(config.claudeCode);\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Use console.error for actual errors during parsing\n\t\t\tconsole.error(\n\t\t\t\tchalk.red(\n\t\t\t\t\t`Error reading or parsing ${configPath}: ${error.message}. Using default configuration.`\n\t\t\t\t)\n\t\t\t);\n\t\t\tconfig = { ...defaults }; // Reset to defaults on parse error\n\t\t\tconfigSource = `defaults (parse error at ${configPath})`;\n\t\t}\n\t} else {\n\t\t// Config file doesn't exist at the determined rootToUse.\n\t\tif (explicitRoot) {\n\t\t\t// Only warn if an explicit root was *expected*.\n\t\t\tconsole.warn(\n\t\t\t\tchalk.yellow(\n\t\t\t\t\t`Warning: Configuration file not found at provided project root (${explicitRoot}). Using default configuration. 
Run 'task-master models --setup' to configure.`\n\t\t\t\t)\n\t\t\t);\n\t\t} else {\n\t\t\t// Don't warn about missing config during initialization\n\t\t\t// Only warn if this looks like an existing project (has .taskmaster dir or legacy config marker)\n\t\t\tconst hasTaskmasterDir = fs.existsSync(\n\t\t\t\tpath.join(rootToUse, TASKMASTER_DIR)\n\t\t\t);\n\t\t\tconst hasLegacyMarker = fs.existsSync(\n\t\t\t\tpath.join(rootToUse, LEGACY_CONFIG_FILE)\n\t\t\t);\n\n\t\t\tif (hasTaskmasterDir || hasLegacyMarker) {\n\t\t\t\tconsole.warn(\n\t\t\t\t\tchalk.yellow(\n\t\t\t\t\t\t`Warning: Configuration file not found at derived root (${rootToUse}). Using defaults.`\n\t\t\t\t\t)\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t\t// Keep config as defaults\n\t\tconfig = { ...defaults };\n\t\tconfigSource = `defaults (no config file found at ${rootToUse})`;\n\t}\n\n\treturn config;\n}\n\n/**\n * Gets the current configuration, loading it if necessary.\n * Handles MCP initialization context gracefully.\n * @param {string|null} explicitRoot - Optional explicit path to the project root.\n * @param {boolean} forceReload - Force reloading the config file.\n * @returns {object} The loaded configuration object.\n */\nfunction getConfig(explicitRoot = null, forceReload = false) {\n\t// Determine if a reload is necessary\n\tconst needsLoad =\n\t\t!loadedConfig ||\n\t\tforceReload ||\n\t\t(explicitRoot && explicitRoot !== loadedConfigRoot);\n\n\tif (needsLoad) {\n\t\tconst newConfig = _loadAndValidateConfig(explicitRoot); // _load handles null explicitRoot\n\n\t\t// Only update the global cache if loading was forced or if an explicit root\n\t\t// was provided (meaning we attempted to load a specific project's config).\n\t\t// We avoid caching the initial default load triggered without an explicitRoot.\n\t\tif (forceReload || explicitRoot) {\n\t\t\tloadedConfig = newConfig;\n\t\t\tloadedConfigRoot = explicitRoot; // Store the root used for this loaded config\n\t\t}\n\t\treturn newConfig; // Return the newly 
loaded/default config\n\t}\n\n\t// If no load was needed, return the cached config\n\treturn loadedConfig;\n}\n\n/**\n * Validates if a provider name is supported.\n * Custom providers (azure, vertex, bedrock, openrouter, ollama) are always allowed.\n * Validated providers must exist in the MODEL_MAP from supported-models.json.\n * @param {string} providerName The name of the provider.\n * @returns {boolean} True if the provider is valid, false otherwise.\n */\nfunction validateProvider(providerName) {\n\t// Custom providers are always allowed\n\tif (CUSTOM_PROVIDERS_ARRAY.includes(providerName)) {\n\t\treturn true;\n\t}\n\n\t// Validated providers must exist in MODEL_MAP\n\tif (VALIDATED_PROVIDERS.includes(providerName)) {\n\t\treturn !!(MODEL_MAP && MODEL_MAP[providerName]);\n\t}\n\n\t// Unknown providers are not allowed\n\treturn false;\n}\n\n/**\n * Optional: Validates if a modelId is known for a given provider based on MODEL_MAP.\n * This is a non-strict validation; an unknown model might still be valid.\n * @param {string} providerName The name of the provider.\n * @param {string} modelId The model ID.\n * @returns {boolean} True if the modelId is in the map for the provider, false otherwise.\n */\nfunction validateProviderModelCombination(providerName, modelId) {\n\t// If provider isn't even in our map, we can't validate the model\n\tif (!MODEL_MAP[providerName]) {\n\t\treturn true; // Allow unknown providers or those without specific model lists\n\t}\n\t// If the provider is known, check if the model is in its list OR if the list is empty (meaning accept any)\n\treturn (\n\t\tMODEL_MAP[providerName].length === 0 ||\n\t\t// Use .some() to check the 'id' property of objects in the array\n\t\tMODEL_MAP[providerName].some((modelObj) => modelObj.id === modelId)\n\t);\n}\n\n/**\n * Validates Claude Code AI provider custom settings\n * @param {object} settings The settings to validate\n * @returns {object} The validated settings\n */\nfunction 
validateClaudeCodeSettings(settings) {\n\t// Define the base settings schema without commandSpecific first\n\tconst BaseSettingsSchema = z.object({\n\t\tmaxTurns: z.number().int().positive().optional(),\n\t\tcustomSystemPrompt: z.string().optional(),\n\t\tappendSystemPrompt: z.string().optional(),\n\t\tpermissionMode: z\n\t\t\t.enum(['default', 'acceptEdits', 'plan', 'bypassPermissions'])\n\t\t\t.optional(),\n\t\tallowedTools: z.array(z.string()).optional(),\n\t\tdisallowedTools: z.array(z.string()).optional(),\n\t\tmcpServers: z\n\t\t\t.record(\n\t\t\t\tz.string(),\n\t\t\t\tz.object({\n\t\t\t\t\ttype: z.enum(['stdio', 'sse']).optional(),\n\t\t\t\t\tcommand: z.string(),\n\t\t\t\t\targs: z.array(z.string()).optional(),\n\t\t\t\t\tenv: z.record(z.string()).optional(),\n\t\t\t\t\turl: z.string().url().optional(),\n\t\t\t\t\theaders: z.record(z.string()).optional()\n\t\t\t\t})\n\t\t\t)\n\t\t\t.optional()\n\t});\n\n\t// Define CommandSpecificSchema using the base schema\n\tconst CommandSpecificSchema = z.record(\n\t\tz.enum(AI_COMMAND_NAMES),\n\t\tBaseSettingsSchema\n\t);\n\n\t// Define the full settings schema with commandSpecific\n\tconst SettingsSchema = BaseSettingsSchema.extend({\n\t\tcommandSpecific: CommandSpecificSchema.optional()\n\t});\n\n\tlet validatedSettings = {};\n\n\ttry {\n\t\tvalidatedSettings = SettingsSchema.parse(settings);\n\t} catch (error) {\n\t\tconsole.warn(\n\t\t\tchalk.yellow(\n\t\t\t\t`Warning: Invalid Claude Code settings in config: ${error.message}. 
Falling back to default.`\n\t\t\t)\n\t\t);\n\n\t\tvalidatedSettings = {};\n\t}\n\n\treturn validatedSettings;\n}\n\n// --- Claude Code Settings Getters ---\n\nfunction getClaudeCodeSettings(explicitRoot = null, forceReload = false) {\n\tconst config = getConfig(explicitRoot, forceReload);\n\t// Ensure Claude Code defaults are applied if Claude Code section is missing\n\treturn { ...DEFAULTS.claudeCode, ...(config?.claudeCode || {}) };\n}\n\nfunction getClaudeCodeSettingsForCommand(\n\tcommandName,\n\texplicitRoot = null,\n\tforceReload = false\n) {\n\tconst settings = getClaudeCodeSettings(explicitRoot, forceReload);\n\tconst commandSpecific = settings?.commandSpecific || {};\n\treturn { ...settings, ...commandSpecific[commandName] };\n}\n\n// --- Role-Specific Getters ---\n\nfunction getModelConfigForRole(role, explicitRoot = null) {\n\tconst config = getConfig(explicitRoot);\n\tconst roleConfig = config?.models?.[role];\n\tif (!roleConfig) {\n\t\tlog(\n\t\t\t'warn',\n\t\t\t`No model configuration found for role: ${role}. 
Returning default.`\n\t\t);\n\t\treturn DEFAULTS.models[role] || {};\n\t}\n\treturn roleConfig;\n}\n\nfunction getMainProvider(explicitRoot = null) {\n\treturn getModelConfigForRole('main', explicitRoot).provider;\n}\n\nfunction getMainModelId(explicitRoot = null) {\n\treturn getModelConfigForRole('main', explicitRoot).modelId;\n}\n\nfunction getMainMaxTokens(explicitRoot = null) {\n\t// Directly return value from config (which includes defaults)\n\treturn getModelConfigForRole('main', explicitRoot).maxTokens;\n}\n\nfunction getMainTemperature(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getModelConfigForRole('main', explicitRoot).temperature;\n}\n\nfunction getResearchProvider(explicitRoot = null) {\n\treturn getModelConfigForRole('research', explicitRoot).provider;\n}\n\nfunction getResearchModelId(explicitRoot = null) {\n\treturn getModelConfigForRole('research', explicitRoot).modelId;\n}\n\nfunction getResearchMaxTokens(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getModelConfigForRole('research', explicitRoot).maxTokens;\n}\n\nfunction getResearchTemperature(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getModelConfigForRole('research', explicitRoot).temperature;\n}\n\nfunction getFallbackProvider(explicitRoot = null) {\n\t// Directly return value from config (will be undefined if not set)\n\treturn getModelConfigForRole('fallback', explicitRoot).provider;\n}\n\nfunction getFallbackModelId(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getModelConfigForRole('fallback', explicitRoot).modelId;\n}\n\nfunction getFallbackMaxTokens(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getModelConfigForRole('fallback', explicitRoot).maxTokens;\n}\n\nfunction getFallbackTemperature(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getModelConfigForRole('fallback', explicitRoot).temperature;\n}\n\n// --- Global Settings 
Getters ---\n\nfunction getGlobalConfig(explicitRoot = null) {\n\tconst config = getConfig(explicitRoot);\n\t// Ensure global defaults are applied if global section is missing\n\treturn { ...DEFAULTS.global, ...(config?.global || {}) };\n}\n\nfunction getLogLevel(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).logLevel.toLowerCase();\n}\n\nfunction getDebugFlag(explicitRoot = null) {\n\t// Directly return value from config, ensure boolean\n\treturn getGlobalConfig(explicitRoot).debug === true;\n}\n\nfunction getDefaultSubtasks(explicitRoot = null) {\n\t// Directly return value from config, ensure integer\n\tconst val = getGlobalConfig(explicitRoot).defaultSubtasks;\n\tconst parsedVal = parseInt(val, 10);\n\treturn Number.isNaN(parsedVal) ? DEFAULTS.global.defaultSubtasks : parsedVal;\n}\n\nfunction getDefaultNumTasks(explicitRoot = null) {\n\tconst val = getGlobalConfig(explicitRoot).defaultNumTasks;\n\tconst parsedVal = parseInt(val, 10);\n\treturn Number.isNaN(parsedVal) ? 
DEFAULTS.global.defaultNumTasks : parsedVal;\n}\n\nfunction getDefaultPriority(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).defaultPriority;\n}\n\nfunction getProjectName(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).projectName;\n}\n\nfunction getOllamaBaseURL(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).ollamaBaseURL;\n}\n\nfunction getAzureBaseURL(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).azureBaseURL;\n}\n\nfunction getBedrockBaseURL(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).bedrockBaseURL;\n}\n\n/**\n * Gets the Google Cloud project ID for Vertex AI from configuration\n * @param {string|null} explicitRoot - Optional explicit path to the project root.\n * @returns {string|null} The project ID or null if not configured\n */\nfunction getVertexProjectId(explicitRoot = null) {\n\t// Return value from config\n\treturn getGlobalConfig(explicitRoot).vertexProjectId;\n}\n\n/**\n * Gets the Google Cloud location for Vertex AI from configuration\n * @param {string|null} explicitRoot - Optional explicit path to the project root.\n * @returns {string} The location or default value of \"us-central1\"\n */\nfunction getVertexLocation(explicitRoot = null) {\n\t// Return value from config or default\n\treturn getGlobalConfig(explicitRoot).vertexLocation || 'us-central1';\n}\n\nfunction getResponseLanguage(explicitRoot = null) {\n\t// Directly return value from config\n\treturn getGlobalConfig(explicitRoot).responseLanguage;\n}\n\n/**\n * Gets model parameters (maxTokens, temperature) for a specific role,\n * considering model-specific overrides from supported-models.json.\n * @param {string} role - The role ('main', 'research', 'fallback').\n * @param {string|null} explicitRoot - 
Optional explicit path to the project root.\n * @returns {{maxTokens: number, temperature: number}}\n */\nfunction getParametersForRole(role, explicitRoot = null) {\n\tconst roleConfig = getModelConfigForRole(role, explicitRoot);\n\tconst roleMaxTokens = roleConfig.maxTokens;\n\tconst roleTemperature = roleConfig.temperature;\n\tconst modelId = roleConfig.modelId;\n\tconst providerName = roleConfig.provider;\n\n\tlet effectiveMaxTokens = roleMaxTokens; // Start with the role's default\n\n\ttry {\n\t\t// Find the model definition in MODEL_MAP\n\t\tconst providerModels = MODEL_MAP[providerName];\n\t\tif (providerModels && Array.isArray(providerModels)) {\n\t\t\tconst modelDefinition = providerModels.find((m) => m.id === modelId);\n\n\t\t\t// Check if a model-specific max_tokens is defined and valid\n\t\t\tif (\n\t\t\t\tmodelDefinition &&\n\t\t\t\ttypeof modelDefinition.max_tokens === 'number' &&\n\t\t\t\tmodelDefinition.max_tokens > 0\n\t\t\t) {\n\t\t\t\tconst modelSpecificMaxTokens = modelDefinition.max_tokens;\n\t\t\t\t// Use the minimum of the role default and the model specific limit\n\t\t\t\teffectiveMaxTokens = Math.min(roleMaxTokens, modelSpecificMaxTokens);\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`Applying model-specific max_tokens (${modelSpecificMaxTokens}) for ${modelId}. Effective limit: ${effectiveMaxTokens}`\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`No valid model-specific max_tokens override found for ${modelId}. Using role default: ${roleMaxTokens}`\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\t// Special handling for custom OpenRouter models\n\t\t\tif (providerName === CUSTOM_PROVIDERS.OPENROUTER) {\n\t\t\t\t// Use a conservative default for OpenRouter models not in our list\n\t\t\t\tconst openrouterDefault = 32768;\n\t\t\t\teffectiveMaxTokens = Math.min(roleMaxTokens, openrouterDefault);\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`Custom OpenRouter model ${modelId} detected. 
Using conservative max_tokens: ${effectiveMaxTokens}`\n\t\t\t\t);\n\t\t\t} else {\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`No model definitions found for provider ${providerName} in MODEL_MAP. Using role default maxTokens: ${roleMaxTokens}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t} catch (lookupError) {\n\t\tlog(\n\t\t\t'warn',\n\t\t\t`Error looking up model-specific max_tokens for ${modelId}: ${lookupError.message}. Using role default: ${roleMaxTokens}`\n\t\t);\n\t\t// Fallback to role default on error\n\t\teffectiveMaxTokens = roleMaxTokens;\n\t}\n\n\treturn {\n\t\tmaxTokens: effectiveMaxTokens,\n\t\ttemperature: roleTemperature\n\t};\n}\n\n/**\n * Checks if the API key for a given provider is set in the environment.\n * Checks process.env first, then session.env if session is provided, then .env file if projectRoot provided.\n * @param {string} providerName - The name of the provider (e.g., 'openai', 'anthropic').\n * @param {object|null} [session=null] - The MCP session object (optional).\n * @param {string|null} [projectRoot=null] - The project root directory (optional, for .env file check).\n * @returns {boolean} True if the API key is set, false otherwise.\n */\nfunction isApiKeySet(providerName, session = null, projectRoot = null) {\n\t// Define the expected environment variable name for each provider\n\n\t// Providers that don't require API keys for authentication\n\tconst providersWithoutApiKeys = [\n\t\tCUSTOM_PROVIDERS.OLLAMA,\n\t\tCUSTOM_PROVIDERS.BEDROCK,\n\t\tCUSTOM_PROVIDERS.MCP,\n\t\tCUSTOM_PROVIDERS.GEMINI_CLI\n\t];\n\n\tif (providersWithoutApiKeys.includes(providerName?.toLowerCase())) {\n\t\treturn true; // Indicate key status is effectively \"OK\"\n\t}\n\n\t// Claude Code doesn't require an API key\n\tif (providerName?.toLowerCase() === 'claude-code') {\n\t\treturn true; // No API key needed\n\t}\n\n\tconst keyMap = {\n\t\topenai: 'OPENAI_API_KEY',\n\t\tanthropic: 'ANTHROPIC_API_KEY',\n\t\tgoogle: 'GOOGLE_API_KEY',\n\t\tperplexity: 
'PERPLEXITY_API_KEY',\n\t\tmistral: 'MISTRAL_API_KEY',\n\t\tazure: 'AZURE_OPENAI_API_KEY',\n\t\topenrouter: 'OPENROUTER_API_KEY',\n\t\txai: 'XAI_API_KEY',\n\t\tgroq: 'GROQ_API_KEY',\n\t\tvertex: 'GOOGLE_API_KEY', // Vertex uses the same key as Google\n\t\t'claude-code': 'CLAUDE_CODE_API_KEY', // Not actually used, but included for consistency\n\t\tbedrock: 'AWS_ACCESS_KEY_ID' // Bedrock uses AWS credentials\n\t\t// Add other providers as needed\n\t};\n\n\tconst providerKey = providerName?.toLowerCase();\n\tif (!providerKey || !keyMap[providerKey]) {\n\t\tlog('warn', `Unknown provider name: ${providerName} in isApiKeySet check.`);\n\t\treturn false;\n\t}\n\n\tconst envVarName = keyMap[providerKey];\n\tconst apiKeyValue = resolveEnvVariable(envVarName, session, projectRoot);\n\n\t// Check if the key exists, is not empty, and is not a placeholder\n\treturn (\n\t\tapiKeyValue &&\n\t\tapiKeyValue.trim() !== '' &&\n\t\t!/YOUR_.*_API_KEY_HERE/.test(apiKeyValue) && // General placeholder check\n\t\t!apiKeyValue.includes('KEY_HERE')\n\t); // Another common placeholder pattern\n}\n\n/**\n * Checks the API key status within .cursor/mcp.json for a given provider.\n * Reads the mcp.json file, finds the taskmaster-ai server config, and checks the relevant env var.\n * @param {string} providerName The name of the provider.\n * @param {string|null} projectRoot - Optional explicit path to the project root.\n * @returns {boolean} True if the key exists and is not a placeholder, false otherwise.\n */\nfunction getMcpApiKeyStatus(providerName, projectRoot = null) {\n\tconst rootDir = projectRoot || findProjectRoot(); // Use existing root finding\n\tif (!rootDir) {\n\t\tconsole.warn(\n\t\t\tchalk.yellow('Warning: Could not find project root to check mcp.json.')\n\t\t);\n\t\treturn false; // Cannot check without root\n\t}\n\tconst mcpConfigPath = path.join(rootDir, '.cursor', 'mcp.json');\n\n\tif (!fs.existsSync(mcpConfigPath)) {\n\t\t// console.warn(chalk.yellow('Warning: 
.cursor/mcp.json not found.'));\n\t\treturn false; // File doesn't exist\n\t}\n\n\ttry {\n\t\tconst mcpConfigRaw = fs.readFileSync(mcpConfigPath, 'utf-8');\n\t\tconst mcpConfig = JSON.parse(mcpConfigRaw);\n\n\t\tconst mcpEnv =\n\t\t\tmcpConfig?.mcpServers?.['task-master-ai']?.env ||\n\t\t\tmcpConfig?.mcpServers?.['taskmaster-ai']?.env;\n\t\tif (!mcpEnv) {\n\t\t\treturn false;\n\t\t}\n\n\t\tlet apiKeyToCheck = null;\n\t\tlet placeholderValue = null;\n\n\t\tswitch (providerName) {\n\t\t\tcase 'anthropic':\n\t\t\t\tapiKeyToCheck = mcpEnv.ANTHROPIC_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_ANTHROPIC_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'openai':\n\t\t\t\tapiKeyToCheck = mcpEnv.OPENAI_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_OPENAI_API_KEY_HERE'; // Assuming placeholder matches OPENAI\n\t\t\t\tbreak;\n\t\t\tcase 'openrouter':\n\t\t\t\tapiKeyToCheck = mcpEnv.OPENROUTER_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_OPENROUTER_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'google':\n\t\t\t\tapiKeyToCheck = mcpEnv.GOOGLE_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_GOOGLE_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'perplexity':\n\t\t\t\tapiKeyToCheck = mcpEnv.PERPLEXITY_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_PERPLEXITY_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'xai':\n\t\t\t\tapiKeyToCheck = mcpEnv.XAI_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_XAI_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'groq':\n\t\t\t\tapiKeyToCheck = mcpEnv.GROQ_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_GROQ_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'ollama':\n\t\t\t\treturn true; // No key needed\n\t\t\tcase 'claude-code':\n\t\t\t\treturn true; // No key needed\n\t\t\tcase 'mistral':\n\t\t\t\tapiKeyToCheck = mcpEnv.MISTRAL_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_MISTRAL_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'azure':\n\t\t\t\tapiKeyToCheck = mcpEnv.AZURE_OPENAI_API_KEY;\n\t\t\t\tplaceholderValue = 'YOUR_AZURE_OPENAI_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 
'vertex':\n\t\t\t\tapiKeyToCheck = mcpEnv.GOOGLE_API_KEY; // Vertex uses Google API key\n\t\t\t\tplaceholderValue = 'YOUR_GOOGLE_API_KEY_HERE';\n\t\t\t\tbreak;\n\t\t\tcase 'bedrock':\n\t\t\t\tapiKeyToCheck = mcpEnv.AWS_ACCESS_KEY_ID; // Bedrock uses AWS credentials\n\t\t\t\tplaceholderValue = 'YOUR_AWS_ACCESS_KEY_ID_HERE';\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\treturn false; // Unknown provider\n\t\t}\n\n\t\treturn !!apiKeyToCheck && !/KEY_HERE$/.test(apiKeyToCheck);\n\t} catch (error) {\n\t\tconsole.error(\n\t\t\tchalk.red(`Error reading or parsing .cursor/mcp.json: ${error.message}`)\n\t\t);\n\t\treturn false;\n\t}\n}\n\n/**\n * Gets a list of available models based on the MODEL_MAP.\n * @returns {Array<{id: string, name: string, provider: string, swe_score: number|null, cost_per_1m_tokens: {input: number|null, output: number|null}|null, allowed_roles: string[]}>}\n */\nfunction getAvailableModels() {\n\tconst available = [];\n\tfor (const [provider, models] of Object.entries(MODEL_MAP)) {\n\t\tif (models.length > 0) {\n\t\t\tmodels\n\t\t\t\t.filter((modelObj) => Boolean(modelObj.supported))\n\t\t\t\t.forEach((modelObj) => {\n\t\t\t\t\t// Basic name generation - can be improved\n\t\t\t\t\tconst modelId = modelObj.id;\n\t\t\t\t\tconst sweScore = modelObj.swe_score;\n\t\t\t\t\tconst cost = modelObj.cost_per_1m_tokens;\n\t\t\t\t\tconst allowedRoles = modelObj.allowed_roles || ['main', 'fallback'];\n\t\t\t\t\tconst nameParts = modelId\n\t\t\t\t\t\t.split('-')\n\t\t\t\t\t\t.map((p) => p.charAt(0).toUpperCase() + p.slice(1));\n\t\t\t\t\t// Handle specific known names better if needed\n\t\t\t\t\tlet name = nameParts.join(' ');\n\t\t\t\t\tif (modelId === 'claude-3.5-sonnet-20240620')\n\t\t\t\t\t\tname = 'Claude 3.5 Sonnet';\n\t\t\t\t\tif (modelId === 'claude-3-7-sonnet-20250219')\n\t\t\t\t\t\tname = 'Claude 3.7 Sonnet';\n\t\t\t\t\tif (modelId === 'gpt-4o') name = 'GPT-4o';\n\t\t\t\t\tif (modelId === 'gpt-4-turbo') name = 'GPT-4 Turbo';\n\t\t\t\t\tif (modelId === 
'sonar-pro') name = 'Perplexity Sonar Pro';\n\t\t\t\t\tif (modelId === 'sonar-mini') name = 'Perplexity Sonar Mini';\n\n\t\t\t\t\tavailable.push({\n\t\t\t\t\t\tid: modelId,\n\t\t\t\t\t\tname: name,\n\t\t\t\t\t\tprovider: provider,\n\t\t\t\t\t\tswe_score: sweScore,\n\t\t\t\t\t\tcost_per_1m_tokens: cost,\n\t\t\t\t\t\tallowed_roles: allowedRoles,\n\t\t\t\t\t\tmax_tokens: modelObj.max_tokens\n\t\t\t\t\t});\n\t\t\t\t});\n\t\t} else {\n\t\t\t// For providers with empty lists (like ollama), maybe add a placeholder or skip\n\t\t\tavailable.push({\n\t\t\t\tid: `[${provider}-any]`,\n\t\t\t\tname: `Any (${provider})`,\n\t\t\t\tprovider: provider\n\t\t\t});\n\t\t}\n\t}\n\treturn available;\n}\n\n/**\n * Writes the configuration object to the file.\n * @param {Object} config The configuration object to write.\n * @param {string|null} explicitRoot - Optional explicit path to the project root.\n * @returns {boolean} True if successful, false otherwise.\n */\nfunction writeConfig(config, explicitRoot = null) {\n\t// ---> Determine root path reliably <---\n\tlet rootPath = explicitRoot;\n\tif (explicitRoot === null || explicitRoot === undefined) {\n\t\t// Logic matching _loadAndValidateConfig\n\t\tconst foundRoot = findProjectRoot(); // *** Explicitly call findProjectRoot ***\n\t\tif (!foundRoot) {\n\t\t\tconsole.error(\n\t\t\t\tchalk.red(\n\t\t\t\t\t'Error: Could not determine project root. 
Configuration not saved.'\n\t\t\t\t)\n\t\t\t);\n\t\t\treturn false;\n\t\t}\n\t\trootPath = foundRoot;\n\t}\n\t// ---> End determine root path logic <---\n\n\t// Use new config location: .taskmaster/config.json\n\tconst taskmasterDir = path.join(rootPath, '.taskmaster');\n\tconst configPath = path.join(taskmasterDir, 'config.json');\n\n\ttry {\n\t\t// Ensure .taskmaster directory exists\n\t\tif (!fs.existsSync(taskmasterDir)) {\n\t\t\tfs.mkdirSync(taskmasterDir, { recursive: true });\n\t\t}\n\n\t\tfs.writeFileSync(configPath, JSON.stringify(config, null, 2));\n\t\tloadedConfig = config; // Update the cache after successful write\n\t\treturn true;\n\t} catch (error) {\n\t\tconsole.error(\n\t\t\tchalk.red(\n\t\t\t\t`Error writing configuration to ${configPath}: ${error.message}`\n\t\t\t)\n\t\t);\n\t\treturn false;\n\t}\n}\n\n/**\n * Checks if a configuration file exists at the project root (new or legacy location)\n * @param {string|null} explicitRoot - Optional explicit path to the project root\n * @returns {boolean} True if the file exists, false otherwise\n */\nfunction isConfigFilePresent(explicitRoot = null) {\n\treturn findConfigPath(null, { projectRoot: explicitRoot }) !== null;\n}\n\n/**\n * Gets the user ID from the configuration.\n * @param {string|null} explicitRoot - Optional explicit path to the project root.\n * @returns {string|null} The user ID or null if not found.\n */\nfunction getUserId(explicitRoot = null) {\n\tconst config = getConfig(explicitRoot);\n\tif (!config.global) {\n\t\tconfig.global = {}; // Ensure global object exists\n\t}\n\tif (!config.global.userId) {\n\t\tconfig.global.userId = '1234567890';\n\t\t// Attempt to write the updated config.\n\t\t// It's important that writeConfig correctly resolves the path\n\t\t// using explicitRoot, similar to how getConfig does.\n\t\tconst success = writeConfig(config, explicitRoot);\n\t\tif (!success) {\n\t\t\t// Log an error or handle the failure to write,\n\t\t\t// though for now, we'll proceed 
with the in-memory default.\n\t\t\tlog(\n\t\t\t\t'warning',\n\t\t\t\t'Failed to write updated configuration with new userId. Please let the developers know.'\n\t\t\t);\n\t\t}\n\t}\n\treturn config.global.userId;\n}\n\n/**\n * Gets a list of all known provider names (both validated and custom).\n * @returns {string[]} An array of all provider names.\n */\nfunction getAllProviders() {\n\treturn ALL_PROVIDERS;\n}\n\nfunction getBaseUrlForRole(role, explicitRoot = null) {\n\tconst roleConfig = getModelConfigForRole(role, explicitRoot);\n\tif (roleConfig && typeof roleConfig.baseURL === 'string') {\n\t\treturn roleConfig.baseURL;\n\t}\n\tconst provider = roleConfig?.provider;\n\tif (provider) {\n\t\tconst envVarName = `${provider.toUpperCase()}_BASE_URL`;\n\t\treturn resolveEnvVariable(envVarName, null, explicitRoot);\n\t}\n\treturn undefined;\n}\n\n// Export the providers without API keys array for use in other modules\nexport const providersWithoutApiKeys = [\n\tCUSTOM_PROVIDERS.OLLAMA,\n\tCUSTOM_PROVIDERS.BEDROCK,\n\tCUSTOM_PROVIDERS.GEMINI_CLI,\n\tCUSTOM_PROVIDERS.MCP\n];\n\nexport {\n\t// Core config access\n\tgetConfig,\n\twriteConfig,\n\tConfigurationError,\n\tisConfigFilePresent,\n\t// Claude Code settings\n\tgetClaudeCodeSettings,\n\tgetClaudeCodeSettingsForCommand,\n\t// Validation\n\tvalidateProvider,\n\tvalidateProviderModelCombination,\n\tvalidateClaudeCodeSettings,\n\tVALIDATED_PROVIDERS,\n\tCUSTOM_PROVIDERS,\n\tALL_PROVIDERS,\n\tMODEL_MAP,\n\tgetAvailableModels,\n\t// Role-specific getters (No env var overrides)\n\tgetMainProvider,\n\tgetMainModelId,\n\tgetMainMaxTokens,\n\tgetMainTemperature,\n\tgetResearchProvider,\n\tgetResearchModelId,\n\tgetResearchMaxTokens,\n\tgetResearchTemperature,\n\tgetFallbackProvider,\n\tgetFallbackModelId,\n\tgetFallbackMaxTokens,\n\tgetFallbackTemperature,\n\tgetBaseUrlForRole,\n\t// Global setting getters (No env var 
overrides)\n\tgetLogLevel,\n\tgetDebugFlag,\n\tgetDefaultNumTasks,\n\tgetDefaultSubtasks,\n\tgetDefaultPriority,\n\tgetProjectName,\n\tgetOllamaBaseURL,\n\tgetAzureBaseURL,\n\tgetBedrockBaseURL,\n\tgetResponseLanguage,\n\tgetParametersForRole,\n\tgetUserId,\n\t// API Key Checkers (still relevant)\n\tisApiKeySet,\n\tgetMcpApiKeyStatus,\n\t// ADD: Function to get all provider names\n\tgetAllProviders,\n\tgetVertexProjectId,\n\tgetVertexLocation\n};\n"], ["/claude-task-master/mcp-server/src/tools/remove-task.js", "/**\n * tools/remove-task.js\n * Tool to remove a task by ID\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { removeTaskDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the remove-task tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerRemoveTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'remove_task',\n\t\tdescription: 'Remove a task or subtask permanently from the tasks list',\n\t\tparameters: z.object({\n\t\t\tid: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t\"ID of the task or subtask to remove (e.g., '5' or '5.2'). Can be comma-separated to update multiple tasks/subtasks at once.\"\n\t\t\t\t),\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\tconfirm: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Whether to skip confirmation prompt (default: false)'),\n\t\t\ttag: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Specify which tag context to operate on. 
Defaults to the current active tag.'\n\t\t\t\t)\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Removing task(s) with ID(s): ${args.id}`);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tlog.info(`Using tasks file path: ${tasksJsonPath}`);\n\n\t\t\t\tconst result = await removeTaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully removed task: ${args.id}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to remove task: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error removing task',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in remove-task tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(`Failed to remove task: ${error.message}`);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/utils/path-utils.js", "/**\n * Path utility functions for Task Master\n * Provides centralized path resolution logic for both CLI and MCP use cases\n */\n\nimport path from 'path';\nimport fs from 'fs';\nimport 
{\n\tTASKMASTER_TASKS_FILE,\n\tLEGACY_TASKS_FILE,\n\tTASKMASTER_DOCS_DIR,\n\tTASKMASTER_REPORTS_DIR,\n\tCOMPLEXITY_REPORT_FILE,\n\tTASKMASTER_CONFIG_FILE,\n\tLEGACY_CONFIG_FILE\n} from '../constants/paths.js';\nimport { getLoggerOrDefault } from './logger-utils.js';\n\n/**\n * Normalize project root to ensure it doesn't end with .taskmaster\n * This prevents double .taskmaster paths when using constants that include .taskmaster\n * @param {string} projectRoot - The project root path to normalize\n * @returns {string} - Normalized project root path\n */\nexport function normalizeProjectRoot(projectRoot) {\n\tif (!projectRoot) return projectRoot;\n\n\t// Ensure it's a string\n\tprojectRoot = String(projectRoot);\n\n\t// Split the path into segments\n\tconst segments = projectRoot.split(path.sep);\n\n\t// Find the index of .taskmaster segment\n\tconst taskmasterIndex = segments.findIndex(\n\t\t(segment) => segment === '.taskmaster'\n\t);\n\n\tif (taskmasterIndex !== -1) {\n\t\t// If .taskmaster is found, return everything up to but not including .taskmaster\n\t\tconst normalizedSegments = segments.slice(0, taskmasterIndex);\n\t\treturn normalizedSegments.join(path.sep) || path.sep;\n\t}\n\n\treturn projectRoot;\n}\n\n/**\n * Find the project root directory by looking for project markers\n * @param {string} startDir - Directory to start searching from\n * @returns {string|null} - Project root path or null if not found\n */\nexport function findProjectRoot(startDir = process.cwd()) {\n\tconst projectMarkers = [\n\t\t'.taskmaster',\n\t\tTASKMASTER_TASKS_FILE,\n\t\t'tasks.json',\n\t\tLEGACY_TASKS_FILE,\n\t\t'.git',\n\t\t'.svn',\n\t\t'package.json',\n\t\t'yarn.lock',\n\t\t'package-lock.json',\n\t\t'pnpm-lock.yaml'\n\t];\n\n\tlet currentDir = path.resolve(startDir);\n\tconst rootDir = path.parse(currentDir).root;\n\tconst maxDepth = 50; // Reasonable limit to prevent infinite loops\n\tlet depth = 0;\n\n\twhile (currentDir !== rootDir && depth < maxDepth) {\n\t\t// Check if 
current directory contains any project markers\n\t\tfor (const marker of projectMarkers) {\n\t\t\tconst markerPath = path.join(currentDir, marker);\n\t\t\tif (fs.existsSync(markerPath)) {\n\t\t\t\treturn currentDir;\n\t\t\t}\n\t\t}\n\t\tcurrentDir = path.dirname(currentDir);\n\t\tdepth++;\n\t}\n\n\t// Fallback to current working directory if no project root found\n\treturn process.cwd();\n}\n\n/**\n * Find the tasks.json file path with fallback logic\n * @param {string|null} explicitPath - Explicit path provided by user (highest priority)\n * @param {Object|null} args - Args object from MCP args (optional)\n * @param {Object|null} log - Logger object (optional)\n * @returns {string|null} - Resolved tasks.json path or null if not found\n */\nexport function findTasksPath(explicitPath = null, args = null, log = null) {\n\t// Use the passed logger if available, otherwise use the default logger\n\tconst logger = getLoggerOrDefault(log);\n\n\t// 1. First determine project root to use as base for all path resolution\n\tconst rawProjectRoot = args?.projectRoot || findProjectRoot();\n\n\tif (!rawProjectRoot) {\n\t\tlogger.warn?.('Could not determine project root directory');\n\t\treturn null;\n\t}\n\n\t// 2. Normalize project root to prevent double .taskmaster paths\n\tconst projectRoot = normalizeProjectRoot(rawProjectRoot);\n\n\t// 3. If explicit path is provided, resolve it relative to project root (highest priority)\n\tif (explicitPath) {\n\t\tconst resolvedPath = path.isAbsolute(explicitPath)\n\t\t\t? explicitPath\n\t\t\t: path.resolve(projectRoot, explicitPath);\n\n\t\tif (fs.existsSync(resolvedPath)) {\n\t\t\tlogger.info?.(`Using explicit tasks path: ${resolvedPath}`);\n\t\t\treturn resolvedPath;\n\t\t} else {\n\t\t\tlogger.warn?.(\n\t\t\t\t`Explicit tasks path not found: ${resolvedPath}, trying fallbacks`\n\t\t\t);\n\t\t}\n\t}\n\n\t// 4. 
Check possible locations in order of preference\n\tconst possiblePaths = [\n\t\tpath.join(projectRoot, TASKMASTER_TASKS_FILE), // .taskmaster/tasks/tasks.json (NEW)\n\t\tpath.join(projectRoot, LEGACY_TASKS_FILE) // tasks/tasks.json (LEGACY)\n\t];\n\n\tfor (const tasksPath of possiblePaths) {\n\t\tif (fs.existsSync(tasksPath)) {\n\t\t\tlogger.info?.(`Found tasks file at: ${tasksPath}`);\n\n\t\t\t// Issue deprecation warning for legacy paths\n\t\t\tif (\n\t\t\t\ttasksPath.includes('tasks/tasks.json') &&\n\t\t\t\t!tasksPath.includes('.taskmaster')\n\t\t\t) {\n\t\t\t\tlogger.warn?.(\n\t\t\t\t\t`⚠️ DEPRECATION WARNING: Found tasks.json in legacy location '${tasksPath}'. Please migrate to the new .taskmaster directory structure. Run 'task-master migrate' to automatically migrate your project.`\n\t\t\t\t);\n\t\t\t} else if (\n\t\t\t\ttasksPath.endsWith('tasks.json') &&\n\t\t\t\t!tasksPath.includes('.taskmaster') &&\n\t\t\t\t!tasksPath.includes('tasks/')\n\t\t\t) {\n\t\t\t\tlogger.warn?.(\n\t\t\t\t\t`⚠️ DEPRECATION WARNING: Found tasks.json in legacy root location '${tasksPath}'. Please migrate to the new .taskmaster directory structure. Run 'task-master migrate' to automatically migrate your project.`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\treturn tasksPath;\n\t\t}\n\t}\n\n\tlogger.warn?.(`No tasks.json found in project: ${projectRoot}`);\n\treturn null;\n}\n\n/**\n * Find the PRD document file path with fallback logic\n * @param {string|null} explicitPath - Explicit path provided by user (highest priority)\n * @param {Object|null} args - Args object for MCP context (optional)\n * @param {Object|null} log - Logger object (optional)\n * @returns {string|null} - Resolved PRD document path or null if not found\n */\nexport function findPRDPath(explicitPath = null, args = null, log = null) {\n\tconst logger = getLoggerOrDefault(log);\n\n\t// 1. If explicit path is provided, use it (highest priority)\n\tif (explicitPath) {\n\t\tconst resolvedPath = path.isAbsolute(explicitPath)\n\t\t\t? 
explicitPath\n\t\t\t: path.resolve(process.cwd(), explicitPath);\n\n\t\tif (fs.existsSync(resolvedPath)) {\n\t\t\tlogger.info?.(`Using explicit PRD path: ${resolvedPath}`);\n\t\t\treturn resolvedPath;\n\t\t} else {\n\t\t\tlogger.warn?.(\n\t\t\t\t`Explicit PRD path not found: ${resolvedPath}, trying fallbacks`\n\t\t\t);\n\t\t}\n\t}\n\n\t// 2. Try to get project root from args (MCP) or find it\n\tconst rawProjectRoot = args?.projectRoot || findProjectRoot();\n\n\tif (!rawProjectRoot) {\n\t\tlogger.warn?.('Could not determine project root directory');\n\t\treturn null;\n\t}\n\n\t// 3. Normalize project root to prevent double .taskmaster paths\n\tconst projectRoot = normalizeProjectRoot(rawProjectRoot);\n\n\t// 4. Check possible locations in order of preference\n\tconst locations = [\n\t\tTASKMASTER_DOCS_DIR, // .taskmaster/docs/ (NEW)\n\t\t'scripts/', // Legacy location\n\t\t'' // Project root\n\t];\n\n\tconst fileNames = ['PRD.md', 'prd.md', 'PRD.txt', 'prd.txt'];\n\n\tfor (const location of locations) {\n\t\tfor (const fileName of fileNames) {\n\t\t\tconst prdPath = path.join(projectRoot, location, fileName);\n\t\t\tif (fs.existsSync(prdPath)) {\n\t\t\t\tlogger.info?.(`Found PRD document at: ${prdPath}`);\n\n\t\t\t\t// Issue deprecation warning for legacy paths\n\t\t\t\tif (location === 'scripts/' || location === '') {\n\t\t\t\t\tlogger.warn?.(\n\t\t\t\t\t\t`⚠️ DEPRECATION WARNING: Found PRD file in legacy location '${prdPath}'. Please migrate to .taskmaster/docs/ directory. 
Run 'task-master migrate' to automatically migrate your project.`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\treturn prdPath;\n\t\t\t}\n\t\t}\n\t}\n\n\tlogger.warn?.(`No PRD document found in project: ${projectRoot}`);\n\treturn null;\n}\n\n/**\n * Find the complexity report file path with fallback logic\n * @param {string|null} explicitPath - Explicit path provided by user (highest priority)\n * @param {Object|null} args - Args object for MCP context (optional)\n * @param {Object|null} log - Logger object (optional)\n * @returns {string|null} - Resolved complexity report path or null if not found\n */\nexport function findComplexityReportPath(\n\texplicitPath = null,\n\targs = null,\n\tlog = null\n) {\n\tconst logger = getLoggerOrDefault(log);\n\n\t// 1. If explicit path is provided, use it (highest priority)\n\tif (explicitPath) {\n\t\tconst resolvedPath = path.isAbsolute(explicitPath)\n\t\t\t? explicitPath\n\t\t\t: path.resolve(process.cwd(), explicitPath);\n\n\t\tif (fs.existsSync(resolvedPath)) {\n\t\t\tlogger.info?.(`Using explicit complexity report path: ${resolvedPath}`);\n\t\t\treturn resolvedPath;\n\t\t} else {\n\t\t\tlogger.warn?.(\n\t\t\t\t`Explicit complexity report path not found: ${resolvedPath}, trying fallbacks`\n\t\t\t);\n\t\t}\n\t}\n\n\t// 2. Try to get project root from args (MCP) or find it\n\tconst rawProjectRoot = args?.projectRoot || findProjectRoot();\n\n\tif (!rawProjectRoot) {\n\t\tlogger.warn?.('Could not determine project root directory');\n\t\treturn null;\n\t}\n\n\t// 3. Normalize project root to prevent double .taskmaster paths\n\tconst projectRoot = normalizeProjectRoot(rawProjectRoot);\n\n\t// 4. 
Check possible locations in order of preference\n\tconst locations = [\n\t\tTASKMASTER_REPORTS_DIR, // .taskmaster/reports/ (NEW)\n\t\t'scripts/', // Legacy location\n\t\t'' // Project root\n\t];\n\n\tconst fileNames = ['task-complexity', 'complexity-report'].map((fileName) => {\n\t\tif (args?.tag && args?.tag !== 'master') {\n\t\t\treturn `${fileName}_${args.tag}.json`;\n\t\t}\n\t\treturn `${fileName}.json`;\n\t});\n\n\tfor (const location of locations) {\n\t\tfor (const fileName of fileNames) {\n\t\t\tconst reportPath = path.join(projectRoot, location, fileName);\n\t\t\tif (fs.existsSync(reportPath)) {\n\t\t\t\tlogger.info?.(`Found complexity report at: ${reportPath}`);\n\n\t\t\t\t// Issue deprecation warning for legacy paths\n\t\t\t\tif (location === 'scripts/' || location === '') {\n\t\t\t\t\tlogger.warn?.(\n\t\t\t\t\t\t`⚠️ DEPRECATION WARNING: Found complexity report in legacy location '${reportPath}'. Please migrate to .taskmaster/reports/ directory. Run 'task-master migrate' to automatically migrate your project.`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\treturn reportPath;\n\t\t\t}\n\t\t}\n\t}\n\n\tlogger.warn?.(`No complexity report found in project: ${projectRoot}`);\n\treturn null;\n}\n\n/**\n * Resolve output path for tasks.json (create if needed)\n * @param {string|null} explicitPath - Explicit output path provided by user\n * @param {Object|null} args - Args object for MCP context (optional)\n * @param {Object|null} log - Logger object (optional)\n * @returns {string} - Resolved output path for tasks.json\n */\nexport function resolveTasksOutputPath(\n\texplicitPath = null,\n\targs = null,\n\tlog = null\n) {\n\tconst logger = getLoggerOrDefault(log);\n\n\t// 1. If explicit path is provided, use it\n\tif (explicitPath) {\n\t\tconst resolvedPath = path.isAbsolute(explicitPath)\n\t\t\t? explicitPath\n\t\t\t: path.resolve(process.cwd(), explicitPath);\n\n\t\tlogger.info?.(`Using explicit output path: ${resolvedPath}`);\n\t\treturn resolvedPath;\n\t}\n\n\t// 2. 
Try to get project root from args (MCP) or find it\n\tconst rawProjectRoot =\n\t\targs?.projectRoot || findProjectRoot() || process.cwd();\n\n\t// 3. Normalize project root to prevent double .taskmaster paths\n\tconst projectRoot = normalizeProjectRoot(rawProjectRoot);\n\n\t// 4. Use new .taskmaster structure by default\n\tconst defaultPath = path.join(projectRoot, TASKMASTER_TASKS_FILE);\n\tlogger.info?.(`Using default output path: ${defaultPath}`);\n\n\t// Ensure the directory exists\n\tconst outputDir = path.dirname(defaultPath);\n\tif (!fs.existsSync(outputDir)) {\n\t\tlogger.info?.(`Creating tasks directory: ${outputDir}`);\n\t\tfs.mkdirSync(outputDir, { recursive: true });\n\t}\n\n\treturn defaultPath;\n}\n\n/**\n * Resolve output path for complexity report (create if needed)\n * @param {string|null} explicitPath - Explicit output path provided by user\n * @param {Object|null} args - Args object for MCP context (optional)\n * @param {Object|null} log - Logger object (optional)\n * @returns {string} - Resolved output path for complexity report\n */\nexport function resolveComplexityReportOutputPath(\n\texplicitPath = null,\n\targs = null,\n\tlog = null\n) {\n\tconst logger = getLoggerOrDefault(log);\n\tconst tag = args?.tag;\n\n\t// 1. If explicit path is provided, use it\n\tif (explicitPath) {\n\t\tconst resolvedPath = path.isAbsolute(explicitPath)\n\t\t\t? explicitPath\n\t\t\t: path.resolve(process.cwd(), explicitPath);\n\n\t\tlogger.info?.(\n\t\t\t`Using explicit complexity report output path: ${resolvedPath}`\n\t\t);\n\t\treturn resolvedPath;\n\t}\n\n\t// 2. Try to get project root from args (MCP) or find it\n\tconst rawProjectRoot =\n\t\targs?.projectRoot || findProjectRoot() || process.cwd();\n\tconst projectRoot = normalizeProjectRoot(rawProjectRoot);\n\n\t// 3. Use tag-aware filename\n\tlet filename = 'task-complexity-report.json';\n\tif (tag && tag !== 'master') {\n\t\tfilename = `task-complexity-report_${tag}.json`;\n\t}\n\n\t// 4. 
Use new .taskmaster structure by default\n\tconst defaultPath = path.join(projectRoot, '.taskmaster/reports', filename);\n\tlogger.info?.(\n\t\t`Using tag-aware complexity report output path: ${defaultPath}`\n\t);\n\n\t// Ensure the directory exists\n\tconst outputDir = path.dirname(defaultPath);\n\tif (!fs.existsSync(outputDir)) {\n\t\tlogger.info?.(`Creating reports directory: ${outputDir}`);\n\t\tfs.mkdirSync(outputDir, { recursive: true });\n\t}\n\n\treturn defaultPath;\n}\n\n/**\n * Find the configuration file path with fallback logic\n * @param {string|null} explicitPath - Explicit path provided by user (highest priority)\n * @param {Object|null} args - Args object for MCP context (optional)\n * @param {Object|null} log - Logger object (optional)\n * @returns {string|null} - Resolved config file path or null if not found\n */\nexport function findConfigPath(explicitPath = null, args = null, log = null) {\n\tconst logger = getLoggerOrDefault(log);\n\n\t// 1. If explicit path is provided, use it (highest priority)\n\tif (explicitPath) {\n\t\tconst resolvedPath = path.isAbsolute(explicitPath)\n\t\t\t? explicitPath\n\t\t\t: path.resolve(process.cwd(), explicitPath);\n\n\t\tif (fs.existsSync(resolvedPath)) {\n\t\t\tlogger.info?.(`Using explicit config path: ${resolvedPath}`);\n\t\t\treturn resolvedPath;\n\t\t} else {\n\t\t\tlogger.warn?.(\n\t\t\t\t`Explicit config path not found: ${resolvedPath}, trying fallbacks`\n\t\t\t);\n\t\t}\n\t}\n\n\t// 2. Try to get project root from args (MCP) or find it\n\tconst rawProjectRoot = args?.projectRoot || findProjectRoot();\n\n\tif (!rawProjectRoot) {\n\t\tlogger.warn?.('Could not determine project root directory');\n\t\treturn null;\n\t}\n\n\t// 3. Normalize project root to prevent double .taskmaster paths\n\tconst projectRoot = normalizeProjectRoot(rawProjectRoot);\n\n\t// 4. 
Check possible locations in order of preference\n\tconst possiblePaths = [\n\t\tpath.join(projectRoot, TASKMASTER_CONFIG_FILE), // NEW location\n\t\tpath.join(projectRoot, LEGACY_CONFIG_FILE) // LEGACY location\n\t];\n\n\tfor (const configPath of possiblePaths) {\n\t\tif (fs.existsSync(configPath)) {\n\t\t\t// Issue deprecation warning for legacy paths\n\t\t\tif (configPath?.endsWith(LEGACY_CONFIG_FILE)) {\n\t\t\t\tlogger.warn?.(\n\t\t\t\t\t`⚠️ DEPRECATION WARNING: Found configuration in legacy location '${configPath}'. Please migrate to .taskmaster/config.json. Run 'task-master migrate' to automatically migrate your project.`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\treturn configPath;\n\t\t}\n\t}\n\n\tlogger.warn?.(`No configuration file found in project: ${projectRoot}`);\n\treturn null;\n}\n"], ["/claude-task-master/mcp-server/src/tools/clear-subtasks.js", "/**\n * tools/clear-subtasks.js\n * Tool for clearing subtasks from parent tasks\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { clearSubtasksDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the clearSubtasks tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerClearSubtasksTool(server) {\n\tserver.addTool({\n\t\tname: 'clear_subtasks',\n\t\tdescription: 'Clear subtasks from specified tasks',\n\t\tparameters: z\n\t\t\t.object({\n\t\t\t\tid: z\n\t\t\t\t\t.string()\n\t\t\t\t\t.optional()\n\t\t\t\t\t.describe('Task IDs (comma-separated) to clear subtasks from'),\n\t\t\t\tall: z.boolean().optional().describe('Clear subtasks from all tasks'),\n\t\t\t\tfile: z\n\t\t\t\t\t.string()\n\t\t\t\t\t.optional()\n\t\t\t\t\t.describe(\n\t\t\t\t\t\t'Absolute path to the tasks file (default: tasks/tasks.json)'\n\t\t\t\t\t),\n\t\t\t\tprojectRoot: 
z\n\t\t\t\t\t.string()\n\t\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t\t})\n\t\t\t.refine((data) => data.id || data.all, {\n\t\t\t\tmessage: \"Either 'id' or 'all' parameter must be provided\",\n\t\t\t\tpath: ['id', 'all']\n\t\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Clearing subtasks with args: ${JSON.stringify(args)}`);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await clearSubtasksDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tall: args.all,\n\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Subtasks cleared successfully: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to clear subtasks: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error clearing subtasks',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in clearSubtasks tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], 
["/claude-task-master/mcp-server/src/tools/remove-subtask.js", "/**\n * tools/remove-subtask.js\n * Tool for removing subtasks from parent tasks\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { removeSubtaskDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the removeSubtask tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerRemoveSubtaskTool(server) {\n\tserver.addTool({\n\t\tname: 'remove_subtask',\n\t\tdescription: 'Remove a subtask from its parent task',\n\t\tparameters: z.object({\n\t\t\tid: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t\"Subtask ID to remove in format 'parentId.subtaskId' (required)\"\n\t\t\t\t),\n\t\t\tconvert: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Convert the subtask to a standalone task instead of deleting it'\n\t\t\t\t),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the tasks file (default: tasks/tasks.json)'\n\t\t\t\t),\n\t\t\tskipGenerate: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Skip regenerating task files'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(`Removing subtask with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await removeSubtaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tconvert: args.convert,\n\t\t\t\t\t\tskipGenerate: args.skipGenerate,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Subtask removed successfully: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to remove subtask: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error removing subtask',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in removeSubtask tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/utils/rule-transformer.js", "/**\n * Rule Transformer Module\n * Handles conversion of Cursor rules to profile rules\n *\n * This module procedurally generates .{profile}/rules 
files from assets/rules files,\n * eliminating the need to maintain both sets of files manually.\n */\nimport fs from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\nimport { log } from '../../scripts/modules/utils.js';\n\n// Import the shared MCP configuration helper\nimport {\n\tsetupMCPConfiguration,\n\tremoveTaskMasterMCPConfiguration\n} from './create-mcp-config.js';\n\n// Import profile constants (single source of truth)\nimport { RULE_PROFILES } from '../constants/profiles.js';\n\n// --- Profile Imports ---\nimport * as profilesModule from '../profiles/index.js';\n\nexport function isValidProfile(profile) {\n\treturn RULE_PROFILES.includes(profile);\n}\n\n/**\n * Get rule profile by name\n * @param {string} name - Profile name\n * @returns {Object|null} Profile object or null if not found\n */\nexport function getRulesProfile(name) {\n\tif (!isValidProfile(name)) {\n\t\treturn null;\n\t}\n\n\t// Get the profile from the imported profiles module\n\tconst profileKey = `${name}Profile`;\n\tconst profile = profilesModule[profileKey];\n\n\tif (!profile) {\n\t\tthrow new Error(\n\t\t\t`Profile not found: static import missing for '${name}'. 
Valid profiles: ${RULE_PROFILES.join(', ')}`\n\t\t);\n\t}\n\n\treturn profile;\n}\n\n/**\n * Replace basic Cursor terms with profile equivalents\n */\nfunction replaceBasicTerms(content, conversionConfig) {\n\tlet result = content;\n\n\t// Apply profile term replacements\n\tconversionConfig.profileTerms.forEach((pattern) => {\n\t\tif (typeof pattern.to === 'function') {\n\t\t\tresult = result.replace(pattern.from, pattern.to);\n\t\t} else {\n\t\t\tresult = result.replace(pattern.from, pattern.to);\n\t\t}\n\t});\n\n\t// Apply file extension replacements\n\tconversionConfig.fileExtensions.forEach((pattern) => {\n\t\tresult = result.replace(pattern.from, pattern.to);\n\t});\n\n\treturn result;\n}\n\n/**\n * Replace Cursor tool references with profile tool equivalents\n */\nfunction replaceToolReferences(content, conversionConfig) {\n\tlet result = content;\n\n\t// Basic pattern for direct tool name replacements\n\tconst toolNames = conversionConfig.toolNames;\n\tconst toolReferencePattern = new RegExp(\n\t\t`\\\\b(${Object.keys(toolNames).join('|')})\\\\b`,\n\t\t'g'\n\t);\n\n\t// Apply direct tool name replacements\n\tresult = result.replace(toolReferencePattern, (match, toolName) => {\n\t\treturn toolNames[toolName] || toolName;\n\t});\n\n\t// Apply contextual tool replacements\n\tconversionConfig.toolContexts.forEach((pattern) => {\n\t\tresult = result.replace(pattern.from, pattern.to);\n\t});\n\n\t// Apply tool group replacements\n\tconversionConfig.toolGroups.forEach((pattern) => {\n\t\tresult = result.replace(pattern.from, pattern.to);\n\t});\n\n\treturn result;\n}\n\n/**\n * Update documentation URLs to point to profile documentation\n */\nfunction updateDocReferences(content, conversionConfig) {\n\tlet result = content;\n\n\t// Apply documentation URL replacements\n\tconversionConfig.docUrls.forEach((pattern) => {\n\t\tif (typeof pattern.to === 'function') {\n\t\t\tresult = result.replace(pattern.from, pattern.to);\n\t\t} else {\n\t\t\tresult = 
result.replace(pattern.from, pattern.to);\n\t\t}\n\t});\n\n\treturn result;\n}\n\n/**\n * Update file references in markdown links\n */\nfunction updateFileReferences(content, conversionConfig) {\n\tconst { pathPattern, replacement } = conversionConfig.fileReferences;\n\treturn content.replace(pathPattern, replacement);\n}\n\n/**\n * Transform rule content to profile-specific rules\n * @param {string} content - The content to transform\n * @param {Object} conversionConfig - The conversion configuration\n * @param {Object} globalReplacements - Global text replacements\n * @returns {string} - The transformed content\n */\nfunction transformRuleContent(content, conversionConfig, globalReplacements) {\n\tlet result = content;\n\n\t// Apply all transformations in appropriate order\n\tresult = updateFileReferences(result, conversionConfig);\n\tresult = replaceBasicTerms(result, conversionConfig);\n\tresult = replaceToolReferences(result, conversionConfig);\n\tresult = updateDocReferences(result, conversionConfig);\n\n\t// Apply any global/catch-all replacements from the profile\n\t// Super aggressive failsafe pass to catch any variations we might have missed\n\t// This ensures critical transformations are applied even in contexts we didn't anticipate\n\tglobalReplacements.forEach((pattern) => {\n\t\tif (typeof pattern.to === 'function') {\n\t\t\tresult = result.replace(pattern.from, pattern.to);\n\t\t} else {\n\t\t\tresult = result.replace(pattern.from, pattern.to);\n\t\t}\n\t});\n\n\treturn result;\n}\n\n/**\n * Convert a Cursor rule file to a profile-specific rule file\n * @param {string} sourcePath - Path to the source .mdc file\n * @param {string} targetPath - Path to the target file\n * @param {Object} profile - The profile configuration\n * @returns {boolean} - Success status\n */\nexport function convertRuleToProfileRule(sourcePath, targetPath, profile) {\n\tconst { conversionConfig, globalReplacements } = profile;\n\ttry {\n\t\t// Read source content\n\t\tconst 
content = fs.readFileSync(sourcePath, 'utf8');\n\n\t\t// Transform content\n\t\tconst transformedContent = transformRuleContent(\n\t\t\tcontent,\n\t\t\tconversionConfig,\n\t\t\tglobalReplacements\n\t\t);\n\n\t\t// Ensure target directory exists\n\t\tconst targetDir = path.dirname(targetPath);\n\t\tif (!fs.existsSync(targetDir)) {\n\t\t\tfs.mkdirSync(targetDir, { recursive: true });\n\t\t}\n\n\t\t// Write transformed content\n\t\tfs.writeFileSync(targetPath, transformedContent);\n\n\t\treturn true;\n\t} catch (error) {\n\t\tconsole.error(`Error converting rule file: ${error.message}`);\n\t\treturn false;\n\t}\n}\n\n/**\n * Convert all Cursor rules to profile rules for a specific profile\n */\nexport function convertAllRulesToProfileRules(projectRoot, profile) {\n\tconst __filename = fileURLToPath(import.meta.url);\n\tconst __dirname = path.dirname(__filename);\n\tconst sourceDir = path.join(__dirname, '..', '..', 'assets', 'rules');\n\tconst targetDir = path.join(projectRoot, profile.rulesDir);\n\tconst assetsDir = path.join(__dirname, '..', '..', 'assets');\n\n\tlet success = 0;\n\tlet failed = 0;\n\n\t// 1. Call onAddRulesProfile first (for pre-processing like copying assets)\n\tif (typeof profile.onAddRulesProfile === 'function') {\n\t\ttry {\n\t\t\tprofile.onAddRulesProfile(projectRoot, assetsDir);\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[Rule Transformer] Called onAddRulesProfile for ${profile.profileName}`\n\t\t\t);\n\t\t} catch (error) {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`[Rule Transformer] onAddRulesProfile failed for ${profile.profileName}: ${error.message}`\n\t\t\t);\n\t\t\tfailed++;\n\t\t}\n\t}\n\n\t// 2. 
Handle fileMap-based rule conversion (if any)\n\tconst sourceFiles = Object.keys(profile.fileMap);\n\tif (sourceFiles.length > 0) {\n\t\t// Only create rules directory if we have files to copy\n\t\tif (!fs.existsSync(targetDir)) {\n\t\t\tfs.mkdirSync(targetDir, { recursive: true });\n\t\t}\n\n\t\tfor (const sourceFile of sourceFiles) {\n\t\t\t// Determine if this is an asset file (not a rule file)\n\t\t\tconst isAssetFile = !sourceFile.startsWith('rules/');\n\n\t\t\ttry {\n\t\t\t\t// Use explicit path from fileMap - assets/ is the base directory\n\t\t\t\tconst sourcePath = path.join(assetsDir, sourceFile);\n\n\t\t\t\t// Check if source file exists\n\t\t\t\tif (!fs.existsSync(sourcePath)) {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'warn',\n\t\t\t\t\t\t`[Rule Transformer] Source file not found: ${sourcePath}, skipping`\n\t\t\t\t\t);\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tconst targetFilename = profile.fileMap[sourceFile];\n\t\t\t\tconst targetPath = path.join(targetDir, targetFilename);\n\n\t\t\t\t// Ensure target subdirectory exists (for rules like taskmaster/dev_workflow.md)\n\t\t\t\tconst targetFileDir = path.dirname(targetPath);\n\t\t\t\tif (!fs.existsSync(targetFileDir)) {\n\t\t\t\t\tfs.mkdirSync(targetFileDir, { recursive: true });\n\t\t\t\t}\n\n\t\t\t\t// Read source content\n\t\t\t\tlet content = fs.readFileSync(sourcePath, 'utf8');\n\n\t\t\t\t// Apply transformations (only if this is a rule file, not an asset file)\n\t\t\t\tif (!isAssetFile) {\n\t\t\t\t\tcontent = transformRuleContent(\n\t\t\t\t\t\tcontent,\n\t\t\t\t\t\tprofile.conversionConfig,\n\t\t\t\t\t\tprofile.globalReplacements\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Write to target\n\t\t\t\tfs.writeFileSync(targetPath, content, 'utf8');\n\t\t\t\tsuccess++;\n\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`[Rule Transformer] ${isAssetFile ? 
'Copied' : 'Converted'} ${sourceFile} -> ${targetFilename} for ${profile.profileName}`\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tfailed++;\n\t\t\t\tlog(\n\t\t\t\t\t'error',\n\t\t\t\t\t`[Rule Transformer] Failed to ${isAssetFile ? 'copy' : 'convert'} ${sourceFile} for ${profile.profileName}: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t}\n\n\t// 3. Setup MCP configuration (if enabled)\n\tif (profile.mcpConfig !== false) {\n\t\ttry {\n\t\t\tsetupMCPConfiguration(projectRoot, profile.mcpConfigPath);\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[Rule Transformer] Setup MCP configuration for ${profile.profileName}`\n\t\t\t);\n\t\t} catch (error) {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`[Rule Transformer] MCP setup failed for ${profile.profileName}: ${error.message}`\n\t\t\t);\n\t\t}\n\t}\n\n\t// 4. Call post-conversion hook (for finalization)\n\tif (typeof profile.onPostConvertRulesProfile === 'function') {\n\t\ttry {\n\t\t\tprofile.onPostConvertRulesProfile(projectRoot, assetsDir);\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[Rule Transformer] Called onPostConvertRulesProfile for ${profile.profileName}`\n\t\t\t);\n\t\t} catch (error) {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`[Rule Transformer] onPostConvertRulesProfile failed for ${profile.profileName}: ${error.message}`\n\t\t\t);\n\t\t}\n\t}\n\n\t// Ensure we return at least 1 success for profiles that only use lifecycle functions\n\treturn { success: Math.max(success, 1), failed };\n}\n\n/**\n * Remove only Task Master specific files from a profile, leaving other existing rules intact\n * @param {string} projectRoot - Target project directory\n * @param {Object} profile - Profile configuration\n * @returns {Object} Result object\n */\nexport function removeProfileRules(projectRoot, profile) {\n\tconst targetDir = path.join(projectRoot, profile.rulesDir);\n\tconst profileDir = path.join(projectRoot, profile.profileDir);\n\n\tconst result = {\n\t\tprofileName: profile.profileName,\n\t\tsuccess: false,\n\t\tskipped: 
false,\n\t\terror: null,\n\t\tfilesRemoved: [],\n\t\tmcpResult: null,\n\t\tprofileDirRemoved: false,\n\t\tnotice: null\n\t};\n\n\ttry {\n\t\t// 1. Call onRemoveRulesProfile first (for custom cleanup like removing assets)\n\t\tif (typeof profile.onRemoveRulesProfile === 'function') {\n\t\t\ttry {\n\t\t\t\tprofile.onRemoveRulesProfile(projectRoot);\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`[Rule Transformer] Called onRemoveRulesProfile for ${profile.profileName}`\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog(\n\t\t\t\t\t'error',\n\t\t\t\t\t`[Rule Transformer] onRemoveRulesProfile failed for ${profile.profileName}: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\t// 2. Remove fileMap-based files (if any)\n\t\tconst sourceFiles = Object.keys(profile.fileMap);\n\t\tif (sourceFiles.length > 0) {\n\t\t\t// Check if profile directory exists at all (for full profiles)\n\t\t\tif (!fs.existsSync(profileDir)) {\n\t\t\t\tresult.success = true;\n\t\t\t\tresult.skipped = true;\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`[Rule Transformer] Profile directory does not exist: ${profileDir}`\n\t\t\t\t);\n\t\t\t\treturn result;\n\t\t\t}\n\n\t\t\tlet hasOtherRulesFiles = false;\n\n\t\t\tif (fs.existsSync(targetDir)) {\n\t\t\t\t// Get list of files we're responsible for\n\t\t\t\tconst taskMasterFiles = sourceFiles.map(\n\t\t\t\t\t(sourceFile) => profile.fileMap[sourceFile]\n\t\t\t\t);\n\n\t\t\t\t// Get all files in the rules directory\n\t\t\t\tconst allFiles = fs.readdirSync(targetDir, { recursive: true });\n\t\t\t\tconst allFilePaths = allFiles\n\t\t\t\t\t.filter((file) => {\n\t\t\t\t\t\tconst fullPath = path.join(targetDir, file);\n\t\t\t\t\t\treturn fs.statSync(fullPath).isFile();\n\t\t\t\t\t})\n\t\t\t\t\t.map((file) => file.toString()); // Ensure it's a string\n\n\t\t\t\t// Remove only Task Master files\n\t\t\t\tfor (const taskMasterFile of taskMasterFiles) {\n\t\t\t\t\tconst filePath = path.join(targetDir, taskMasterFile);\n\t\t\t\t\tif (fs.existsSync(filePath)) 
{\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tfs.rmSync(filePath, { force: true });\n\t\t\t\t\t\t\tresult.filesRemoved.push(taskMasterFile);\n\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t'debug',\n\t\t\t\t\t\t\t\t`[Rule Transformer] Removed Task Master file: ${taskMasterFile}`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\t\tlog(\n\t\t\t\t\t\t\t\t'error',\n\t\t\t\t\t\t\t\t`[Rule Transformer] Failed to remove ${taskMasterFile}: ${error.message}`\n\t\t\t\t\t\t\t);\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Check for other (non-Task Master) files\n\t\t\t\tconst remainingFiles = allFilePaths.filter(\n\t\t\t\t\t(file) => !taskMasterFiles.includes(file)\n\t\t\t\t);\n\n\t\t\t\thasOtherRulesFiles = remainingFiles.length > 0;\n\n\t\t\t\t// Remove empty directories or note preserved files\n\t\t\t\tif (remainingFiles.length === 0) {\n\t\t\t\t\tfs.rmSync(targetDir, { recursive: true, force: true });\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'debug',\n\t\t\t\t\t\t`[Rule Transformer] Removed empty rules directory: ${targetDir}`\n\t\t\t\t\t);\n\t\t\t\t} else if (hasOtherRulesFiles) {\n\t\t\t\t\tresult.notice = `Preserved ${remainingFiles.length} existing rule files in ${profile.rulesDir}`;\n\t\t\t\t\tlog('info', `[Rule Transformer] ${result.notice}`);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// 3. 
Handle MCP configuration - only remove Task Master, preserve other servers\n\t\tif (profile.mcpConfig !== false) {\n\t\t\ttry {\n\t\t\t\tresult.mcpResult = removeTaskMasterMCPConfiguration(\n\t\t\t\t\tprojectRoot,\n\t\t\t\t\tprofile.mcpConfigPath\n\t\t\t\t);\n\t\t\t\tif (result.mcpResult.hasOtherServers) {\n\t\t\t\t\tif (!result.notice) {\n\t\t\t\t\t\tresult.notice = 'Preserved other MCP server configurations';\n\t\t\t\t\t} else {\n\t\t\t\t\t\tresult.notice += '; preserved other MCP server configurations';\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`[Rule Transformer] Processed MCP configuration for ${profile.profileName}`\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog(\n\t\t\t\t\t'error',\n\t\t\t\t\t`[Rule Transformer] MCP cleanup failed for ${profile.profileName}: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\t// 4. Check if we should remove the entire profile directory\n\t\tif (fs.existsSync(profileDir)) {\n\t\t\tconst remainingContents = fs.readdirSync(profileDir);\n\t\t\tif (remainingContents.length === 0 && profile.profileDir !== '.') {\n\t\t\t\t// Only remove profile directory if it's empty and not root directory\n\t\t\t\ttry {\n\t\t\t\t\tfs.rmSync(profileDir, { recursive: true, force: true });\n\t\t\t\t\tresult.profileDirRemoved = true;\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'debug',\n\t\t\t\t\t\t`[Rule Transformer] Removed empty profile directory: ${profileDir}`\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'error',\n\t\t\t\t\t\t`[Rule Transformer] Failed to remove profile directory ${profileDir}: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} else if (remainingContents.length > 0) {\n\t\t\t\t// Profile directory has remaining files/folders, add notice\n\t\t\t\tconst preservedNotice = `Preserved ${remainingContents.length} existing files/folders in ${profile.profileDir}`;\n\t\t\t\tif (!result.notice) {\n\t\t\t\t\tresult.notice = preservedNotice;\n\t\t\t\t} else {\n\t\t\t\t\tresult.notice += `; 
${preservedNotice.toLowerCase()}`;\n\t\t\t\t}\n\t\t\t\tlog('info', `[Rule Transformer] ${preservedNotice}`);\n\t\t\t}\n\t\t}\n\n\t\tresult.success = true;\n\t\tlog(\n\t\t\t'debug',\n\t\t\t`[Rule Transformer] Successfully removed ${profile.profileName} Task Master files from ${projectRoot}`\n\t\t);\n\t} catch (error) {\n\t\tresult.error = error.message;\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[Rule Transformer] Failed to remove ${profile.profileName} rules: ${error.message}`\n\t\t);\n\t}\n\n\treturn result;\n}\n"], ["/claude-task-master/mcp-server/src/tools/update-subtask.js", "/**\n * tools/update-subtask.js\n * Tool to append additional information to a specific subtask\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { updateSubtaskByIdDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the update-subtask tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerUpdateSubtaskTool(server) {\n\tserver.addTool({\n\t\tname: 'update_subtask',\n\t\tdescription:\n\t\t\t'Appends timestamped information to a specific subtask without replacing existing content. If you just want to update the subtask status, use set_task_status instead.',\n\t\tparameters: z.object({\n\t\t\tid: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'ID of the subtask to update in format \"parentId.subtaskId\" (e.g., \"5.2\"). 
Parent ID is the ID of the task that contains the subtask.'\n\t\t\t\t),\n\t\t\tprompt: z.string().describe('Information to add to the subtask'),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Use Perplexity AI for research-backed updates'),\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\tconst toolName = 'update_subtask';\n\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(`Updating subtask with args: ${JSON.stringify(args)}`);\n\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`${toolName}: Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await updateSubtaskByIdDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tprompt: args.prompt,\n\t\t\t\t\t\tresearch: args.research,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully updated subtask with ID ${args.id}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(\n\t\t\t\t\t\t`Failed to update subtask: ${result.error?.message || 'Unknown error'}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error updating 
subtask',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(\n\t\t\t\t\t`Critical error in ${toolName} tool execute: ${error.message}`\n\t\t\t\t);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Internal tool error (${toolName}): ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/analyze.js", "/**\n * tools/analyze.js\n * Tool for analyzing task complexity and generating recommendations\n */\n\nimport { z } from 'zod';\nimport path from 'path';\nimport fs from 'fs'; // Import fs for directory check/creation\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { analyzeTaskComplexityDirect } from '../core/task-master-core.js'; // Assuming core functions are exported via task-master-core.js\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\nimport { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';\nimport { resolveComplexityReportOutputPath } from '../../../src/utils/path-utils.js';\n\n/**\n * Register the analyze_project_complexity tool\n * @param {Object} server - FastMCP server instance\n */\nexport function registerAnalyzeProjectComplexityTool(server) {\n\tserver.addTool({\n\t\tname: 'analyze_project_complexity',\n\t\tdescription:\n\t\t\t'Analyze task complexity and generate expansion recommendations.',\n\t\tparameters: z.object({\n\t\t\tthreshold: z.coerce // Use coerce for number conversion from string if needed\n\t\t\t\t.number()\n\t\t\t\t.int()\n\t\t\t\t.min(1)\n\t\t\t\t.max(10)\n\t\t\t\t.optional()\n\t\t\t\t.default(5) // Default threshold\n\t\t\t\t.describe('Complexity score threshold (1-10) to recommend expansion.'),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(false)\n\t\t\t\t.describe('Use Perplexity AI for research-backed analysis.'),\n\t\t\toutput: 
z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t`Output file path relative to project root (default: ${COMPLEXITY_REPORT_FILE}).`\n\t\t\t\t),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the tasks file relative to project root (default: tasks/tasks.json).'\n\t\t\t\t),\n\t\t\tids: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Comma-separated list of task IDs to analyze specifically (e.g., \"1,3,5\").'\n\t\t\t\t),\n\t\t\tfrom: z.coerce\n\t\t\t\t.number()\n\t\t\t\t.int()\n\t\t\t\t.positive()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Starting task ID in a range to analyze.'),\n\t\t\tto: z.coerce\n\t\t\t\t.number()\n\t\t\t\t.int()\n\t\t\t\t.positive()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Ending task ID in a range to analyze.'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\tconst toolName = 'analyze_project_complexity'; // Define tool name for logging\n\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Executing ${toolName} tool with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t\tlog.info(`${toolName}: Resolved tasks path: ${tasksJsonPath}`);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`${toolName}: Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json within project root '${args.projectRoot}': ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst 
outputPath = resolveComplexityReportOutputPath(\n\t\t\t\t\targs.output,\n\t\t\t\t\t{\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog\n\t\t\t\t);\n\n\t\t\t\tlog.info(`${toolName}: Report output path: ${outputPath}`);\n\n\t\t\t\t// Ensure output directory exists\n\t\t\t\tconst outputDir = path.dirname(outputPath);\n\t\t\t\ttry {\n\t\t\t\t\tif (!fs.existsSync(outputDir)) {\n\t\t\t\t\t\tfs.mkdirSync(outputDir, { recursive: true });\n\t\t\t\t\t\tlog.info(`${toolName}: Created output directory: ${outputDir}`);\n\t\t\t\t\t}\n\t\t\t\t} catch (dirError) {\n\t\t\t\t\tlog.error(\n\t\t\t\t\t\t`${toolName}: Failed to create output directory ${outputDir}: ${dirError.message}`\n\t\t\t\t\t);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to create output directory: ${dirError.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// 3. Call Direct Function - Pass projectRoot in first arg object\n\t\t\t\tconst result = await analyzeTaskComplexityDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\toutputPath: outputPath,\n\t\t\t\t\t\tthreshold: args.threshold,\n\t\t\t\t\t\tresearch: args.research,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag,\n\t\t\t\t\t\tids: args.ids,\n\t\t\t\t\t\tfrom: args.from,\n\t\t\t\t\t\tto: args.to\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\t// 4. 
Handle Result\n\t\t\t\tlog.info(\n\t\t\t\t\t`${toolName}: Direct function result: success=${result.success}`\n\t\t\t\t);\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error analyzing task complexity',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(\n\t\t\t\t\t`Critical error in ${toolName} tool execute: ${error.message}`\n\t\t\t\t);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Internal tool error (${toolName}): ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/scripts/modules/utils/git-utils.js", "/**\n * git-utils.js\n * Git integration utilities for Task Master\n * Uses raw git commands and gh CLI for operations\n * MCP-friendly: All functions require projectRoot parameter\n */\n\nimport { exec, execSync } from 'child_process';\nimport { promisify } from 'util';\nimport path from 'path';\nimport fs from 'fs';\n\nconst execAsync = promisify(exec);\n\n/**\n * Check if the specified directory is inside a git repository\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<boolean>} True if inside a git repository\n */\nasync function isGitRepository(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for isGitRepository');\n\t}\n\n\ttry {\n\t\tawait execAsync('git rev-parse --git-dir', { cwd: projectRoot });\n\t\treturn true;\n\t} catch (error) {\n\t\treturn false;\n\t}\n}\n\n/**\n * Get the current git branch name\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<string|null>} Current branch name or null if not in git repo\n */\nasync function getCurrentBranch(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getCurrentBranch');\n\t}\n\n\ttry {\n\t\tconst { stdout } = await execAsync('git rev-parse --abbrev-ref HEAD', {\n\t\t\tcwd: projectRoot\n\t\t});\n\t\treturn stdout.trim();\n\t} catch (error) 
{\n\t\treturn null;\n\t}\n}\n\n/**\n * Get list of all local git branches\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<string[]>} Array of branch names\n */\nasync function getLocalBranches(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getLocalBranches');\n\t}\n\n\ttry {\n\t\tconst { stdout } = await execAsync(\n\t\t\t'git branch --format=\"%(refname:short)\"',\n\t\t\t{ cwd: projectRoot }\n\t\t);\n\t\treturn stdout\n\t\t\t.trim()\n\t\t\t.split('\\n')\n\t\t\t.filter((branch) => branch.length > 0)\n\t\t\t.map((branch) => branch.trim());\n\t} catch (error) {\n\t\treturn [];\n\t}\n}\n\n/**\n * Get list of all remote branches\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<string[]>} Array of remote branch names (without remote prefix)\n */\nasync function getRemoteBranches(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getRemoteBranches');\n\t}\n\n\ttry {\n\t\tconst { stdout } = await execAsync(\n\t\t\t'git branch -r --format=\"%(refname:short)\"',\n\t\t\t{ cwd: projectRoot }\n\t\t);\n\t\treturn stdout\n\t\t\t.trim()\n\t\t\t.split('\\n')\n\t\t\t.filter((branch) => branch.length > 0 && !branch.includes('HEAD'))\n\t\t\t.map((branch) => branch.replace(/^origin\\//, '').trim());\n\t} catch (error) {\n\t\treturn [];\n\t}\n}\n\n/**\n * Check if gh CLI is available and authenticated\n * @param {string} [projectRoot] - Directory context (optional for this check)\n * @returns {Promise<boolean>} True if gh CLI is available and authenticated\n */\nasync function isGhCliAvailable(projectRoot = null) {\n\ttry {\n\t\tconst options = projectRoot ? 
{ cwd: projectRoot } : {};\n\t\tawait execAsync('gh auth status', options);\n\t\treturn true;\n\t} catch (error) {\n\t\treturn false;\n\t}\n}\n\n/**\n * Get GitHub repository information using gh CLI\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<Object|null>} Repository info or null if not available\n */\nasync function getGitHubRepoInfo(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getGitHubRepoInfo');\n\t}\n\n\ttry {\n\t\tconst { stdout } = await execAsync(\n\t\t\t'gh repo view --json name,owner,defaultBranchRef',\n\t\t\t{ cwd: projectRoot }\n\t\t);\n\t\treturn JSON.parse(stdout);\n\t} catch (error) {\n\t\treturn null;\n\t}\n}\n\n/**\n * Sanitize branch name to be a valid tag name\n * @param {string} branchName - Git branch name\n * @returns {string} Sanitized tag name\n */\nfunction sanitizeBranchNameForTag(branchName) {\n\tif (!branchName || typeof branchName !== 'string') {\n\t\treturn 'unknown-branch';\n\t}\n\n\t// Replace invalid characters with hyphens and clean up\n\treturn branchName\n\t\t.replace(/[^a-zA-Z0-9_-]/g, '-') // Replace invalid chars with hyphens\n\t\t.replace(/^-+|-+$/g, '') // Remove leading/trailing hyphens\n\t\t.replace(/-+/g, '-') // Collapse multiple hyphens\n\t\t.toLowerCase() // Convert to lowercase\n\t\t.substring(0, 50); // Limit length\n}\n\n/**\n * Check if a branch name would create a valid tag name\n * @param {string} branchName - Git branch name\n * @returns {boolean} True if branch name can be converted to valid tag\n */\nfunction isValidBranchForTag(branchName) {\n\tif (!branchName || typeof branchName !== 'string') {\n\t\treturn false;\n\t}\n\n\t// Check if it's a reserved branch name that shouldn't become tags\n\tconst reservedBranches = ['main', 'master', 'develop', 'dev', 'HEAD'];\n\tif (reservedBranches.includes(branchName.toLowerCase())) {\n\t\treturn false;\n\t}\n\n\t// Check if sanitized name would be meaningful\n\tconst sanitized = 
sanitizeBranchNameForTag(branchName);\n\treturn sanitized.length > 0 && sanitized !== 'unknown-branch';\n}\n\n/**\n * Get git repository root directory\n * @param {string} projectRoot - Directory to start search from (required)\n * @returns {Promise<string|null>} Git repository root path or null\n */\nasync function getGitRepositoryRoot(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getGitRepositoryRoot');\n\t}\n\n\ttry {\n\t\tconst { stdout } = await execAsync('git rev-parse --show-toplevel', {\n\t\t\tcwd: projectRoot\n\t\t});\n\t\treturn stdout.trim();\n\t} catch (error) {\n\t\treturn null;\n\t}\n}\n\n/**\n * Check if specified directory is the git repository root\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<boolean>} True if directory is git root\n */\nasync function isGitRepositoryRoot(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for isGitRepositoryRoot');\n\t}\n\n\ttry {\n\t\tconst gitRoot = await getGitRepositoryRoot(projectRoot);\n\t\treturn gitRoot && path.resolve(gitRoot) === path.resolve(projectRoot);\n\t} catch (error) {\n\t\treturn false;\n\t}\n}\n\n/**\n * Get the default branch name for the repository\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<string|null>} Default branch name or null\n */\nasync function getDefaultBranch(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for getDefaultBranch');\n\t}\n\n\ttry {\n\t\t// Try to get from GitHub first (if gh CLI is available)\n\t\tif (await isGhCliAvailable(projectRoot)) {\n\t\t\tconst repoInfo = await getGitHubRepoInfo(projectRoot);\n\t\t\tif (repoInfo && repoInfo.defaultBranchRef) {\n\t\t\t\treturn repoInfo.defaultBranchRef.name;\n\t\t\t}\n\t\t}\n\n\t\t// Fallback to git remote info\n\t\tconst { stdout } = await execAsync(\n\t\t\t'git symbolic-ref refs/remotes/origin/HEAD',\n\t\t\t{ cwd: projectRoot 
}\n\t\t);\n\t\treturn stdout.replace('refs/remotes/origin/', '').trim();\n\t} catch (error) {\n\t\t// Final fallback - common default branch names\n\t\tconst commonDefaults = ['main', 'master'];\n\t\tconst branches = await getLocalBranches(projectRoot);\n\n\t\tfor (const defaultName of commonDefaults) {\n\t\t\tif (branches.includes(defaultName)) {\n\t\t\t\treturn defaultName;\n\t\t\t}\n\t\t}\n\n\t\treturn null;\n\t}\n}\n\n/**\n * Check if we're currently on the default branch\n * @param {string} projectRoot - Directory to check (required)\n * @returns {Promise<boolean>} True if on default branch\n */\nasync function isOnDefaultBranch(projectRoot) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for isOnDefaultBranch');\n\t}\n\n\ttry {\n\t\tconst currentBranch = await getCurrentBranch(projectRoot);\n\t\tconst defaultBranch = await getDefaultBranch(projectRoot);\n\t\treturn currentBranch && defaultBranch && currentBranch === defaultBranch;\n\t} catch (error) {\n\t\treturn false;\n\t}\n}\n\n/**\n * Check and automatically switch tags based on git branch if enabled\n * This runs automatically during task operations, similar to migration\n * @param {string} projectRoot - Project root directory (required)\n * @param {string} tasksPath - Path to tasks.json file\n * @returns {Promise<void>}\n */\nasync function checkAndAutoSwitchGitTag(projectRoot, tasksPath) {\n\tif (!projectRoot) {\n\t\tthrow new Error('projectRoot is required for checkAndAutoSwitchGitTag');\n\t}\n\n\t// DISABLED: Automatic git workflow is too rigid and opinionated\n\t// Users should explicitly use git-tag commands if they want integration\n\treturn;\n}\n\n/**\n * Synchronous version of git tag checking and switching\n * This runs during readJSON to ensure git integration happens BEFORE tag resolution\n * @param {string} projectRoot - Project root directory (required)\n * @param {string} tasksPath - Path to tasks.json file\n * @returns {void}\n */\nfunction 
checkAndAutoSwitchGitTagSync(projectRoot, tasksPath) {\n\tif (!projectRoot) {\n\t\treturn; // Can't proceed without project root\n\t}\n\n\t// DISABLED: Automatic git workflow is too rigid and opinionated\n\t// Users should explicitly use git-tag commands if they want integration\n\treturn;\n}\n\n/**\n * Synchronous check if directory is in a git repository\n * @param {string} projectRoot - Directory to check (required)\n * @returns {boolean} True if inside a git repository\n */\nfunction isGitRepositorySync(projectRoot) {\n\tif (!projectRoot) {\n\t\treturn false;\n\t}\n\n\ttry {\n\t\texecSync('git rev-parse --git-dir', {\n\t\t\tcwd: projectRoot,\n\t\t\tstdio: 'ignore' // Suppress output\n\t\t});\n\t\treturn true;\n\t} catch (error) {\n\t\treturn false;\n\t}\n}\n\n/**\n * Synchronous get current git branch name\n * @param {string} projectRoot - Directory to check (required)\n * @returns {string|null} Current branch name or null if not in git repo\n */\nfunction getCurrentBranchSync(projectRoot) {\n\tif (!projectRoot) {\n\t\treturn null;\n\t}\n\n\ttry {\n\t\tconst stdout = execSync('git rev-parse --abbrev-ref HEAD', {\n\t\t\tcwd: projectRoot,\n\t\t\tencoding: 'utf8'\n\t\t});\n\t\treturn stdout.trim();\n\t} catch (error) {\n\t\treturn null;\n\t}\n}\n\n/**\n * Check if the current working directory is inside a Git work-tree.\n * Uses `git rev-parse --is-inside-work-tree` which is more specific than --git-dir\n * for detecting work-trees (excludes bare repos and .git directories).\n * This is ideal for preventing accidental git init in existing work-trees.\n * @returns {boolean} True if inside a Git work-tree, false otherwise.\n */\nfunction insideGitWorkTree() {\n\ttry {\n\t\texecSync('git rev-parse --is-inside-work-tree', {\n\t\t\tstdio: 'ignore',\n\t\t\tcwd: process.cwd()\n\t\t});\n\t\treturn true;\n\t} catch {\n\t\treturn false;\n\t}\n}\n\n// Export all functions\nexport 
{\n\tisGitRepository,\n\tgetCurrentBranch,\n\tgetLocalBranches,\n\tgetRemoteBranches,\n\tisGhCliAvailable,\n\tgetGitHubRepoInfo,\n\tsanitizeBranchNameForTag,\n\tisValidBranchForTag,\n\tgetGitRepositoryRoot,\n\tisGitRepositoryRoot,\n\tgetDefaultBranch,\n\tisOnDefaultBranch,\n\tcheckAndAutoSwitchGitTag,\n\tcheckAndAutoSwitchGitTagSync,\n\tisGitRepositorySync,\n\tgetCurrentBranchSync,\n\tinsideGitWorkTree\n};\n"], ["/claude-task-master/mcp-server/src/tools/update-task.js", "/**\n * tools/update-task.js\n * Tool to update a single task by ID with new information\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { updateTaskByIdDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the update-task tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerUpdateTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'update_task',\n\t\tdescription:\n\t\t\t'Updates a single task by ID with new information or context provided in the prompt.',\n\t\tparameters: z.object({\n\t\t\tid: z\n\t\t\t\t.string() // ID can be number or string like \"1.2\"\n\t\t\t\t.describe(\n\t\t\t\t\t\"ID of the task (e.g., '15') to update. 
Subtasks are supported using the update-subtask tool.\"\n\t\t\t\t),\n\t\t\tprompt: z\n\t\t\t\t.string()\n\t\t\t\t.describe('New information or context to incorporate into the task'),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Use Perplexity AI for research-backed updates'),\n\t\t\tappend: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Append timestamped information to task details instead of full update'\n\t\t\t\t),\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\tconst toolName = 'update_task';\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(\n\t\t\t\t\t`Executing ${toolName} tool with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t\tlog.info(`${toolName}: Resolved tasks path: ${tasksJsonPath}`);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`${toolName}: Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// 3. 
Call Direct Function - Include projectRoot\n\t\t\t\tconst result = await updateTaskByIdDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tprompt: args.prompt,\n\t\t\t\t\t\tresearch: args.research,\n\t\t\t\t\t\tappend: args.append,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\t// 4. Handle Result\n\t\t\t\tlog.info(\n\t\t\t\t\t`${toolName}: Direct function result: success=${result.success}`\n\t\t\t\t);\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error updating task',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(\n\t\t\t\t\t`Critical error in ${toolName} tool execute: ${error.message}`\n\t\t\t\t);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Internal tool error (${toolName}): ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/expand-task.js", "/**\n * tools/expand-task.js\n * Tool to expand a task into subtasks\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { expandTaskDirect } from '../core/task-master-core.js';\nimport {\n\tfindTasksPath,\n\tfindComplexityReportPath\n} from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the expand-task tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerExpandTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'expand_task',\n\t\tdescription: 'Expand a task into subtasks for detailed implementation',\n\t\tparameters: z.object({\n\t\t\tid: z.string().describe('ID of task to expand'),\n\t\t\tnum: z.string().optional().describe('Number of subtasks to generate'),\n\t\t\tresearch: 
z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(false)\n\t\t\t\t.describe('Use research role for generation'),\n\t\t\tprompt: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Additional context for subtask generation'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the tasks file relative to project root (e.g., tasks/tasks.json)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\tforce: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(false)\n\t\t\t\t.describe('Force expansion even if subtasks exist'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting expand-task with args: ${JSON.stringify(args)}`);\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst complexityReportPath = findComplexityReportPath(\n\t\t\t\t\t{ ...args, tag: resolvedTag },\n\t\t\t\t\tlog\n\t\t\t\t);\n\n\t\t\t\tconst result = await expandTaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tnum: args.num,\n\t\t\t\t\t\tresearch: args.research,\n\t\t\t\t\t\tprompt: args.prompt,\n\t\t\t\t\t\tforce: args.force,\n\t\t\t\t\t\tcomplexityReportPath,\n\t\t\t\t\t\tprojectRoot: 
args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error expanding task',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in expand-task tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/update.js", "/**\n * tools/update.js\n * Tool to update tasks based on new context/prompt\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { updateTasksDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the update tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerUpdateTool(server) {\n\tserver.addTool({\n\t\tname: 'update',\n\t\tdescription:\n\t\t\t\"Update multiple upcoming tasks (with ID >= 'from' ID) based on new context or changes provided in the prompt. Use 'update_task' instead for a single specific task or 'update_subtask' for subtasks.\",\n\t\tparameters: z.object({\n\t\t\tfrom: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t\"Task ID from which to start updating (inclusive). 
IMPORTANT: This tool uses 'from', not 'id'\"\n\t\t\t\t),\n\t\t\tprompt: z\n\t\t\t\t.string()\n\t\t\t\t.describe('Explanation of changes or new context to apply'),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Use Perplexity AI for research-backed updates'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file relative to project root'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'The directory of the project. (Optional, usually from session)'\n\t\t\t\t),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\tconst toolName = 'update';\n\t\t\tconst { from, prompt, research, file, projectRoot, tag } = args;\n\n\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\ttag: args.tag\n\t\t\t});\n\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Executing ${toolName} tool with normalized root: ${projectRoot}`\n\t\t\t\t);\n\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath({ projectRoot, file }, log);\n\t\t\t\t\tlog.info(`${toolName}: Resolved tasks path: ${tasksJsonPath}`);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`${toolName}: Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json within project root '${projectRoot}': ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await updateTasksDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tfrom: from,\n\t\t\t\t\t\tprompt: prompt,\n\t\t\t\t\t\tresearch: research,\n\t\t\t\t\t\tprojectRoot: projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tlog.info(\n\t\t\t\t\t`${toolName}: Direct function result: 
success=${result.success}`\n\t\t\t\t);\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error updating tasks',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(\n\t\t\t\t\t`Critical error in ${toolName} tool execute: ${error.message}`\n\t\t\t\t);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Internal tool error (${toolName}): ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/expand-all.js", "/**\n * tools/expand-all.js\n * Tool for expanding all pending tasks with subtasks\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { expandAllTasksDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the expandAll tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerExpandAllTool(server) {\n\tserver.addTool({\n\t\tname: 'expand_all',\n\t\tdescription:\n\t\t\t'Expand all pending tasks into subtasks based on complexity or defaults',\n\t\tparameters: z.object({\n\t\t\tnum: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Target number of subtasks per task (uses complexity/defaults otherwise)'\n\t\t\t\t),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Enable research-backed subtask generation (e.g., using Perplexity)'\n\t\t\t\t),\n\t\t\tprompt: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Additional context to guide subtask generation for all tasks'\n\t\t\t\t),\n\t\t\tforce: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Force regeneration of subtasks for tasks that already have them'\n\t\t\t\t),\n\t\t\tfile: 
z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the tasks file in the /tasks folder inside the project root (default: tasks/tasks.json)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the project root directory (derived from session if possible)'\n\t\t\t\t),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Tool expand_all execution started with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t\tlog.info(`Resolved tasks.json path: ${tasksJsonPath}`);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await expandAllTasksDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tnum: args.num,\n\t\t\t\t\t\tresearch: args.research,\n\t\t\t\t\t\tprompt: args.prompt,\n\t\t\t\t\t\tforce: args.force,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error expanding all tasks',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(\n\t\t\t\t\t`Unexpected error in expand_all tool execute: ${error.message}`\n\t\t\t\t);\n\t\t\t\tif (error.stack) 
{\n\t\t\t\t\tlog.error(error.stack);\n\t\t\t\t}\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`An unexpected error occurred: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/logger.js", "import chalk from 'chalk';\nimport { isSilentMode } from '../../scripts/modules/utils.js';\nimport { getLogLevel } from '../../scripts/modules/config-manager.js';\n\n// Define log levels\nconst LOG_LEVELS = {\n\tdebug: 0,\n\tinfo: 1,\n\twarn: 2,\n\terror: 3,\n\tsuccess: 4\n};\n\n// Get log level from config manager or default to info\nconst LOG_LEVEL = LOG_LEVELS[getLogLevel().toLowerCase()] ?? LOG_LEVELS.info;\n\n/**\n * Logs a message with the specified level\n * @param {string} level - The log level (debug, info, warn, error, success)\n * @param {...any} args - Arguments to log\n */\nfunction log(level, ...args) {\n\t// Skip logging if silent mode is enabled\n\tif (isSilentMode()) {\n\t\treturn;\n\t}\n\n\t// Use text prefixes instead of emojis\n\tconst prefixes = {\n\t\tdebug: chalk.gray('[DEBUG]'),\n\t\tinfo: chalk.blue('[INFO]'),\n\t\twarn: chalk.yellow('[WARN]'),\n\t\terror: chalk.red('[ERROR]'),\n\t\tsuccess: chalk.green('[SUCCESS]')\n\t};\n\n\tif (LOG_LEVELS[level] !== undefined && LOG_LEVELS[level] >= LOG_LEVEL) {\n\t\tconst prefix = prefixes[level] || '';\n\t\tlet coloredArgs = args;\n\n\t\ttry {\n\t\t\tswitch (level) {\n\t\t\t\tcase 'error':\n\t\t\t\t\tcoloredArgs = args.map((arg) =>\n\t\t\t\t\t\ttypeof arg === 'string' ? chalk.red(arg) : arg\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase 'warn':\n\t\t\t\t\tcoloredArgs = args.map((arg) =>\n\t\t\t\t\t\ttypeof arg === 'string' ? chalk.yellow(arg) : arg\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase 'success':\n\t\t\t\t\tcoloredArgs = args.map((arg) =>\n\t\t\t\t\t\ttypeof arg === 'string' ? chalk.green(arg) : arg\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase 'info':\n\t\t\t\t\tcoloredArgs = args.map((arg) =>\n\t\t\t\t\t\ttypeof arg === 'string' ? 
chalk.blue(arg) : arg\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\tcase 'debug':\n\t\t\t\t\tcoloredArgs = args.map((arg) =>\n\t\t\t\t\t\ttypeof arg === 'string' ? chalk.gray(arg) : arg\n\t\t\t\t\t);\n\t\t\t\t\tbreak;\n\t\t\t\t// default: use original args (no color)\n\t\t\t}\n\t\t} catch (colorError) {\n\t\t\t// Fallback if chalk fails on an argument\n\t\t\t// Use console.error here for internal logger errors, separate from normal logging\n\t\t\tconsole.error('Internal Logger Error applying chalk color:', colorError);\n\t\t\tcoloredArgs = args;\n\t\t}\n\n\t\t// Revert to console.log - FastMCP's context logger (context.log)\n\t\t// is responsible for directing logs correctly (e.g., to stderr)\n\t\t// during tool execution without upsetting the client connection.\n\t\t// Logs outside of tool execution (like startup) will go to stdout.\n\t\tconsole.log(prefix, ...coloredArgs);\n\t}\n}\n\n/**\n * Create a logger object with methods for different log levels\n * @returns {Object} Logger object with info, error, debug, warn, and success methods\n */\nexport function createLogger() {\n\tconst createLogMethod =\n\t\t(level) =>\n\t\t(...args) =>\n\t\t\tlog(level, ...args);\n\n\treturn {\n\t\tdebug: createLogMethod('debug'),\n\t\tinfo: createLogMethod('info'),\n\t\twarn: createLogMethod('warn'),\n\t\terror: createLogMethod('error'),\n\t\tsuccess: createLogMethod('success'),\n\t\tlog: log // Also expose the raw log function\n\t};\n}\n\n// Export a default logger instance\nconst logger = createLogger();\n\nexport default logger;\nexport { log, LOG_LEVELS };\n"], ["/claude-task-master/bin/task-master.js", "#!/usr/bin/env node\n\n/**\n * Task Master\n * Copyright (c) 2025 Eyal Toledano, Ralph Khreish\n *\n * This software is licensed under the MIT License with Commons Clause.\n * You may use this software for any purpose, including commercial applications,\n * and modify and redistribute it freely, subject to the following restrictions:\n *\n * 1. 
You may not sell this software or offer it as a service.\n * 2. The origin of this software must not be misrepresented.\n * 3. Altered source versions must be plainly marked as such.\n *\n * For the full license text, see the LICENSE file in the root directory.\n */\n\n/**\n * Claude Task Master CLI\n * Main entry point for globally installed package\n */\n\nimport { fileURLToPath } from 'url';\nimport { dirname, resolve } from 'path';\nimport { createRequire } from 'module';\nimport { spawn } from 'child_process';\nimport { Command } from 'commander';\nimport { displayHelp, displayBanner } from '../scripts/modules/ui.js';\nimport { registerCommands } from '../scripts/modules/commands.js';\nimport { detectCamelCaseFlags } from '../scripts/modules/utils.js';\nimport chalk from 'chalk';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = dirname(__filename);\nconst require = createRequire(import.meta.url);\n\n// Get package information\nconst packageJson = require('../package.json');\nconst version = packageJson.version;\n\n// Get paths to script files\nconst devScriptPath = resolve(__dirname, '../scripts/dev.js');\nconst initScriptPath = resolve(__dirname, '../scripts/init.js');\n\n// Helper function to run dev.js with arguments\nfunction runDevScript(args) {\n\t// Debug: Show the transformed arguments when DEBUG=1 is set\n\tif (process.env.DEBUG === '1') {\n\t\tconsole.error('\\nDEBUG - CLI Wrapper Analysis:');\n\t\tconsole.error('- Original command: ' + process.argv.join(' '));\n\t\tconsole.error('- Transformed args: ' + args.join(' '));\n\t\tconsole.error(\n\t\t\t'- dev.js will receive: node ' +\n\t\t\t\tdevScriptPath +\n\t\t\t\t' ' +\n\t\t\t\targs.join(' ') +\n\t\t\t\t'\\n'\n\t\t);\n\t}\n\n\t// For testing: If TEST_MODE is set, just print args and exit\n\tif (process.env.TEST_MODE === '1') {\n\t\tconsole.log('Would execute:');\n\t\tconsole.log(`node ${devScriptPath} ${args.join(' ')}`);\n\t\tprocess.exit(0);\n\t\treturn;\n\t}\n\n\tconst 
child = spawn('node', [devScriptPath, ...args], {\n\t\tstdio: 'inherit',\n\t\tcwd: process.cwd()\n\t});\n\n\tchild.on('close', (code) => {\n\t\tprocess.exit(code);\n\t});\n}\n\n// Helper function to detect camelCase and convert to kebab-case\nconst toKebabCase = (str) => str.replace(/([A-Z])/g, '-$1').toLowerCase();\n\n/**\n * Create a wrapper action that passes the command to dev.js\n * @param {string} commandName - The name of the command\n * @returns {Function} Wrapper action function\n */\nfunction createDevScriptAction(commandName) {\n\treturn (options, cmd) => {\n\t\t// Check for camelCase flags and error out with helpful message\n\t\tconst camelCaseFlags = detectCamelCaseFlags(process.argv);\n\n\t\t// If camelCase flags were found, show error and exit\n\t\tif (camelCaseFlags.length > 0) {\n\t\t\tconsole.error('\\nError: Please use kebab-case for CLI flags:');\n\t\t\tcamelCaseFlags.forEach((flag) => {\n\t\t\t\tconsole.error(` Instead of: --${flag.original}`);\n\t\t\t\tconsole.error(` Use: --${flag.kebabCase}`);\n\t\t\t});\n\t\t\tconsole.error(\n\t\t\t\t'\\nExample: task-master parse-prd --num-tasks=5 instead of --numTasks=5\\n'\n\t\t\t);\n\t\t\tprocess.exit(1);\n\t\t}\n\n\t\t// Since we've ensured no camelCase flags, we can now just:\n\t\t// 1. Start with the command name\n\t\tconst args = [commandName];\n\n\t\t// 3. 
Get positional arguments and explicit flags from the command line\n\t\tconst commandArgs = [];\n\t\tconst positionals = new Set(); // Track positional args we've seen\n\n\t\t// Find the command in raw process.argv to extract args\n\t\tconst commandIndex = process.argv.indexOf(commandName);\n\t\tif (commandIndex !== -1) {\n\t\t\t// Process all args after the command name\n\t\t\tfor (let i = commandIndex + 1; i < process.argv.length; i++) {\n\t\t\t\tconst arg = process.argv[i];\n\n\t\t\t\tif (arg.startsWith('--')) {\n\t\t\t\t\t// It's a flag - pass through as is\n\t\t\t\t\tcommandArgs.push(arg);\n\t\t\t\t\t// Skip the next arg if this is a flag with a value (not --flag=value format)\n\t\t\t\t\tif (\n\t\t\t\t\t\t!arg.includes('=') &&\n\t\t\t\t\t\ti + 1 < process.argv.length &&\n\t\t\t\t\t\t!process.argv[i + 1].startsWith('--')\n\t\t\t\t\t) {\n\t\t\t\t\t\tcommandArgs.push(process.argv[++i]);\n\t\t\t\t\t}\n\t\t\t\t} else if (!positionals.has(arg)) {\n\t\t\t\t\t// It's a positional argument we haven't seen\n\t\t\t\t\tcommandArgs.push(arg);\n\t\t\t\t\tpositionals.add(arg);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Add all command line args we collected\n\t\targs.push(...commandArgs);\n\n\t\t// 4. 
Add default options from Commander if not specified on command line\n\t\t// Track which options we've seen on the command line\n\t\tconst userOptions = new Set();\n\t\tfor (const arg of commandArgs) {\n\t\t\tif (arg.startsWith('--')) {\n\t\t\t\t// Extract option name (without -- and value)\n\t\t\t\tconst name = arg.split('=')[0].slice(2);\n\t\t\t\tuserOptions.add(name);\n\n\t\t\t\t// Add the kebab-case version too, to prevent duplicates\n\t\t\t\tconst kebabName = name.replace(/([A-Z])/g, '-$1').toLowerCase();\n\t\t\t\tuserOptions.add(kebabName);\n\n\t\t\t\t// Add the camelCase version as well\n\t\t\t\tconst camelName = kebabName.replace(/-([a-z])/g, (_, letter) =>\n\t\t\t\t\tletter.toUpperCase()\n\t\t\t\t);\n\t\t\t\tuserOptions.add(camelName);\n\t\t\t}\n\t\t}\n\n\t\t// Add Commander-provided defaults for options not specified by user\n\t\tObject.entries(options).forEach(([key, value]) => {\n\t\t\t// Debug output to see what keys we're getting\n\t\t\tif (process.env.DEBUG === '1') {\n\t\t\t\tconsole.error(`DEBUG - Processing option: ${key} = ${value}`);\n\t\t\t}\n\n\t\t\t// Special case for numTasks > num-tasks (a known problem case)\n\t\t\tif (key === 'numTasks') {\n\t\t\t\tif (process.env.DEBUG === '1') {\n\t\t\t\t\tconsole.error('DEBUG - Converting numTasks to num-tasks');\n\t\t\t\t}\n\t\t\t\tif (!userOptions.has('num-tasks') && !userOptions.has('numTasks')) {\n\t\t\t\t\targs.push(`--num-tasks=${value}`);\n\t\t\t\t}\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Skip built-in Commander properties and options the user provided\n\t\t\tif (\n\t\t\t\t['parent', 'commands', 'options', 'rawArgs'].includes(key) ||\n\t\t\t\tuserOptions.has(key)\n\t\t\t) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Also check the kebab-case version of this key\n\t\t\tconst kebabKey = key.replace(/([A-Z])/g, '-$1').toLowerCase();\n\t\t\tif (userOptions.has(kebabKey)) {\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Add default values, using kebab-case for the parameter name\n\t\t\tif (value !== undefined) 
{\n\t\t\t\tif (typeof value === 'boolean') {\n\t\t\t\t\tif (value === true) {\n\t\t\t\t\t\targs.push(`--${kebabKey}`);\n\t\t\t\t\t} else if (value === false && key === 'generate') {\n\t\t\t\t\t\targs.push('--skip-generate');\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// Always use kebab-case for option names\n\t\t\t\t\targs.push(`--${kebabKey}=${value}`);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\n\t\t// Special handling for parent parameter (uses -p)\n\t\tif (options.parent && !args.includes('-p') && !userOptions.has('parent')) {\n\t\t\targs.push('-p', options.parent);\n\t\t}\n\n\t\t// Debug output for troubleshooting\n\t\tif (process.env.DEBUG === '1') {\n\t\t\tconsole.error('DEBUG - Command args:', commandArgs);\n\t\t\tconsole.error('DEBUG - User options:', Array.from(userOptions));\n\t\t\tconsole.error('DEBUG - Commander options:', options);\n\t\t\tconsole.error('DEBUG - Final args:', args);\n\t\t}\n\n\t\t// Run the script with our processed args\n\t\trunDevScript(args);\n\t};\n}\n\n// // Special case for the 'init' command which uses a different script\n// function registerInitCommand(program) {\n// \tprogram\n// \t\t.command('init')\n// \t\t.description('Initialize a new project')\n// \t\t.option('-y, --yes', 'Skip prompts and use default values')\n// \t\t.option('-n, --name <name>', 'Project name')\n// \t\t.option('-d, --description <description>', 'Project description')\n// \t\t.option('-v, --version <version>', 'Project version')\n// \t\t.option('-a, --author <author>', 'Author name')\n// \t\t.option('--skip-install', 'Skip installing dependencies')\n// \t\t.option('--dry-run', 'Show what would be done without making changes')\n// \t\t.action((options) => {\n// \t\t\t// Pass through any options to the init script\n// \t\t\tconst args = [\n// \t\t\t\t'--yes',\n// \t\t\t\t'name',\n// \t\t\t\t'description',\n// \t\t\t\t'version',\n// \t\t\t\t'author',\n// \t\t\t\t'skip-install',\n// \t\t\t\t'dry-run'\n// \t\t\t]\n// \t\t\t\t.filter((opt) => options[opt])\n// 
\t\t\t\t.map((opt) => {\n// \t\t\t\t\tif (opt === 'yes' || opt === 'skip-install' || opt === 'dry-run') {\n// \t\t\t\t\t\treturn `--${opt}`;\n// \t\t\t\t\t}\n// \t\t\t\t\treturn `--${opt}=${options[opt]}`;\n// \t\t\t\t});\n\n// \t\t\tconst child = spawn('node', [initScriptPath, ...args], {\n// \t\t\t\tstdio: 'inherit',\n// \t\t\t\tcwd: process.cwd()\n// \t\t\t});\n\n// \t\t\tchild.on('close', (code) => {\n// \t\t\t\tprocess.exit(code);\n// \t\t\t});\n// \t\t});\n// }\n\n// Set up the command-line interface\nconst program = new Command();\n\nprogram\n\t.name('task-master')\n\t.description('Claude Task Master CLI')\n\t.version(version)\n\t.addHelpText('afterAll', () => {\n\t\t// Use the same help display function as dev.js for consistency\n\t\tdisplayHelp();\n\t\treturn ''; // Return empty string to prevent commander's default help\n\t});\n\n// Add custom help option to directly call our help display\nprogram.helpOption('-h, --help', 'Display help information');\nprogram.on('--help', () => {\n\tdisplayHelp();\n});\n\n// // Add special case commands\n// registerInitCommand(program);\n\nprogram\n\t.command('dev')\n\t.description('Run the dev.js script')\n\t.action(() => {\n\t\tconst args = process.argv.slice(process.argv.indexOf('dev') + 1);\n\t\trunDevScript(args);\n\t});\n\n// Use a temporary Command instance to get all command definitions\nconst tempProgram = new Command();\nregisterCommands(tempProgram);\n\n// For each command in the temp instance, add a modified version to our actual program\ntempProgram.commands.forEach((cmd) => {\n\tif (['dev'].includes(cmd.name())) {\n\t\t// Skip commands we've already defined specially\n\t\treturn;\n\t}\n\n\t// Create a new command with the same name and description\n\tconst newCmd = program.command(cmd.name()).description(cmd.description());\n\n\t// Copy all options\n\tcmd.options.forEach((opt) => {\n\t\tnewCmd.option(opt.flags, opt.description, opt.defaultValue);\n\t});\n\n\t// Set the action to proxy to 
dev.js\n\tnewCmd.action(createDevScriptAction(cmd.name()));\n});\n\n// Parse the command line arguments\nprogram.parse(process.argv);\n\n// Add global error handling for unknown commands and options\nprocess.on('uncaughtException', (err) => {\n\t// Check if this is a commander.js unknown option error\n\tif (err.code === 'commander.unknownOption') {\n\t\tconst option = err.message.match(/'([^']+)'/)?.[1];\n\t\tconst commandArg = process.argv.find(\n\t\t\t(arg) =>\n\t\t\t\t!arg.startsWith('-') &&\n\t\t\t\targ !== 'task-master' &&\n\t\t\t\t!arg.includes('/') &&\n\t\t\t\targ !== 'node'\n\t\t);\n\t\tconst command = commandArg || 'unknown';\n\n\t\tconsole.error(chalk.red(`Error: Unknown option '${option}'`));\n\t\tconsole.error(\n\t\t\tchalk.yellow(\n\t\t\t\t`Run 'task-master ${command} --help' to see available options for this command`\n\t\t\t)\n\t\t);\n\t\tprocess.exit(1);\n\t}\n\n\t// Check if this is a commander.js unknown command error\n\tif (err.code === 'commander.unknownCommand') {\n\t\tconst command = err.message.match(/'([^']+)'/)?.[1];\n\n\t\tconsole.error(chalk.red(`Error: Unknown command '${command}'`));\n\t\tconsole.error(\n\t\t\tchalk.yellow(`Run 'task-master --help' to see available commands`)\n\t\t);\n\t\tprocess.exit(1);\n\t}\n\n\t// Handle other uncaught exceptions\n\tconsole.error(chalk.red(`Error: ${err.message}`));\n\tif (process.env.DEBUG === '1') {\n\t\tconsole.error(err);\n\t}\n\tprocess.exit(1);\n});\n\n// Show help if no command was provided (just 'task-master' with no args)\nif (process.argv.length <= 2) {\n\tdisplayBanner();\n\tdisplayHelp();\n\tprocess.exit(0);\n}\n\n// Add exports at the end of the file\nexport { detectCamelCaseFlags };\n"], ["/claude-task-master/src/utils/profiles.js", "/**\n * Profiles Utility\n * Consolidated utilities for profile detection, setup, and summary generation\n */\nimport fs from 'fs';\nimport path from 'path';\nimport inquirer from 'inquirer';\nimport chalk from 'chalk';\nimport boxen from 'boxen';\nimport { 
log } from '../../scripts/modules/utils.js';\nimport { getRulesProfile } from './rule-transformer.js';\nimport { RULE_PROFILES } from '../constants/profiles.js';\n\n// =============================================================================\n// PROFILE DETECTION\n// =============================================================================\n\n/**\n * Get the display name for a profile\n * @param {string} profileName - The profile name\n * @returns {string} - The display name\n */\nexport function getProfileDisplayName(profileName) {\n\ttry {\n\t\tconst profile = getRulesProfile(profileName);\n\t\treturn profile.displayName || profileName;\n\t} catch (error) {\n\t\treturn profileName;\n\t}\n}\n\n/**\n * Get installed profiles in the project directory\n * @param {string} projectRoot - Project directory path\n * @returns {string[]} - Array of installed profile names\n */\nexport function getInstalledProfiles(projectRoot) {\n\tconst installedProfiles = [];\n\n\tfor (const profileName of RULE_PROFILES) {\n\t\ttry {\n\t\t\tconst profile = getRulesProfile(profileName);\n\t\t\tconst profileDir = path.join(projectRoot, profile.profileDir);\n\n\t\t\t// Check if profile directory exists (skip root directory check)\n\t\t\tif (profile.profileDir === '.' 
|| fs.existsSync(profileDir)) {\n\t\t\t\t// Check if any files from the profile's fileMap exist\n\t\t\t\tconst rulesDir = path.join(projectRoot, profile.rulesDir);\n\t\t\t\tif (fs.existsSync(rulesDir)) {\n\t\t\t\t\tconst ruleFiles = Object.values(profile.fileMap);\n\t\t\t\t\tconst hasRuleFiles = ruleFiles.some((ruleFile) =>\n\t\t\t\t\t\tfs.existsSync(path.join(rulesDir, ruleFile))\n\t\t\t\t\t);\n\t\t\t\t\tif (hasRuleFiles) {\n\t\t\t\t\t\tinstalledProfiles.push(profileName);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Skip profiles that can't be loaded\n\t\t}\n\t}\n\n\treturn installedProfiles;\n}\n\n/**\n * Check if removing specified profiles would leave no profiles installed\n * @param {string} projectRoot - Project root directory\n * @param {string[]} profilesToRemove - Array of profile names to remove\n * @returns {boolean} - True if removal would leave no profiles\n */\nexport function wouldRemovalLeaveNoProfiles(projectRoot, profilesToRemove) {\n\tconst installedProfiles = getInstalledProfiles(projectRoot);\n\n\t// If no profiles are currently installed, removal cannot leave no profiles\n\tif (installedProfiles.length === 0) {\n\t\treturn false;\n\t}\n\n\tconst remainingProfiles = installedProfiles.filter(\n\t\t(profile) => !profilesToRemove.includes(profile)\n\t);\n\treturn remainingProfiles.length === 0;\n}\n\n// =============================================================================\n// PROFILE SETUP\n// =============================================================================\n\n// Note: Profile choices are now generated dynamically within runInteractiveProfilesSetup()\n// to ensure proper alphabetical sorting and pagination configuration\n\n/**\n * Launches an interactive prompt for selecting which rule profiles to include in your project.\n *\n * This function dynamically lists all available profiles (from RULE_PROFILES) and presents them as checkboxes.\n * The user must select at least one profile (no defaults are 
pre-selected). The result is an array of selected profile names.\n *\n * Used by both project initialization (init) and the CLI 'task-master rules setup' command.\n *\n * @returns {Promise<string[]>} Array of selected profile names (e.g., ['cursor', 'windsurf'])\n */\nexport async function runInteractiveProfilesSetup() {\n\t// Generate the profile list dynamically with proper display names, alphabetized\n\tconst profileDescriptions = RULE_PROFILES.map((profileName) => {\n\t\tconst displayName = getProfileDisplayName(profileName);\n\t\tconst profile = getRulesProfile(profileName);\n\n\t\t// Determine description based on profile capabilities\n\t\tlet description;\n\t\tconst hasRules = Object.keys(profile.fileMap).length > 0;\n\t\tconst hasMcpConfig = profile.mcpConfig === true;\n\n\t\tif (!profile.includeDefaultRules) {\n\t\t\t// Integration guide profiles (claude, codex, gemini, opencode, zed, amp) - don't include standard coding rules\n\t\t\tif (profileName === 'claude') {\n\t\t\t\tdescription = 'Integration guide with Task Master slash commands';\n\t\t\t} else if (profileName === 'codex') {\n\t\t\t\tdescription = 'Comprehensive Task Master integration guide';\n\t\t\t} else if (hasMcpConfig) {\n\t\t\t\tdescription = 'Integration guide and MCP config';\n\t\t\t} else {\n\t\t\t\tdescription = 'Integration guide';\n\t\t\t}\n\t\t} else if (hasRules && hasMcpConfig) {\n\t\t\t// Full rule profiles with MCP config\n\t\t\tif (profileName === 'roo') {\n\t\t\t\tdescription = 'Rule profile, MCP config, and agent modes';\n\t\t\t} else {\n\t\t\t\tdescription = 'Rule profile and MCP config';\n\t\t\t}\n\t\t} else if (hasRules) {\n\t\t\t// Rule profiles without MCP config\n\t\t\tdescription = 'Rule profile';\n\t\t}\n\n\t\treturn {\n\t\t\tprofileName,\n\t\t\tdisplayName,\n\t\t\tdescription\n\t\t};\n\t}).sort((a, b) => a.displayName.localeCompare(b.displayName));\n\n\tconst profileListText = profileDescriptions\n\t\t.map(\n\t\t\t({ displayName, description }) 
=>\n\t\t\t\t`${chalk.white('• ')}${chalk.yellow(displayName)}${chalk.white(` - ${description}`)}`\n\t\t)\n\t\t.join('\\n');\n\n\tconsole.log(\n\t\tboxen(\n\t\t\t`${chalk.white.bold('Rule Profiles Setup')}\\n\\n${chalk.white(\n\t\t\t\t'Rule profiles help enforce best practices and conventions for Task Master.\\n' +\n\t\t\t\t\t'Each profile provides coding guidelines tailored for specific AI coding environments.\\n\\n'\n\t\t\t)}${chalk.cyan('Available Profiles:')}\\n${profileListText}`,\n\t\t\t{\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'blue',\n\t\t\t\tborderStyle: 'round',\n\t\t\t\tmargin: { top: 1, bottom: 1 }\n\t\t\t}\n\t\t)\n\t);\n\n\t// Generate choices in the same order as the display text above\n\tconst sortedChoices = profileDescriptions.map(\n\t\t({ profileName, displayName }) => ({\n\t\t\tname: displayName,\n\t\t\tvalue: profileName\n\t\t})\n\t);\n\n\tconst ruleProfilesQuestion = {\n\t\ttype: 'checkbox',\n\t\tname: 'ruleProfiles',\n\t\tmessage: 'Which rule profiles would you like to add to your project?',\n\t\tchoices: sortedChoices,\n\t\tpageSize: sortedChoices.length, // Show all options without pagination\n\t\tloop: false, // Disable loop scrolling\n\t\tvalidate: (input) => input.length > 0 || 'You must select at least one.'\n\t};\n\tconst { ruleProfiles } = await inquirer.prompt([ruleProfilesQuestion]);\n\treturn ruleProfiles;\n}\n\n// =============================================================================\n// PROFILE SUMMARY\n// =============================================================================\n\n/**\n * Generate appropriate summary message for a profile based on its type\n * @param {string} profileName - Name of the profile\n * @param {Object} addResult - Result object with success/failed counts\n * @returns {string} Formatted summary message\n */\nexport function generateProfileSummary(profileName, addResult) {\n\tconst profileConfig = getRulesProfile(profileName);\n\n\tif (!profileConfig.includeDefaultRules) {\n\t\t// Integration 
guide profiles (claude, codex, gemini, amp)\n\t\treturn `Summary for ${profileName}: Integration guide installed.`;\n\t} else {\n\t\t// Rule profiles with coding guidelines\n\t\treturn `Summary for ${profileName}: ${addResult.success} files processed, ${addResult.failed} failed.`;\n\t}\n}\n\n/**\n * Generate appropriate summary message for profile removal\n * @param {string} profileName - Name of the profile\n * @param {Object} removeResult - Result object from removal operation\n * @returns {string} Formatted summary message\n */\nexport function generateProfileRemovalSummary(profileName, removeResult) {\n\tif (removeResult.skipped) {\n\t\treturn `Summary for ${profileName}: Skipped (default or protected files)`;\n\t}\n\n\tif (removeResult.error && !removeResult.success) {\n\t\treturn `Summary for ${profileName}: Failed to remove - ${removeResult.error}`;\n\t}\n\n\tconst profileConfig = getRulesProfile(profileName);\n\n\tif (!profileConfig.includeDefaultRules) {\n\t\t// Integration guide profiles (claude, codex, gemini, amp)\n\t\tconst baseMessage = `Summary for ${profileName}: Integration guide removed`;\n\t\tif (removeResult.notice) {\n\t\t\treturn `${baseMessage} (${removeResult.notice})`;\n\t\t}\n\t\treturn baseMessage;\n\t} else {\n\t\t// Rule profiles with coding guidelines\n\t\tconst baseMessage = `Summary for ${profileName}: Rule profile removed`;\n\t\tif (removeResult.notice) {\n\t\t\treturn `${baseMessage} (${removeResult.notice})`;\n\t\t}\n\t\treturn baseMessage;\n\t}\n}\n\n/**\n * Categorize profiles and generate final summary statistics\n * @param {Array} addResults - Array of add result objects\n * @returns {Object} Object with categorized profiles and totals\n */\nexport function categorizeProfileResults(addResults) {\n\tconst successfulProfiles = [];\n\tlet totalSuccess = 0;\n\tlet totalFailed = 0;\n\n\taddResults.forEach((r) => {\n\t\ttotalSuccess += r.success;\n\t\ttotalFailed += r.failed;\n\n\t\t// All profiles are considered successful if they 
completed without major errors\n\t\tif (r.success > 0 || r.failed === 0) {\n\t\t\tsuccessfulProfiles.push(r.profileName);\n\t\t}\n\t});\n\n\treturn {\n\t\tsuccessfulProfiles,\n\t\tallSuccessfulProfiles: successfulProfiles,\n\t\ttotalSuccess,\n\t\ttotalFailed\n\t};\n}\n\n/**\n * Categorize removal results and generate final summary statistics\n * @param {Array} removalResults - Array of removal result objects\n * @returns {Object} Object with categorized removal results\n */\nexport function categorizeRemovalResults(removalResults) {\n\tconst successfulRemovals = [];\n\tconst skippedRemovals = [];\n\tconst failedRemovals = [];\n\tconst removalsWithNotices = [];\n\n\tremovalResults.forEach((result) => {\n\t\tif (result.success) {\n\t\t\tsuccessfulRemovals.push(result.profileName);\n\t\t} else if (result.skipped) {\n\t\t\tskippedRemovals.push(result.profileName);\n\t\t} else if (result.error) {\n\t\t\tfailedRemovals.push(result);\n\t\t}\n\n\t\tif (result.notice) {\n\t\t\tremovalsWithNotices.push(result);\n\t\t}\n\t});\n\n\treturn {\n\t\tsuccessfulRemovals,\n\t\tskippedRemovals,\n\t\tfailedRemovals,\n\t\tremovalsWithNotices\n\t};\n}\n"], ["/claude-task-master/scripts/modules/prompt-manager.js", "import fs from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\nimport { log } from './utils.js';\nimport Ajv from 'ajv';\nimport addFormats from 'ajv-formats';\n\n/**\n * Manages prompt templates for AI interactions\n */\nexport class PromptManager {\n\tconstructor() {\n\t\tconst __filename = fileURLToPath(import.meta.url);\n\t\tconst __dirname = path.dirname(__filename);\n\t\tthis.promptsDir = path.join(__dirname, '..', '..', 'src', 'prompts');\n\t\tthis.cache = new Map();\n\t\tthis.setupValidation();\n\t}\n\n\t/**\n\t * Set up JSON schema validation\n\t * @private\n\t */\n\tsetupValidation() {\n\t\tthis.ajv = new Ajv({ allErrors: true, strict: false });\n\t\taddFormats(this.ajv);\n\n\t\ttry {\n\t\t\t// Load schema from src/prompts/schemas\n\t\t\tconst 
schemaPath = path.join(\n\t\t\t\tthis.promptsDir,\n\t\t\t\t'schemas',\n\t\t\t\t'prompt-template.schema.json'\n\t\t\t);\n\t\t\tconst schemaContent = fs.readFileSync(schemaPath, 'utf-8');\n\t\t\tconst schema = JSON.parse(schemaContent);\n\n\t\t\tthis.validatePrompt = this.ajv.compile(schema);\n\t\t\tlog('info', '✓ JSON schema validation enabled');\n\t\t} catch (error) {\n\t\t\tlog('warn', `⚠ Schema validation disabled: ${error.message}`);\n\t\t\tthis.validatePrompt = () => true; // Fallback to no validation\n\t\t}\n\t}\n\n\t/**\n\t * Load a prompt template and render it with variables\n\t * @param {string} promptId - The prompt template ID\n\t * @param {Object} variables - Variables to inject into the template\n\t * @param {string} [variantKey] - Optional specific variant to use\n\t * @returns {{systemPrompt: string, userPrompt: string, metadata: Object}}\n\t */\n\tloadPrompt(promptId, variables = {}, variantKey = null) {\n\t\ttry {\n\t\t\t// Check cache first\n\t\t\tconst cacheKey = `${promptId}-${JSON.stringify(variables)}-${variantKey}`;\n\t\t\tif (this.cache.has(cacheKey)) {\n\t\t\t\treturn this.cache.get(cacheKey);\n\t\t\t}\n\n\t\t\t// Load template\n\t\t\tconst template = this.loadTemplate(promptId);\n\n\t\t\t// Validate parameters if schema validation is available\n\t\t\tif (this.validatePrompt && this.validatePrompt !== true) {\n\t\t\t\tthis.validateParameters(template, variables);\n\t\t\t}\n\n\t\t\t// Select the variant - use specified key or select based on conditions\n\t\t\tconst variant = variantKey\n\t\t\t\t? 
{ ...template.prompts[variantKey], name: variantKey }\n\t\t\t\t: this.selectVariant(template, variables);\n\n\t\t\t// Render the prompts with variables\n\t\t\tconst rendered = {\n\t\t\t\tsystemPrompt: this.renderTemplate(variant.system, variables),\n\t\t\t\tuserPrompt: this.renderTemplate(variant.user, variables),\n\t\t\t\tmetadata: {\n\t\t\t\t\ttemplateId: template.id,\n\t\t\t\t\tversion: template.version,\n\t\t\t\t\tvariant: variant.name || 'default',\n\t\t\t\t\tparameters: variables\n\t\t\t\t}\n\t\t\t};\n\n\t\t\t// Cache the result\n\t\t\tthis.cache.set(cacheKey, rendered);\n\n\t\t\treturn rendered;\n\t\t} catch (error) {\n\t\t\tlog('error', `Failed to load prompt ${promptId}: ${error.message}`);\n\t\t\tthrow error;\n\t\t}\n\t}\n\n\t/**\n\t * Load a prompt template from disk\n\t * @private\n\t */\n\tloadTemplate(promptId) {\n\t\tconst templatePath = path.join(this.promptsDir, `${promptId}.json`);\n\n\t\ttry {\n\t\t\tconst content = fs.readFileSync(templatePath, 'utf-8');\n\t\t\tconst template = JSON.parse(content);\n\n\t\t\t// Schema validation if available (do this first for detailed errors)\n\t\t\tif (this.validatePrompt && this.validatePrompt !== true) {\n\t\t\t\tconst valid = this.validatePrompt(template);\n\t\t\t\tif (!valid) {\n\t\t\t\t\tconst errors = this.validatePrompt.errors\n\t\t\t\t\t\t.map((err) => `${err.instancePath || 'root'}: ${err.message}`)\n\t\t\t\t\t\t.join(', ');\n\t\t\t\t\tthrow new Error(`Schema validation failed: ${errors}`);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Fallback basic validation if no schema validation available\n\t\t\t\tif (!template.id || !template.prompts || !template.prompts.default) {\n\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t'Invalid template structure: missing required fields (id, prompts.default)'\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn template;\n\t\t} catch (error) {\n\t\t\tif (error.code === 'ENOENT') {\n\t\t\t\tthrow new Error(`Prompt template '${promptId}' not found`);\n\t\t\t}\n\t\t\tthrow 
error;\n\t\t}\n\t}\n\n\t/**\n\t * Validate parameters against template schema\n\t * @private\n\t */\n\tvalidateParameters(template, variables) {\n\t\tif (!template.parameters) return;\n\n\t\tconst errors = [];\n\n\t\tfor (const [paramName, paramConfig] of Object.entries(\n\t\t\ttemplate.parameters\n\t\t)) {\n\t\t\tconst value = variables[paramName];\n\n\t\t\t// Check required parameters\n\t\t\tif (paramConfig.required && value === undefined) {\n\t\t\t\terrors.push(`Required parameter '${paramName}' missing`);\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\t// Skip validation for undefined optional parameters\n\t\t\tif (value === undefined) continue;\n\n\t\t\t// Type validation\n\t\t\tif (!this.validateParameterType(value, paramConfig.type)) {\n\t\t\t\terrors.push(\n\t\t\t\t\t`Parameter '${paramName}' expected ${paramConfig.type}, got ${typeof value}`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Enum validation\n\t\t\tif (paramConfig.enum && !paramConfig.enum.includes(value)) {\n\t\t\t\terrors.push(\n\t\t\t\t\t`Parameter '${paramName}' must be one of: ${paramConfig.enum.join(', ')}`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Pattern validation for strings\n\t\t\tif (paramConfig.pattern && typeof value === 'string') {\n\t\t\t\tconst regex = new RegExp(paramConfig.pattern);\n\t\t\t\tif (!regex.test(value)) {\n\t\t\t\t\terrors.push(\n\t\t\t\t\t\t`Parameter '${paramName}' does not match required pattern: ${paramConfig.pattern}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Range validation for numbers\n\t\t\tif (typeof value === 'number') {\n\t\t\t\tif (paramConfig.minimum !== undefined && value < paramConfig.minimum) {\n\t\t\t\t\terrors.push(\n\t\t\t\t\t\t`Parameter '${paramName}' must be >= ${paramConfig.minimum}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t\tif (paramConfig.maximum !== undefined && value > paramConfig.maximum) {\n\t\t\t\t\terrors.push(\n\t\t\t\t\t\t`Parameter '${paramName}' must be <= ${paramConfig.maximum}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif (errors.length > 0) {\n\t\t\tthrow 
new Error(`Parameter validation failed: ${errors.join('; ')}`);\n\t\t}\n\t}\n\n\t/**\n\t * Validate parameter type\n\t * @private\n\t */\n\tvalidateParameterType(value, expectedType) {\n\t\tswitch (expectedType) {\n\t\t\tcase 'string':\n\t\t\t\treturn typeof value === 'string';\n\t\t\tcase 'number':\n\t\t\t\treturn typeof value === 'number';\n\t\t\tcase 'boolean':\n\t\t\t\treturn typeof value === 'boolean';\n\t\t\tcase 'array':\n\t\t\t\treturn Array.isArray(value);\n\t\t\tcase 'object':\n\t\t\t\treturn (\n\t\t\t\t\ttypeof value === 'object' && value !== null && !Array.isArray(value)\n\t\t\t\t);\n\t\t\tdefault:\n\t\t\t\treturn true;\n\t\t}\n\t}\n\n\t/**\n\t * Select the best variant based on conditions\n\t * @private\n\t */\n\tselectVariant(template, variables) {\n\t\t// Check each variant's condition\n\t\tfor (const [name, variant] of Object.entries(template.prompts)) {\n\t\t\tif (name === 'default') continue;\n\n\t\t\tif (\n\t\t\t\tvariant.condition &&\n\t\t\t\tthis.evaluateCondition(variant.condition, variables)\n\t\t\t) {\n\t\t\t\treturn { ...variant, name };\n\t\t\t}\n\t\t}\n\n\t\t// Fall back to default\n\t\treturn { ...template.prompts.default, name: 'default' };\n\t}\n\n\t/**\n\t * Evaluate a condition string\n\t * @private\n\t */\n\tevaluateCondition(condition, variables) {\n\t\ttry {\n\t\t\t// Create a safe evaluation context\n\t\t\tconst context = { ...variables };\n\n\t\t\t// Simple condition evaluation (can be enhanced)\n\t\t\t// For now, supports basic comparisons\n\t\t\tconst func = new Function(...Object.keys(context), `return ${condition}`);\n\t\t\treturn func(...Object.values(context));\n\t\t} catch (error) {\n\t\t\tlog('warn', `Failed to evaluate condition: ${condition}`);\n\t\t\treturn false;\n\t\t}\n\t}\n\n\t/**\n\t * Render a template string with variables\n\t * @private\n\t */\n\trenderTemplate(template, variables) {\n\t\tlet rendered = template;\n\n\t\t// Handle helper functions like (eq variable \"value\")\n\t\trendered = 
rendered.replace(\n\t\t\t/\\(eq\\s+(\\w+(?:\\.\\w+)*)\\s+\"([^\"]+)\"\\)/g,\n\t\t\t(match, path, compareValue) => {\n\t\t\t\tconst value = this.getNestedValue(variables, path);\n\t\t\t\treturn value === compareValue ? 'true' : 'false';\n\t\t\t}\n\t\t);\n\n\t\t// Handle not helper function like (not variable)\n\t\trendered = rendered.replace(/\\(not\\s+(\\w+(?:\\.\\w+)*)\\)/g, (match, path) => {\n\t\t\tconst value = this.getNestedValue(variables, path);\n\t\t\treturn !value ? 'true' : 'false';\n\t\t});\n\n\t\t// Handle gt (greater than) helper function like (gt variable 0)\n\t\trendered = rendered.replace(\n\t\t\t/\\(gt\\s+(\\w+(?:\\.\\w+)*)\\s+(\\d+(?:\\.\\d+)?)\\)/g,\n\t\t\t(match, path, compareValue) => {\n\t\t\t\tconst value = this.getNestedValue(variables, path);\n\t\t\t\tconst numValue = parseFloat(compareValue);\n\t\t\t\treturn typeof value === 'number' && value > numValue ? 'true' : 'false';\n\t\t\t}\n\t\t);\n\n\t\t// Handle gte (greater than or equal) helper function like (gte variable 0)\n\t\trendered = rendered.replace(\n\t\t\t/\\(gte\\s+(\\w+(?:\\.\\w+)*)\\s+(\\d+(?:\\.\\d+)?)\\)/g,\n\t\t\t(match, path, compareValue) => {\n\t\t\t\tconst value = this.getNestedValue(variables, path);\n\t\t\t\tconst numValue = parseFloat(compareValue);\n\t\t\t\treturn typeof value === 'number' && value >= numValue\n\t\t\t\t\t? 'true'\n\t\t\t\t\t: 'false';\n\t\t\t}\n\t\t);\n\n\t\t// Handle conditionals with else {{#if variable}}...{{else}}...{{/if}}\n\t\trendered = rendered.replace(\n\t\t\t/\\{\\{#if\\s+([^}]+)\\}\\}([\\s\\S]*?)(?:\\{\\{else\\}\\}([\\s\\S]*?))?\\{\\{\\/if\\}\\}/g,\n\t\t\t(match, condition, trueContent, falseContent = '') => {\n\t\t\t\t// Handle boolean values and helper function results\n\t\t\t\tlet value;\n\t\t\t\tif (condition === 'true') {\n\t\t\t\t\tvalue = true;\n\t\t\t\t} else if (condition === 'false') {\n\t\t\t\t\tvalue = false;\n\t\t\t\t} else {\n\t\t\t\t\tvalue = this.getNestedValue(variables, condition);\n\t\t\t\t}\n\t\t\t\treturn value ? 
trueContent : falseContent;\n\t\t\t}\n\t\t);\n\n\t\t// Handle each loops {{#each array}}...{{/each}}\n\t\trendered = rendered.replace(\n\t\t\t/\\{\\{#each\\s+(\\w+(?:\\.\\w+)*)\\}\\}([\\s\\S]*?)\\{\\{\\/each\\}\\}/g,\n\t\t\t(match, path, content) => {\n\t\t\t\tconst array = this.getNestedValue(variables, path);\n\t\t\t\tif (!Array.isArray(array)) return '';\n\n\t\t\t\treturn array\n\t\t\t\t\t.map((item, index) => {\n\t\t\t\t\t\t// Create a context with item properties and special variables\n\t\t\t\t\t\tconst itemContext = {\n\t\t\t\t\t\t\t...variables,\n\t\t\t\t\t\t\t...item,\n\t\t\t\t\t\t\t'@index': index,\n\t\t\t\t\t\t\t'@first': index === 0,\n\t\t\t\t\t\t\t'@last': index === array.length - 1\n\t\t\t\t\t\t};\n\n\t\t\t\t\t\t// Recursively render the content with item context\n\t\t\t\t\t\treturn this.renderTemplate(content, itemContext);\n\t\t\t\t\t})\n\t\t\t\t\t.join('');\n\t\t\t}\n\t\t);\n\n\t\t// Handle json helper {{{json variable}}} (triple braces for raw output)\n\t\trendered = rendered.replace(\n\t\t\t/\\{\\{\\{json\\s+(\\w+(?:\\.\\w+)*)\\}\\}\\}/g,\n\t\t\t(match, path) => {\n\t\t\t\tconst value = this.getNestedValue(variables, path);\n\t\t\t\treturn value !== undefined ? JSON.stringify(value, null, 2) : '';\n\t\t\t}\n\t\t);\n\n\t\t// Handle variable substitution {{variable}}\n\t\trendered = rendered.replace(/\\{\\{(\\w+(?:\\.\\w+)*)\\}\\}/g, (match, path) => {\n\t\t\tconst value = this.getNestedValue(variables, path);\n\t\t\treturn value !== undefined ? value : '';\n\t\t});\n\n\t\treturn rendered;\n\t}\n\n\t/**\n\t * Get nested value from object using dot notation\n\t * @private\n\t */\n\tgetNestedValue(obj, path) {\n\t\treturn path\n\t\t\t.split('.')\n\t\t\t.reduce(\n\t\t\t\t(current, key) =>\n\t\t\t\t\tcurrent && current[key] !== undefined ? 
current[key] : undefined,\n\t\t\t\tobj\n\t\t\t);\n\t}\n\n\t/**\n\t * Validate all prompt templates\n\t */\n\tvalidateAllPrompts() {\n\t\tconst results = { total: 0, errors: [], valid: [] };\n\n\t\ttry {\n\t\t\tconst files = fs.readdirSync(this.promptsDir);\n\t\t\tconst promptFiles = files.filter((file) => file.endsWith('.json'));\n\n\t\t\tfor (const file of promptFiles) {\n\t\t\t\tconst promptId = file.replace('.json', '');\n\t\t\t\tresults.total++;\n\n\t\t\t\ttry {\n\t\t\t\t\tthis.loadTemplate(promptId);\n\t\t\t\t\tresults.valid.push(promptId);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tresults.errors.push(`${promptId}: ${error.message}`);\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\tresults.errors.push(\n\t\t\t\t`Failed to read templates directory: ${error.message}`\n\t\t\t);\n\t\t}\n\n\t\treturn results;\n\t}\n\n\t/**\n\t * List all available prompt templates\n\t */\n\tlistPrompts() {\n\t\ttry {\n\t\t\tconst files = fs.readdirSync(this.promptsDir);\n\t\t\tconst prompts = [];\n\n\t\t\tfor (const file of files) {\n\t\t\t\tif (!file.endsWith('.json')) continue;\n\n\t\t\t\tconst promptId = file.replace('.json', '');\n\t\t\t\ttry {\n\t\t\t\t\tconst template = this.loadTemplate(promptId);\n\t\t\t\t\tprompts.push({\n\t\t\t\t\t\tid: template.id,\n\t\t\t\t\t\tdescription: template.description,\n\t\t\t\t\t\tversion: template.version,\n\t\t\t\t\t\tparameters: template.parameters,\n\t\t\t\t\t\ttags: template.metadata?.tags || []\n\t\t\t\t\t});\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog('warn', `Failed to load template ${promptId}: ${error.message}`);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn prompts;\n\t\t} catch (error) {\n\t\t\tif (error.code === 'ENOENT') {\n\t\t\t\t// Templates directory doesn't exist yet\n\t\t\t\treturn [];\n\t\t\t}\n\t\t\tthrow error;\n\t\t}\n\t}\n\n\t/**\n\t * Validate template structure\n\t */\n\tvalidateTemplate(templatePath) {\n\t\ttry {\n\t\t\tconst content = fs.readFileSync(templatePath, 'utf-8');\n\t\t\tconst template = 
JSON.parse(content);\n\n\t\t\t// Check required fields\n\t\t\tconst required = ['id', 'version', 'description', 'prompts'];\n\t\t\tfor (const field of required) {\n\t\t\t\tif (!template[field]) {\n\t\t\t\t\treturn { valid: false, error: `Missing required field: ${field}` };\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Check default prompt exists\n\t\t\tif (!template.prompts.default) {\n\t\t\t\treturn { valid: false, error: 'Missing default prompt variant' };\n\t\t\t}\n\n\t\t\t// Check each variant has required fields\n\t\t\tfor (const [name, variant] of Object.entries(template.prompts)) {\n\t\t\t\tif (!variant.system || !variant.user) {\n\t\t\t\t\treturn {\n\t\t\t\t\t\tvalid: false,\n\t\t\t\t\t\terror: `Variant '${name}' missing system or user prompt`\n\t\t\t\t\t};\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Schema validation if available\n\t\t\tif (this.validatePrompt && this.validatePrompt !== true) {\n\t\t\t\tconst valid = this.validatePrompt(template);\n\t\t\t\tif (!valid) {\n\t\t\t\t\tconst errors = this.validatePrompt.errors\n\t\t\t\t\t\t.map((err) => `${err.instancePath || 'root'}: ${err.message}`)\n\t\t\t\t\t\t.join(', ');\n\t\t\t\t\treturn { valid: false, error: `Schema validation failed: ${errors}` };\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn { valid: true };\n\t\t} catch (error) {\n\t\t\treturn { valid: false, error: error.message };\n\t\t}\n\t}\n}\n\n// Singleton instance\nlet promptManager = null;\n\n/**\n * Get or create the prompt manager instance\n * @returns {PromptManager}\n */\nexport function getPromptManager() {\n\tif (!promptManager) {\n\t\tpromptManager = new PromptManager();\n\t}\n\treturn promptManager;\n}\n"], ["/claude-task-master/scripts/modules/task-manager/migrate.js", "import fs from 'fs';\nimport path from 'path';\nimport chalk from 'chalk';\nimport { fileURLToPath } from 'url';\nimport { createLogWrapper } from '../../../mcp-server/src/tools/utils.js';\nimport { findProjectRoot } from '../utils.js';\nimport {\n\tLEGACY_CONFIG_FILE,\n\tTASKMASTER_CONFIG_FILE\n} 
from '../../../src/constants/paths.js';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = path.dirname(__filename);\n\n// Create a simple log wrapper for CLI use\nconst log = createLogWrapper({\n\tinfo: (msg) => console.log(chalk.blue('ℹ'), msg),\n\twarn: (msg) => console.log(chalk.yellow('⚠'), msg),\n\terror: (msg) => console.error(chalk.red('✗'), msg),\n\tsuccess: (msg) => console.log(chalk.green('✓'), msg)\n});\n\n/**\n * Main migration function\n * @param {Object} options - Migration options\n */\nexport async function migrateProject(options = {}) {\n\tconst projectRoot = findProjectRoot() || process.cwd();\n\n\tlog.info(`Starting migration in: ${projectRoot}`);\n\n\t// Check if .taskmaster directory already exists\n\tconst taskmasterDir = path.join(projectRoot, '.taskmaster');\n\tif (fs.existsSync(taskmasterDir) && !options.force) {\n\t\tlog.warn(\n\t\t\t'.taskmaster directory already exists. Use --force to overwrite or skip migration.'\n\t\t);\n\t\treturn;\n\t}\n\n\t// Analyze what needs to be migrated\n\tconst migrationPlan = analyzeMigrationNeeds(projectRoot);\n\n\tif (migrationPlan.length === 0) {\n\t\tlog.info(\n\t\t\t'No files to migrate. Project may already be using the new structure.'\n\t\t);\n\t\treturn;\n\t}\n\n\t// Show migration plan\n\tlog.info('Migration plan:');\n\tfor (const item of migrationPlan) {\n\t\tconst action = options.dryRun ? 'Would move' : 'Will move';\n\t\tlog.info(` ${action}: ${item.from} → ${item.to}`);\n\t}\n\n\tif (options.dryRun) {\n\t\tlog.info(\n\t\t\t'Dry run complete. Use --dry-run=false to perform actual migration.'\n\t\t);\n\t\treturn;\n\t}\n\n\t// Confirm migration\n\tif (!options.yes) {\n\t\tconst readline = await import('readline');\n\t\tconst rl = readline.createInterface({\n\t\t\tinput: process.stdin,\n\t\t\toutput: process.stdout\n\t\t});\n\n\t\tconst answer = await new Promise((resolve) => {\n\t\t\trl.question('Proceed with migration? 
(y/N): ', resolve);\n\t\t});\n\t\trl.close();\n\n\t\tif (answer.toLowerCase() !== 'y' && answer.toLowerCase() !== 'yes') {\n\t\t\tlog.info('Migration cancelled.');\n\t\t\treturn;\n\t\t}\n\t}\n\n\t// Perform migration\n\ttry {\n\t\tawait performMigration(projectRoot, migrationPlan, options);\n\t\tlog.success('Migration completed successfully!');\n\t\tlog.info('You can now use the new .taskmaster directory structure.');\n\t\tif (!options.cleanup) {\n\t\t\tlog.info(\n\t\t\t\t'Old files were preserved. Use --cleanup to remove them after verification.'\n\t\t\t);\n\t\t}\n\t} catch (error) {\n\t\tlog.error(`Migration failed: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\n/**\n * Analyze what files need to be migrated\n * @param {string} projectRoot - Project root directory\n * @returns {Array} Migration plan items\n */\nfunction analyzeMigrationNeeds(projectRoot) {\n\tconst migrationPlan = [];\n\n\t// Check for tasks directory\n\tconst tasksDir = path.join(projectRoot, 'tasks');\n\tif (fs.existsSync(tasksDir)) {\n\t\tconst tasksFiles = fs.readdirSync(tasksDir);\n\t\tfor (const file of tasksFiles) {\n\t\t\tmigrationPlan.push({\n\t\t\t\tfrom: path.join('tasks', file),\n\t\t\t\tto: path.join('.taskmaster', 'tasks', file),\n\t\t\t\ttype: 'task'\n\t\t\t});\n\t\t}\n\t}\n\n\t// Check for scripts directory files\n\tconst scriptsDir = path.join(projectRoot, 'scripts');\n\tif (fs.existsSync(scriptsDir)) {\n\t\tconst scriptsFiles = fs.readdirSync(scriptsDir);\n\t\tfor (const file of scriptsFiles) {\n\t\t\tconst filePath = path.join(scriptsDir, file);\n\t\t\tif (fs.statSync(filePath).isFile()) {\n\t\t\t\t// Categorize files more intelligently\n\t\t\t\tlet destination;\n\t\t\t\tconst lowerFile = file.toLowerCase();\n\n\t\t\t\tif (\n\t\t\t\t\tlowerFile.includes('example') ||\n\t\t\t\t\tlowerFile.includes('template') ||\n\t\t\t\t\tlowerFile.includes('boilerplate') ||\n\t\t\t\t\tlowerFile.includes('sample')\n\t\t\t\t) {\n\t\t\t\t\t// Template/example files go to templates (including 
example_prd.txt)\n\t\t\t\t\tdestination = path.join('.taskmaster', 'templates', file);\n\t\t\t\t} else if (\n\t\t\t\t\tlowerFile.includes('complexity') &&\n\t\t\t\t\tlowerFile.includes('report') &&\n\t\t\t\t\tlowerFile.endsWith('.json')\n\t\t\t\t) {\n\t\t\t\t\t// Only actual complexity reports go to reports\n\t\t\t\t\tdestination = path.join('.taskmaster', 'reports', file);\n\t\t\t\t} else if (\n\t\t\t\t\tlowerFile.includes('prd') ||\n\t\t\t\t\tlowerFile.endsWith('.md') ||\n\t\t\t\t\tlowerFile.endsWith('.txt')\n\t\t\t\t) {\n\t\t\t\t\t// Documentation files go to docs (but not examples or reports)\n\t\t\t\t\tdestination = path.join('.taskmaster', 'docs', file);\n\t\t\t\t} else {\n\t\t\t\t\t// Other files stay in scripts or get skipped - don't force everything into templates\n\t\t\t\t\tlog.warn(\n\t\t\t\t\t\t`Skipping migration of '${file}' - uncertain categorization. You may need to move this manually.`\n\t\t\t\t\t);\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tmigrationPlan.push({\n\t\t\t\t\tfrom: path.join('scripts', file),\n\t\t\t\t\tto: destination,\n\t\t\t\t\ttype: 'script'\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check for .taskmasterconfig\n\tconst oldConfig = path.join(projectRoot, LEGACY_CONFIG_FILE);\n\tif (fs.existsSync(oldConfig)) {\n\t\tmigrationPlan.push({\n\t\t\tfrom: LEGACY_CONFIG_FILE,\n\t\t\tto: TASKMASTER_CONFIG_FILE,\n\t\t\ttype: 'config'\n\t\t});\n\t}\n\n\treturn migrationPlan;\n}\n\n/**\n * Perform the actual migration\n * @param {string} projectRoot - Project root directory\n * @param {Array} migrationPlan - List of files to migrate\n * @param {Object} options - Migration options\n */\nasync function performMigration(projectRoot, migrationPlan, options) {\n\t// Create .taskmaster directory\n\tconst taskmasterDir = path.join(projectRoot, '.taskmaster');\n\tif (!fs.existsSync(taskmasterDir)) {\n\t\tfs.mkdirSync(taskmasterDir, { recursive: true });\n\t}\n\n\t// Group migration items by destination directory to create only needed subdirs\n\tconst 
neededDirs = new Set();\n\tfor (const item of migrationPlan) {\n\t\tconst destDir = path.dirname(item.to);\n\t\tneededDirs.add(destDir);\n\t}\n\n\t// Create only the directories we actually need\n\tfor (const dir of neededDirs) {\n\t\tconst fullDirPath = path.join(projectRoot, dir);\n\t\tif (!fs.existsSync(fullDirPath)) {\n\t\t\tfs.mkdirSync(fullDirPath, { recursive: true });\n\t\t\tlog.info(`Created directory: ${dir}`);\n\t\t}\n\t}\n\n\t// Create backup if requested\n\tif (options.backup) {\n\t\tconst backupDir = path.join(projectRoot, '.taskmaster-migration-backup');\n\t\tlog.info(`Creating backup in: ${backupDir}`);\n\t\tif (fs.existsSync(backupDir)) {\n\t\t\tfs.rmSync(backupDir, { recursive: true, force: true });\n\t\t}\n\t\tfs.mkdirSync(backupDir, { recursive: true });\n\t}\n\n\t// Migrate files\n\tfor (const item of migrationPlan) {\n\t\tconst fromPath = path.join(projectRoot, item.from);\n\t\tconst toPath = path.join(projectRoot, item.to);\n\n\t\tif (!fs.existsSync(fromPath)) {\n\t\t\tlog.warn(`Source file not found: ${item.from}`);\n\t\t\tcontinue;\n\t\t}\n\n\t\t// Create backup if requested\n\t\tif (options.backup) {\n\t\t\tconst backupPath = path.join(\n\t\t\t\tprojectRoot,\n\t\t\t\t'.taskmaster-migration-backup',\n\t\t\t\titem.from\n\t\t\t);\n\t\t\tconst backupDir = path.dirname(backupPath);\n\t\t\tif (!fs.existsSync(backupDir)) {\n\t\t\t\tfs.mkdirSync(backupDir, { recursive: true });\n\t\t\t}\n\t\t\tfs.copyFileSync(fromPath, backupPath);\n\t\t}\n\n\t\t// Ensure destination directory exists\n\t\tconst toDir = path.dirname(toPath);\n\t\tif (!fs.existsSync(toDir)) {\n\t\t\tfs.mkdirSync(toDir, { recursive: true });\n\t\t}\n\n\t\t// Copy file\n\t\tfs.copyFileSync(fromPath, toPath);\n\t\tlog.info(`Migrated: ${item.from} → ${item.to}`);\n\n\t\t// Remove original if cleanup is requested\n\t\tif (options.cleanup) {\n\t\t\tfs.unlinkSync(fromPath);\n\t\t}\n\t}\n\n\t// Clean up empty directories if cleanup is requested\n\tif (options.cleanup) {\n\t\tconst 
dirsToCheck = ['tasks', 'scripts'];\n\t\tfor (const dir of dirsToCheck) {\n\t\t\tconst dirPath = path.join(projectRoot, dir);\n\t\t\tif (fs.existsSync(dirPath)) {\n\t\t\t\ttry {\n\t\t\t\t\tconst files = fs.readdirSync(dirPath);\n\t\t\t\t\tif (files.length === 0) {\n\t\t\t\t\t\tfs.rmdirSync(dirPath);\n\t\t\t\t\t\tlog.info(`Removed empty directory: ${dir}`);\n\t\t\t\t\t}\n\t\t\t\t} catch (error) {\n\t\t\t\t\t// Directory not empty or other error, skip\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nexport default { migrateProject };\n"], ["/claude-task-master/mcp-server/src/custom-sdk/language-model.js", "/**\n * src/ai-providers/custom-sdk/mcp/language-model.js\n *\n * MCP Language Model implementation following AI SDK LanguageModelV1 interface.\n * Uses MCP session.requestSampling() for AI operations.\n */\n\nimport {\n\tconvertToMCPFormat,\n\tconvertFromMCPFormat\n} from './message-converter.js';\nimport { MCPError, mapMCPError } from './errors.js';\nimport { extractJson } from './json-extractor.js';\nimport {\n\tconvertSchemaToInstructions,\n\tenhancePromptForJSON\n} from './schema-converter.js';\n\n/**\n * MCP Language Model implementing AI SDK LanguageModelV1 interface\n */\nexport class MCPLanguageModel {\n\tspecificationVersion = 'v1';\n\tdefaultObjectGenerationMode = 'json';\n\tsupportsImageUrls = false;\n\tsupportsStructuredOutputs = true;\n\n\tconstructor(options) {\n\t\tthis.session = options.session; // MCP session object\n\t\tthis.modelId = options.modelId;\n\t\tthis.settings = options.settings || {};\n\t\tthis.provider = 'mcp-ai-sdk';\n\t\tthis.maxTokens = this.settings.maxTokens;\n\t\tthis.temperature = this.settings.temperature;\n\n\t\tthis.validateSession();\n\t}\n\n\t/**\n\t * Validate that the MCP session has required capabilities\n\t */\n\tvalidateSession() {\n\t\tif (!this.session?.clientCapabilities?.sampling) {\n\t\t\tthrow new MCPError('MCP session must have client sampling capabilities');\n\t\t}\n\t}\n\n\t/**\n\t * Generate text using MCP session 
sampling\n\t * @param {object} options - Generation options\n\t * @param {Array} options.prompt - AI SDK prompt format\n\t * @param {AbortSignal} options.abortSignal - Abort signal\n\t * @returns {Promise<object>} Generation result in AI SDK format\n\t */\n\tasync doGenerate(options) {\n\t\ttry {\n\t\t\t// Convert AI SDK prompt to MCP format\n\t\t\tconst { messages, systemPrompt } = convertToMCPFormat(options.prompt);\n\n\t\t\t// Use MCP session.requestSampling (same as MCPRemoteProvider)\n\t\t\tconst response = await this.session.requestSampling(\n\t\t\t\t{\n\t\t\t\t\tmessages,\n\t\t\t\t\tsystemPrompt,\n\t\t\t\t\ttemperature: this.settings.temperature,\n\t\t\t\t\tmaxTokens: this.settings.maxTokens,\n\t\t\t\t\tincludeContext: 'thisServer'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t// signal: options.abortSignal,\n\t\t\t\t\ttimeout: 240000 // 4 minutes timeout\n\t\t\t\t}\n\t\t\t);\n\n\t\t\t// Convert MCP response back to AI SDK format\n\t\t\tconst result = convertFromMCPFormat(response);\n\n\t\t\treturn {\n\t\t\t\ttext: result.text,\n\t\t\t\tfinishReason: result.finishReason || 'stop',\n\t\t\t\tusage: {\n\t\t\t\t\tpromptTokens: result.usage?.inputTokens || 0,\n\t\t\t\t\tcompletionTokens: result.usage?.outputTokens || 0,\n\t\t\t\t\ttotalTokens:\n\t\t\t\t\t\t(result.usage?.inputTokens || 0) + (result.usage?.outputTokens || 0)\n\t\t\t\t},\n\t\t\t\trawResponse: response,\n\t\t\t\twarnings: result.warnings\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tthrow mapMCPError(error);\n\t\t}\n\t}\n\n\t/**\n\t * Generate structured object using MCP session sampling\n\t * @param {object} options - Generation options\n\t * @param {Array} options.prompt - AI SDK prompt format\n\t * @param {import('zod').ZodSchema} options.schema - Zod schema for validation\n\t * @param {string} [options.mode='json'] - Generation mode ('json' or 'tool')\n\t * @param {AbortSignal} options.abortSignal - Abort signal\n\t * @returns {Promise<object>} Generation result with structured object\n\t */\n\tasync 
doGenerateObject(options) {\n\t\ttry {\n\t\t\tconst { schema, mode = 'json', ...restOptions } = options;\n\n\t\t\tif (!schema) {\n\t\t\t\tthrow new MCPError('Schema is required for object generation');\n\t\t\t}\n\n\t\t\t// Convert schema to JSON instructions\n\t\t\tconst objectName = restOptions.objectName || 'generated_object';\n\t\t\tconst jsonInstructions = convertSchemaToInstructions(schema, objectName);\n\n\t\t\t// Enhance prompt with JSON generation instructions\n\t\t\tconst enhancedPrompt = enhancePromptForJSON(\n\t\t\t\toptions.prompt,\n\t\t\t\tjsonInstructions\n\t\t\t);\n\n\t\t\t// Convert enhanced prompt to MCP format\n\t\t\tconst { messages, systemPrompt } = convertToMCPFormat(enhancedPrompt);\n\n\t\t\t// Use MCP session.requestSampling with enhanced prompt\n\t\t\tconst response = await this.session.requestSampling(\n\t\t\t\t{\n\t\t\t\t\tmessages,\n\t\t\t\t\tsystemPrompt,\n\t\t\t\t\ttemperature: this.settings.temperature,\n\t\t\t\t\tmaxTokens: this.settings.maxTokens,\n\t\t\t\t\tincludeContext: 'thisServer'\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\ttimeout: 240000 // 4 minutes timeout\n\t\t\t\t}\n\t\t\t);\n\n\t\t\t// Convert MCP response back to AI SDK format\n\t\t\tconst result = convertFromMCPFormat(response);\n\n\t\t\t// Extract JSON from the response text\n\t\t\tconst jsonText = extractJson(result.text);\n\n\t\t\t// Parse and validate JSON\n\t\t\tlet parsedObject;\n\t\t\ttry {\n\t\t\t\tparsedObject = JSON.parse(jsonText);\n\t\t\t} catch (parseError) {\n\t\t\t\tthrow new MCPError(\n\t\t\t\t\t`Failed to parse JSON response: ${parseError.message}. 
Response: ${result.text.substring(0, 200)}...`\n\t\t\t\t);\n\t\t\t}\n\n\t\t\t// Validate against schema\n\t\t\ttry {\n\t\t\t\tconst validatedObject = schema.parse(parsedObject);\n\n\t\t\t\treturn {\n\t\t\t\t\tobject: validatedObject,\n\t\t\t\t\tfinishReason: result.finishReason || 'stop',\n\t\t\t\t\tusage: {\n\t\t\t\t\t\tpromptTokens: result.usage?.inputTokens || 0,\n\t\t\t\t\t\tcompletionTokens: result.usage?.outputTokens || 0,\n\t\t\t\t\t\ttotalTokens:\n\t\t\t\t\t\t\t(result.usage?.inputTokens || 0) +\n\t\t\t\t\t\t\t(result.usage?.outputTokens || 0)\n\t\t\t\t\t},\n\t\t\t\t\trawResponse: response,\n\t\t\t\t\twarnings: result.warnings\n\t\t\t\t};\n\t\t\t} catch (validationError) {\n\t\t\t\tthrow new MCPError(\n\t\t\t\t\t`Generated object does not match schema: ${validationError.message}. Generated: ${JSON.stringify(parsedObject, null, 2)}`\n\t\t\t\t);\n\t\t\t}\n\t\t} catch (error) {\n\t\t\tthrow mapMCPError(error);\n\t\t}\n\t}\n\n\t/**\n\t * Stream text generation using MCP session sampling\n\t * Note: MCP may not support native streaming, so this may simulate streaming\n\t * @param {object} options - Generation options\n\t * @returns {AsyncIterable} Stream of generation chunks\n\t */\n\tasync doStream(options) {\n\t\ttry {\n\t\t\t// For now, simulate streaming by chunking the complete response\n\t\t\t// TODO: Implement native streaming if MCP supports it\n\t\t\tconst result = await this.doGenerate(options);\n\n\t\t\t// Create async generator that yields chunks\n\t\t\treturn this.simulateStreaming(result);\n\t\t} catch (error) {\n\t\t\tthrow mapMCPError(error);\n\t\t}\n\t}\n\n\t/**\n\t * Simulate streaming by chunking a complete response\n\t * @param {object} result - Complete generation result\n\t * @returns {AsyncIterable} Simulated stream chunks\n\t */\n\tasync *simulateStreaming(result) {\n\t\tconst text = result.text;\n\t\tconst chunkSize = Math.max(1, Math.floor(text.length / 10)); // 10 chunks\n\n\t\tfor (let i = 0; i < text.length; i += chunkSize) 
{\n\t\t\tconst chunk = text.slice(i, i + chunkSize);\n\t\t\tconst isLast = i + chunkSize >= text.length;\n\n\t\t\tyield {\n\t\t\t\ttype: 'text-delta',\n\t\t\t\ttextDelta: chunk\n\t\t\t};\n\n\t\t\t// Small delay to simulate streaming\n\t\t\tawait new Promise((resolve) => setTimeout(resolve, 50));\n\t\t}\n\n\t\t// Final chunk with finish reason and usage\n\t\tyield {\n\t\t\ttype: 'finish',\n\t\t\tfinishReason: result.finishReason,\n\t\t\tusage: result.usage\n\t\t};\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/tools/next-task.js", "/**\n * tools/next-task.js\n * Tool to find the next task to work on based on dependencies and status\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { nextTaskDirect } from '../core/task-master-core.js';\nimport {\n\tresolveTasksPath,\n\tresolveComplexityReportPath\n} from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the nextTask tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerNextTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'next_task',\n\t\tdescription:\n\t\t\t'Find the next task to work on based on dependencies and status',\n\t\tparameters: z.object({\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tcomplexityReport: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Path to the complexity report file (relative to project root or absolute)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Finding next task with args: ${JSON.stringify(args)}`);\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\t// Resolve the path to tasks.json using new path utilities\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = resolveTasksPath(args, session);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Resolve the path to complexity report (optional)\n\t\t\t\tlet complexityReportPath;\n\t\t\t\ttry {\n\t\t\t\t\tcomplexityReportPath = resolveComplexityReportPath(\n\t\t\t\t\t\t{ ...args, tag: resolvedTag },\n\t\t\t\t\t\tsession\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding complexity report: ${error.message}`);\n\t\t\t\t\t// This is optional, so we don't fail the operation\n\t\t\t\t\tcomplexityReportPath = null;\n\t\t\t\t}\n\n\t\t\t\tconst result = await nextTaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\treportPath: complexityReportPath,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tlog.info(`Next task result: ${result.success ? 
'found' : 'none'}`);\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error finding next task',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error finding next task: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/utils/create-mcp-config.js", "import fs from 'fs';\nimport path from 'path';\nimport { log } from '../../scripts/modules/utils.js';\n\n// Return JSON with existing mcp.json formatting style\nfunction formatJSONWithTabs(obj) {\n\tlet json = JSON.stringify(obj, null, '\\t');\n\n\tjson = json.replace(\n\t\t/(\\[\\n\\t+)([^[\\]]+?)(\\n\\t+\\])/g,\n\t\t(match, openBracket, content, closeBracket) => {\n\t\t\t// Only convert to single line if content doesn't contain nested objects/arrays\n\t\t\tif (!content.includes('{') && !content.includes('[')) {\n\t\t\t\tconst singleLineContent = content\n\t\t\t\t\t.replace(/\\n\\t+/g, ' ')\n\t\t\t\t\t.replace(/\\s+/g, ' ')\n\t\t\t\t\t.trim();\n\t\t\t\treturn `[${singleLineContent}]`;\n\t\t\t}\n\t\t\treturn match;\n\t\t}\n\t);\n\n\treturn json;\n}\n\n// Structure matches project conventions (see scripts/init.js)\nexport function setupMCPConfiguration(projectRoot, mcpConfigPath) {\n\t// Handle null mcpConfigPath (e.g., for Claude/Codex profiles)\n\tif (!mcpConfigPath) {\n\t\tlog(\n\t\t\t'debug',\n\t\t\t'[MCP Config] No mcpConfigPath provided, skipping MCP configuration setup'\n\t\t);\n\t\treturn;\n\t}\n\n\t// Build the full path to the MCP config file\n\tconst mcpPath = path.join(projectRoot, mcpConfigPath);\n\tconst configDir = path.dirname(mcpPath);\n\n\tlog('info', `Setting up MCP configuration at ${mcpPath}...`);\n\n\t// New MCP config to be added - references the installed package\n\tconst newMCPServer = {\n\t\t'task-master-ai': {\n\t\t\tcommand: 'npx',\n\t\t\targs: ['-y', '--package=task-master-ai', 'task-master-ai'],\n\t\t\tenv: 
{\n\t\t\t\tANTHROPIC_API_KEY: 'YOUR_ANTHROPIC_API_KEY_HERE',\n\t\t\t\tPERPLEXITY_API_KEY: 'YOUR_PERPLEXITY_API_KEY_HERE',\n\t\t\t\tOPENAI_API_KEY: 'YOUR_OPENAI_KEY_HERE',\n\t\t\t\tGOOGLE_API_KEY: 'YOUR_GOOGLE_KEY_HERE',\n\t\t\t\tXAI_API_KEY: 'YOUR_XAI_KEY_HERE',\n\t\t\t\tOPENROUTER_API_KEY: 'YOUR_OPENROUTER_KEY_HERE',\n\t\t\t\tMISTRAL_API_KEY: 'YOUR_MISTRAL_KEY_HERE',\n\t\t\t\tAZURE_OPENAI_API_KEY: 'YOUR_AZURE_KEY_HERE',\n\t\t\t\tOLLAMA_API_KEY: 'YOUR_OLLAMA_API_KEY_HERE'\n\t\t\t}\n\t\t}\n\t};\n\n\t// Create config directory if it doesn't exist\n\tif (!fs.existsSync(configDir)) {\n\t\tfs.mkdirSync(configDir, { recursive: true });\n\t}\n\n\tif (fs.existsSync(mcpPath)) {\n\t\tlog(\n\t\t\t'info',\n\t\t\t'MCP configuration file already exists, checking for existing task-master-ai...'\n\t\t);\n\t\ttry {\n\t\t\t// Read existing config\n\t\t\tconst mcpConfig = JSON.parse(fs.readFileSync(mcpPath, 'utf8'));\n\t\t\t// Initialize mcpServers if it doesn't exist\n\t\t\tif (!mcpConfig.mcpServers) {\n\t\t\t\tmcpConfig.mcpServers = {};\n\t\t\t}\n\t\t\t// Check if any existing server configuration already has task-master-ai in its args\n\t\t\tconst hasMCPString = Object.values(mcpConfig.mcpServers).some(\n\t\t\t\t(server) =>\n\t\t\t\t\tserver.args &&\n\t\t\t\t\tArray.isArray(server.args) &&\n\t\t\t\t\tserver.args.some(\n\t\t\t\t\t\t(arg) => typeof arg === 'string' && arg.includes('task-master-ai')\n\t\t\t\t\t)\n\t\t\t);\n\t\t\tif (hasMCPString) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'Found existing task-master-ai MCP configuration in mcp.json, leaving untouched'\n\t\t\t\t);\n\t\t\t\treturn; // Exit early, don't modify the existing configuration\n\t\t\t}\n\t\t\t// Add the task-master-ai server if it doesn't exist\n\t\t\tif (!mcpConfig.mcpServers['task-master-ai']) {\n\t\t\t\tmcpConfig.mcpServers['task-master-ai'] = newMCPServer['task-master-ai'];\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'Added task-master-ai server to existing MCP configuration'\n\t\t\t\t);\n\t\t\t} else 
{\n\t\t\t\tlog('info', 'task-master-ai server already configured in mcp.json');\n\t\t\t}\n\t\t\t// Write the updated configuration\n\t\t\tfs.writeFileSync(mcpPath, formatJSONWithTabs(mcpConfig) + '\\n');\n\t\t\tlog('success', 'Updated MCP configuration file');\n\t\t} catch (error) {\n\t\t\tlog('error', `Failed to update MCP configuration: ${error.message}`);\n\t\t\t// Create a backup before potentially modifying\n\t\t\tconst backupPath = `${mcpPath}.backup-${Date.now()}`;\n\t\t\tif (fs.existsSync(mcpPath)) {\n\t\t\t\tfs.copyFileSync(mcpPath, backupPath);\n\t\t\t\tlog('info', `Created backup of existing mcp.json at ${backupPath}`);\n\t\t\t}\n\t\t\t// Create new configuration\n\t\t\tconst newMCPConfig = {\n\t\t\t\tmcpServers: newMCPServer\n\t\t\t};\n\t\t\tfs.writeFileSync(mcpPath, formatJSONWithTabs(newMCPConfig) + '\\n');\n\t\t\tlog(\n\t\t\t\t'warn',\n\t\t\t\t'Created new MCP configuration file (backup of original file was created if it existed)'\n\t\t\t);\n\t\t}\n\t} else {\n\t\t// If mcp.json doesn't exist, create it\n\t\tconst newMCPConfig = {\n\t\t\tmcpServers: newMCPServer\n\t\t};\n\t\tfs.writeFileSync(mcpPath, formatJSONWithTabs(newMCPConfig) + '\\n');\n\t\tlog('success', `Created MCP configuration file at ${mcpPath}`);\n\t}\n\n\t// Add note to console about MCP integration\n\tlog('info', 'MCP server will use the installed task-master-ai package');\n}\n\n/**\n * Remove Task Master MCP server configuration from an existing mcp.json file\n * Only removes Task Master entries, preserving other MCP servers\n * @param {string} projectRoot - Target project directory\n * @param {string} mcpConfigPath - Relative path to MCP config file (e.g., '.cursor/mcp.json')\n * @returns {Object} Result object with success status and details\n */\nexport function removeTaskMasterMCPConfiguration(projectRoot, mcpConfigPath) {\n\t// Handle null mcpConfigPath (e.g., for Claude/Codex profiles)\n\tif (!mcpConfigPath) {\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tremoved: 
false,\n\t\t\tdeleted: false,\n\t\t\terror: null,\n\t\t\thasOtherServers: false\n\t\t};\n\t}\n\n\tconst mcpPath = path.join(projectRoot, mcpConfigPath);\n\n\tlet result = {\n\t\tsuccess: false,\n\t\tremoved: false,\n\t\tdeleted: false,\n\t\terror: null,\n\t\thasOtherServers: false\n\t};\n\n\tif (!fs.existsSync(mcpPath)) {\n\t\tresult.success = true;\n\t\tresult.removed = false;\n\t\tlog('debug', `[MCP Config] MCP config file does not exist: ${mcpPath}`);\n\t\treturn result;\n\t}\n\n\ttry {\n\t\t// Read existing config\n\t\tconst mcpConfig = JSON.parse(fs.readFileSync(mcpPath, 'utf8'));\n\n\t\tif (!mcpConfig.mcpServers) {\n\t\t\tresult.success = true;\n\t\t\tresult.removed = false;\n\t\t\tlog('debug', `[MCP Config] No mcpServers section found in: ${mcpPath}`);\n\t\t\treturn result;\n\t\t}\n\n\t\t// Check if Task Master is configured\n\t\tconst hasTaskMaster =\n\t\t\tmcpConfig.mcpServers['task-master-ai'] ||\n\t\t\tObject.values(mcpConfig.mcpServers).some(\n\t\t\t\t(server) =>\n\t\t\t\t\tserver.args &&\n\t\t\t\t\tArray.isArray(server.args) &&\n\t\t\t\t\tserver.args.some(\n\t\t\t\t\t\t(arg) => typeof arg === 'string' && arg.includes('task-master-ai')\n\t\t\t\t\t)\n\t\t\t);\n\n\t\tif (!hasTaskMaster) {\n\t\t\tresult.success = true;\n\t\t\tresult.removed = false;\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[MCP Config] Task Master not found in MCP config: ${mcpPath}`\n\t\t\t);\n\t\t\treturn result;\n\t\t}\n\n\t\t// Remove task-master-ai server\n\t\tdelete mcpConfig.mcpServers['task-master-ai'];\n\n\t\t// Also remove any servers that have task-master-ai in their args\n\t\tObject.keys(mcpConfig.mcpServers).forEach((serverName) => {\n\t\t\tconst server = mcpConfig.mcpServers[serverName];\n\t\t\tif (\n\t\t\t\tserver.args &&\n\t\t\t\tArray.isArray(server.args) &&\n\t\t\t\tserver.args.some(\n\t\t\t\t\t(arg) => typeof arg === 'string' && arg.includes('task-master-ai')\n\t\t\t\t)\n\t\t\t) {\n\t\t\t\tdelete 
mcpConfig.mcpServers[serverName];\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`[MCP Config] Removed server '${serverName}' containing task-master-ai`\n\t\t\t\t);\n\t\t\t}\n\t\t});\n\n\t\t// Check if there are other MCP servers remaining\n\t\tconst remainingServers = Object.keys(mcpConfig.mcpServers);\n\t\tresult.hasOtherServers = remainingServers.length > 0;\n\n\t\tif (result.hasOtherServers) {\n\t\t\t// Write back the modified config with remaining servers\n\t\t\tfs.writeFileSync(mcpPath, formatJSONWithTabs(mcpConfig) + '\\n');\n\t\t\tresult.success = true;\n\t\t\tresult.removed = true;\n\t\t\tresult.deleted = false;\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t`[MCP Config] Removed Task Master from MCP config, preserving other servers: ${remainingServers.join(', ')}`\n\t\t\t);\n\t\t} else {\n\t\t\t// No other servers, delete the entire file\n\t\t\tfs.rmSync(mcpPath, { force: true });\n\t\t\tresult.success = true;\n\t\t\tresult.removed = true;\n\t\t\tresult.deleted = true;\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t`[MCP Config] Removed MCP config file (no other servers remaining): ${mcpPath}`\n\t\t\t);\n\t\t}\n\t} catch (error) {\n\t\tresult.error = error.message;\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[MCP Config] Failed to remove Task Master from MCP config: ${error.message}`\n\t\t);\n\t}\n\n\treturn result;\n}\n"], ["/claude-task-master/mcp-server/src/tools/generate.js", "/**\n * tools/generate.js\n * Tool to generate individual task files from tasks.json\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { generateTaskFilesDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\nimport path from 'path';\n\n/**\n * Register the generate tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerGenerateTool(server) 
{\n\tserver.addTool({\n\t\tname: 'generate',\n\t\tdescription:\n\t\t\t'Generates individual task files in tasks/ directory based on tasks.json',\n\t\tparameters: z.object({\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\toutput: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Output directory (default: same directory as tasks file)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Generating task files with args: ${JSON.stringify(args)}`);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst outputDir = args.output\n\t\t\t\t\t? 
path.resolve(args.projectRoot, args.output)\n\t\t\t\t\t: path.dirname(tasksJsonPath);\n\n\t\t\t\tconst result = await generateTaskFilesDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\toutputDir: outputDir,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully generated task files: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(\n\t\t\t\t\t\t`Failed to generate task files: ${result.error?.message || 'Unknown error'}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error generating task files',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in generate tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/add-tag.js", "/**\n * tools/add-tag.js\n * Tool to create a new tag\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { addTagDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\n\n/**\n * Register the addTag tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerAddTagTool(server) {\n\tserver.addTool({\n\t\tname: 'add_tag',\n\t\tdescription: 'Create a new tag for organizing tasks in different contexts',\n\t\tparameters: z.object({\n\t\t\tname: z.string().describe('Name of the new tag to create'),\n\t\t\tcopyFromCurrent: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Whether to copy tasks from the current tag (default: false)'\n\t\t\t\t),\n\t\t\tcopyFromTag: 
z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Specific tag to copy tasks from'),\n\t\t\tfromBranch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Create tag name from current git branch (ignores name parameter)'\n\t\t\t\t),\n\t\t\tdescription: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Optional description for the tag'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting add-tag with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function\n\t\t\t\tconst result = await addTagDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tname: args.name,\n\t\t\t\t\t\tcopyFromCurrent: args.copyFromCurrent,\n\t\t\t\t\t\tcopyFromTag: args.copyFromTag,\n\t\t\t\t\t\tfromBranch: args.fromBranch,\n\t\t\t\t\t\tdescription: args.description,\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error creating tag',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in add-tag tool: 
${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/task-master.js", "/**\n * task-master.js\n * This module provides a centralized path management system for the Task Master application.\n * It exports the TaskMaster class and the initTaskMaster factory function to create a single,\n * authoritative source for all critical file and directory paths, resolving circular dependencies.\n */\n\nimport path from 'path';\nimport fs from 'fs';\nimport {\n\tTASKMASTER_DIR,\n\tTASKMASTER_TASKS_FILE,\n\tLEGACY_TASKS_FILE,\n\tTASKMASTER_DOCS_DIR,\n\tTASKMASTER_REPORTS_DIR,\n\tTASKMASTER_CONFIG_FILE,\n\tLEGACY_CONFIG_FILE,\n\tCOMPLEXITY_REPORT_FILE\n} from './constants/paths.js';\nimport { findProjectRoot } from './utils/path-utils.js';\n\n/**\n * TaskMaster class manages all the paths for the application.\n * An instance of this class is created by the initTaskMaster function.\n */\nexport class TaskMaster {\n\t#paths;\n\t#tag;\n\n\t/**\n\t * The constructor is intended to be used only by the initTaskMaster factory function.\n\t * @param {object} paths - A pre-resolved object of all application paths.\n\t * @param {string|undefined} tag - The current tag.\n\t */\n\tconstructor(paths, tag) {\n\t\tthis.#paths = Object.freeze({ ...paths });\n\t\tthis.#tag = tag;\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the project root.\n\t */\n\tgetProjectRoot() {\n\t\treturn this.#paths.projectRoot;\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the .taskmaster directory.\n\t */\n\tgetTaskMasterDir() {\n\t\treturn this.#paths.taskMasterDir;\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the tasks.json file.\n\t */\n\tgetTasksPath() {\n\t\treturn this.#paths.tasksPath;\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the PRD file.\n\t */\n\tgetPrdPath() {\n\t\treturn this.#paths.prdPath;\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path 
to the complexity report.\n\t */\n\tgetComplexityReportPath() {\n\t\tif (this.#paths.complexityReportPath) {\n\t\t\treturn this.#paths.complexityReportPath;\n\t\t}\n\n\t\tconst complexityReportFile =\n\t\t\tthis.getCurrentTag() !== 'master'\n\t\t\t\t? COMPLEXITY_REPORT_FILE.replace(\n\t\t\t\t\t\t'.json',\n\t\t\t\t\t\t`_${this.getCurrentTag()}.json`\n\t\t\t\t\t)\n\t\t\t\t: COMPLEXITY_REPORT_FILE;\n\n\t\treturn path.join(this.#paths.projectRoot, complexityReportFile);\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the config.json file.\n\t */\n\tgetConfigPath() {\n\t\treturn this.#paths.configPath;\n\t}\n\n\t/**\n\t * @returns {string|null} The absolute path to the state.json file.\n\t */\n\tgetStatePath() {\n\t\treturn this.#paths.statePath;\n\t}\n\n\t/**\n\t * @returns {object} A frozen object containing all resolved paths.\n\t */\n\tgetAllPaths() {\n\t\treturn this.#paths;\n\t}\n\n\t/**\n\t * Gets the current tag from state.json or falls back to defaultTag from config\n\t * @returns {string} The current tag name\n\t */\n\tgetCurrentTag() {\n\t\tif (this.#tag) {\n\t\t\treturn this.#tag;\n\t\t}\n\n\t\ttry {\n\t\t\t// Try to read current tag from state.json using fs directly\n\t\t\tif (fs.existsSync(this.#paths.statePath)) {\n\t\t\t\tconst rawState = fs.readFileSync(this.#paths.statePath, 'utf8');\n\t\t\t\tconst stateData = JSON.parse(rawState);\n\t\t\t\tif (stateData && stateData.currentTag) {\n\t\t\t\t\treturn stateData.currentTag;\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) {\n\t\t\t// Ignore errors, fall back to default\n\t\t}\n\n\t\t// Fall back to defaultTag from config using fs directly\n\t\ttry {\n\t\t\tif (fs.existsSync(this.#paths.configPath)) {\n\t\t\t\tconst rawConfig = fs.readFileSync(this.#paths.configPath, 'utf8');\n\t\t\t\tconst configData = JSON.parse(rawConfig);\n\t\t\t\tif (configData && configData.global && configData.global.defaultTag) {\n\t\t\t\t\treturn configData.global.defaultTag;\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (error) 
{\n\t\t\t// Ignore errors, use hardcoded default\n\t\t}\n\n\t\t// Final fallback\n\t\treturn 'master';\n\t}\n}\n\n/**\n * Initializes a TaskMaster instance with resolved paths.\n * This function centralizes path resolution logic.\n *\n * @param {object} [overrides={}] - An object with possible path overrides.\n * @param {string} [overrides.projectRoot]\n * @param {string} [overrides.tasksPath]\n * @param {string} [overrides.prdPath]\n * @param {string} [overrides.complexityReportPath]\n * @param {string} [overrides.configPath]\n * @param {string} [overrides.statePath]\n * @param {string} [overrides.tag]\n * @returns {TaskMaster} An initialized TaskMaster instance.\n */\nexport function initTaskMaster(overrides = {}) {\n\tconst resolvePath = (\n\t\tpathType,\n\t\toverride,\n\t\tdefaultPaths = [],\n\t\tbasePath = null,\n\t\tcreateParentDirs = false\n\t) => {\n\t\tif (typeof override === 'string') {\n\t\t\tconst resolvedPath = path.isAbsolute(override)\n\t\t\t\t? override\n\t\t\t\t: path.resolve(basePath || process.cwd(), override);\n\n\t\t\tif (createParentDirs) {\n\t\t\t\t// For output paths, create parent directory if it doesn't exist\n\t\t\t\tconst parentDir = path.dirname(resolvedPath);\n\t\t\t\tif (!fs.existsSync(parentDir)) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tfs.mkdirSync(parentDir, { recursive: true });\n\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t\t`Could not create directory for ${pathType}: ${parentDir}. 
Error: ${error.message}`\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Original validation logic\n\t\t\t\tif (!fs.existsSync(resolvedPath)) {\n\t\t\t\t\tthrow new Error(\n\t\t\t\t\t\t`${pathType} override path does not exist: ${resolvedPath}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn resolvedPath;\n\t\t}\n\n\t\tif (override === true) {\n\t\t\t// Required path - search defaults and fail if not found\n\t\t\tfor (const defaultPath of defaultPaths) {\n\t\t\t\tconst fullPath = path.isAbsolute(defaultPath)\n\t\t\t\t\t? defaultPath\n\t\t\t\t\t: path.join(basePath || process.cwd(), defaultPath);\n\t\t\t\tif (fs.existsSync(fullPath)) {\n\t\t\t\t\treturn fullPath;\n\t\t\t\t}\n\t\t\t}\n\t\t\tthrow new Error(\n\t\t\t\t`Required ${pathType} not found. Searched: ${defaultPaths.join(', ')}`\n\t\t\t);\n\t\t}\n\n\t\t// Optional path (override === false/undefined) - search defaults, return null if not found\n\t\tfor (const defaultPath of defaultPaths) {\n\t\t\tconst fullPath = path.isAbsolute(defaultPath)\n\t\t\t\t? 
defaultPath\n\t\t\t\t: path.join(basePath || process.cwd(), defaultPath);\n\t\t\tif (fs.existsSync(fullPath)) {\n\t\t\t\treturn fullPath;\n\t\t\t}\n\t\t}\n\n\t\treturn null;\n\t};\n\n\tconst paths = {};\n\n\t// Project Root\n\tif (overrides.projectRoot) {\n\t\tconst resolvedOverride = path.resolve(overrides.projectRoot);\n\t\tif (!fs.existsSync(resolvedOverride)) {\n\t\t\tthrow new Error(\n\t\t\t\t`Project root override path does not exist: ${resolvedOverride}`\n\t\t\t);\n\t\t}\n\n\t\tconst hasTaskmasterDir = fs.existsSync(\n\t\t\tpath.join(resolvedOverride, TASKMASTER_DIR)\n\t\t);\n\t\tconst hasLegacyConfig = fs.existsSync(\n\t\t\tpath.join(resolvedOverride, LEGACY_CONFIG_FILE)\n\t\t);\n\n\t\tif (!hasTaskmasterDir && !hasLegacyConfig) {\n\t\t\tthrow new Error(\n\t\t\t\t`Project root override is not a valid taskmaster project: ${resolvedOverride}`\n\t\t\t);\n\t\t}\n\n\t\tpaths.projectRoot = resolvedOverride;\n\t} else {\n\t\t// findProjectRoot now always returns a value (fallback to cwd)\n\t\tpaths.projectRoot = findProjectRoot();\n\t}\n\n\t// TaskMaster Directory\n\tif ('taskMasterDir' in overrides) {\n\t\tpaths.taskMasterDir = resolvePath(\n\t\t\t'taskmaster directory',\n\t\t\toverrides.taskMasterDir,\n\t\t\t[TASKMASTER_DIR],\n\t\t\tpaths.projectRoot\n\t\t);\n\t} else {\n\t\tpaths.taskMasterDir = resolvePath(\n\t\t\t'taskmaster directory',\n\t\t\tfalse,\n\t\t\t[TASKMASTER_DIR],\n\t\t\tpaths.projectRoot\n\t\t);\n\t}\n\n\t// Always set default paths first\n\t// These can be overridden below if needed\n\tpaths.configPath = path.join(paths.projectRoot, TASKMASTER_CONFIG_FILE);\n\tpaths.statePath = path.join(\n\t\tpaths.taskMasterDir || path.join(paths.projectRoot, TASKMASTER_DIR),\n\t\t'state.json'\n\t);\n\tpaths.tasksPath = path.join(paths.projectRoot, TASKMASTER_TASKS_FILE);\n\n\t// Handle overrides - only validate/resolve if explicitly provided\n\tif ('configPath' in overrides) {\n\t\tpaths.configPath = resolvePath(\n\t\t\t'config 
file',\n\t\t\toverrides.configPath,\n\t\t\t[TASKMASTER_CONFIG_FILE, LEGACY_CONFIG_FILE],\n\t\t\tpaths.projectRoot\n\t\t);\n\t}\n\n\tif ('statePath' in overrides) {\n\t\tpaths.statePath = resolvePath(\n\t\t\t'state file',\n\t\t\toverrides.statePath,\n\t\t\t['state.json'],\n\t\t\tpaths.taskMasterDir\n\t\t);\n\t}\n\n\tif ('tasksPath' in overrides) {\n\t\tpaths.tasksPath = resolvePath(\n\t\t\t'tasks file',\n\t\t\toverrides.tasksPath,\n\t\t\t[TASKMASTER_TASKS_FILE, LEGACY_TASKS_FILE],\n\t\t\tpaths.projectRoot\n\t\t);\n\t}\n\n\tif ('prdPath' in overrides) {\n\t\tpaths.prdPath = resolvePath(\n\t\t\t'PRD file',\n\t\t\toverrides.prdPath,\n\t\t\t[\n\t\t\t\tpath.join(TASKMASTER_DOCS_DIR, 'PRD.md'),\n\t\t\t\tpath.join(TASKMASTER_DOCS_DIR, 'prd.md'),\n\t\t\t\tpath.join(TASKMASTER_DOCS_DIR, 'PRD.txt'),\n\t\t\t\tpath.join(TASKMASTER_DOCS_DIR, 'prd.txt'),\n\t\t\t\tpath.join('scripts', 'PRD.md'),\n\t\t\t\tpath.join('scripts', 'prd.md'),\n\t\t\t\tpath.join('scripts', 'PRD.txt'),\n\t\t\t\tpath.join('scripts', 'prd.txt'),\n\t\t\t\t'PRD.md',\n\t\t\t\t'prd.md',\n\t\t\t\t'PRD.txt',\n\t\t\t\t'prd.txt'\n\t\t\t],\n\t\t\tpaths.projectRoot\n\t\t);\n\t}\n\n\tif ('complexityReportPath' in overrides) {\n\t\tpaths.complexityReportPath = resolvePath(\n\t\t\t'complexity report',\n\t\t\toverrides.complexityReportPath,\n\t\t\t[\n\t\t\t\tpath.join(TASKMASTER_REPORTS_DIR, 'task-complexity-report.json'),\n\t\t\t\tpath.join(TASKMASTER_REPORTS_DIR, 'complexity-report.json'),\n\t\t\t\tpath.join('scripts', 'task-complexity-report.json'),\n\t\t\t\tpath.join('scripts', 'complexity-report.json'),\n\t\t\t\t'task-complexity-report.json',\n\t\t\t\t'complexity-report.json'\n\t\t\t],\n\t\t\tpaths.projectRoot,\n\t\t\ttrue // Enable parent directory creation for output paths\n\t\t);\n\t}\n\n\treturn new TaskMaster(paths, overrides.tag);\n}\n"], ["/claude-task-master/src/utils/manage-gitignore.js", "// Utility to manage .gitignore files with task file preferences and template merging\nimport fs from 'fs';\nimport path 
from 'path';\n\n// Constants\nconst TASK_FILES_COMMENT = '# Task files';\nconst TASK_JSON_PATTERN = 'tasks.json';\nconst TASK_DIR_PATTERN = 'tasks/';\n\n/**\n * Normalizes a line by removing comments and trimming whitespace\n * @param {string} line - Line to normalize\n * @returns {string} Normalized line\n */\nfunction normalizeLine(line) {\n\treturn line.trim().replace(/^#/, '').trim();\n}\n\n/**\n * Checks if a line is task-related (tasks.json or tasks/)\n * @param {string} line - Line to check\n * @returns {boolean} True if line is task-related\n */\nfunction isTaskLine(line) {\n\tconst normalized = normalizeLine(line);\n\treturn normalized === TASK_JSON_PATTERN || normalized === TASK_DIR_PATTERN;\n}\n\n/**\n * Adjusts task-related lines in template based on storage preference\n * @param {string[]} templateLines - Array of template lines\n * @param {boolean} storeTasksInGit - Whether to comment out task lines\n * @returns {string[]} Adjusted template lines\n */\nfunction adjustTaskLinesInTemplate(templateLines, storeTasksInGit) {\n\treturn templateLines.map((line) => {\n\t\tif (isTaskLine(line)) {\n\t\t\tconst normalized = normalizeLine(line);\n\t\t\t// Preserve original trailing whitespace from the line\n\t\t\tconst originalTrailingSpace = line.match(/\\s*$/)[0];\n\t\t\treturn storeTasksInGit\n\t\t\t\t? 
`# ${normalized}${originalTrailingSpace}`\n\t\t\t\t: `${normalized}${originalTrailingSpace}`;\n\t\t}\n\t\treturn line;\n\t});\n}\n\n/**\n * Removes existing task files section from content\n * @param {string[]} existingLines - Existing file lines\n * @returns {string[]} Lines with task section removed\n */\nfunction removeExistingTaskSection(existingLines) {\n\tconst cleanedLines = [];\n\tlet inTaskSection = false;\n\n\tfor (const line of existingLines) {\n\t\t// Start of task files section\n\t\tif (line.trim() === TASK_FILES_COMMENT) {\n\t\t\tinTaskSection = true;\n\t\t\tcontinue;\n\t\t}\n\n\t\t// Task lines (commented or not)\n\t\tif (isTaskLine(line)) {\n\t\t\tcontinue;\n\t\t}\n\n\t\t// Empty lines within task section\n\t\tif (inTaskSection && !line.trim()) {\n\t\t\tcontinue;\n\t\t}\n\n\t\t// End of task section (any non-empty, non-task line)\n\t\tif (inTaskSection && line.trim() && !isTaskLine(line)) {\n\t\t\tinTaskSection = false;\n\t\t}\n\n\t\t// Keep all other lines\n\t\tif (!inTaskSection) {\n\t\t\tcleanedLines.push(line);\n\t\t}\n\t}\n\n\treturn cleanedLines;\n}\n\n/**\n * Filters template lines to only include new content not already present\n * @param {string[]} templateLines - Template lines\n * @param {Set<string>} existingLinesSet - Set of existing trimmed lines\n * @returns {string[]} New lines to add\n */\nfunction filterNewTemplateLines(templateLines, existingLinesSet) {\n\treturn templateLines.filter((line) => {\n\t\tconst trimmed = line.trim();\n\t\tif (!trimmed) return false;\n\n\t\t// Skip task-related lines (handled separately)\n\t\tif (isTaskLine(line) || trimmed === TASK_FILES_COMMENT) {\n\t\t\treturn false;\n\t\t}\n\n\t\t// Include only if not already present\n\t\treturn !existingLinesSet.has(trimmed);\n\t});\n}\n\n/**\n * Builds the task files section based on storage preference\n * @param {boolean} storeTasksInGit - Whether to comment out task lines\n * @returns {string[]} Task files section lines\n */\nfunction 
buildTaskFilesSection(storeTasksInGit) {\n\tconst section = [TASK_FILES_COMMENT];\n\n\tif (storeTasksInGit) {\n\t\tsection.push(`# ${TASK_JSON_PATTERN}`, `# ${TASK_DIR_PATTERN} `);\n\t} else {\n\t\tsection.push(TASK_JSON_PATTERN, `${TASK_DIR_PATTERN} `);\n\t}\n\n\treturn section;\n}\n\n/**\n * Adds a separator line if needed (avoids double spacing)\n * @param {string[]} lines - Current lines array\n */\nfunction addSeparatorIfNeeded(lines) {\n\tif (lines.some((line) => line.trim())) {\n\t\tconst lastLine = lines[lines.length - 1];\n\t\tif (lastLine && lastLine.trim()) {\n\t\t\tlines.push('');\n\t\t}\n\t}\n}\n\n/**\n * Validates input parameters\n * @param {string} targetPath - Path to .gitignore file\n * @param {string} content - Template content\n * @param {boolean} storeTasksInGit - Storage preference\n * @throws {Error} If validation fails\n */\nfunction validateInputs(targetPath, content, storeTasksInGit) {\n\tif (!targetPath || typeof targetPath !== 'string') {\n\t\tthrow new Error('targetPath must be a non-empty string');\n\t}\n\n\tif (!targetPath.endsWith('.gitignore')) {\n\t\tthrow new Error('targetPath must end with .gitignore');\n\t}\n\n\tif (!content || typeof content !== 'string') {\n\t\tthrow new Error('content must be a non-empty string');\n\t}\n\n\tif (typeof storeTasksInGit !== 'boolean') {\n\t\tthrow new Error('storeTasksInGit must be a boolean');\n\t}\n}\n\n/**\n * Creates a new .gitignore file from template\n * @param {string} targetPath - Path to create file at\n * @param {string[]} templateLines - Adjusted template lines\n * @param {function} log - Logging function\n */\nfunction createNewGitignoreFile(targetPath, templateLines, log) {\n\ttry {\n\t\tfs.writeFileSync(targetPath, templateLines.join('\\n') + '\\n');\n\t\tif (typeof log === 'function') {\n\t\t\tlog('success', `Created ${targetPath} with full template`);\n\t\t}\n\t} catch (error) {\n\t\tif (typeof log === 'function') {\n\t\t\tlog('error', `Failed to create ${targetPath}: 
${error.message}`);\n\t\t}\n\t\tthrow error;\n\t}\n}\n\n/**\n * Merges template content with existing .gitignore file\n * @param {string} targetPath - Path to existing file\n * @param {string[]} templateLines - Adjusted template lines\n * @param {boolean} storeTasksInGit - Storage preference\n * @param {function} log - Logging function\n */\nfunction mergeWithExistingFile(\n\ttargetPath,\n\ttemplateLines,\n\tstoreTasksInGit,\n\tlog\n) {\n\ttry {\n\t\t// Read and process existing file\n\t\tconst existingContent = fs.readFileSync(targetPath, 'utf8');\n\t\tconst existingLines = existingContent.split('\\n');\n\n\t\t// Remove existing task section\n\t\tconst cleanedExistingLines = removeExistingTaskSection(existingLines);\n\n\t\t// Find new template lines to add\n\t\tconst existingLinesSet = new Set(\n\t\t\tcleanedExistingLines.map((line) => line.trim()).filter((line) => line)\n\t\t);\n\t\tconst newLines = filterNewTemplateLines(templateLines, existingLinesSet);\n\n\t\t// Build final content\n\t\tconst finalLines = [...cleanedExistingLines];\n\n\t\t// Add new template content\n\t\tif (newLines.length > 0) {\n\t\t\taddSeparatorIfNeeded(finalLines);\n\t\t\tfinalLines.push(...newLines);\n\t\t}\n\n\t\t// Add task files section\n\t\taddSeparatorIfNeeded(finalLines);\n\t\tfinalLines.push(...buildTaskFilesSection(storeTasksInGit));\n\n\t\t// Write result\n\t\tfs.writeFileSync(targetPath, finalLines.join('\\n') + '\\n');\n\n\t\tif (typeof log === 'function') {\n\t\t\tconst hasNewContent =\n\t\t\t\tnewLines.length > 0 ? 
' and merged new content' : '';\n\t\t\tlog(\n\t\t\t\t'success',\n\t\t\t\t`Updated ${targetPath} according to user preference${hasNewContent}`\n\t\t\t);\n\t\t}\n\t} catch (error) {\n\t\tif (typeof log === 'function') {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`Failed to merge content with ${targetPath}: ${error.message}`\n\t\t\t);\n\t\t}\n\t\tthrow error;\n\t}\n}\n\n/**\n * Manages .gitignore file creation and updates with task file preferences\n * @param {string} targetPath - Path to the .gitignore file\n * @param {string} content - Template content for .gitignore\n * @param {boolean} storeTasksInGit - Whether to store tasks in git or not\n * @param {function} log - Logging function (level, message)\n * @throws {Error} If validation or file operations fail\n */\nfunction manageGitignoreFile(\n\ttargetPath,\n\tcontent,\n\tstoreTasksInGit = true,\n\tlog = null\n) {\n\t// Validate inputs\n\tvalidateInputs(targetPath, content, storeTasksInGit);\n\n\t// Process template with task preference\n\tconst templateLines = content.split('\\n');\n\tconst adjustedTemplateLines = adjustTaskLinesInTemplate(\n\t\ttemplateLines,\n\t\tstoreTasksInGit\n\t);\n\n\t// Handle file creation or merging\n\tif (!fs.existsSync(targetPath)) {\n\t\tcreateNewGitignoreFile(targetPath, adjustedTemplateLines, log);\n\t} else {\n\t\tmergeWithExistingFile(\n\t\t\ttargetPath,\n\t\t\tadjustedTemplateLines,\n\t\t\tstoreTasksInGit,\n\t\t\tlog\n\t\t);\n\t}\n}\n\nexport default manageGitignoreFile;\nexport {\n\tmanageGitignoreFile,\n\tnormalizeLine,\n\tisTaskLine,\n\tbuildTaskFilesSection,\n\tTASK_FILES_COMMENT,\n\tTASK_JSON_PATTERN,\n\tTASK_DIR_PATTERN\n};\n"], ["/claude-task-master/mcp-server/src/tools/use-tag.js", "/**\n * tools/use-tag.js\n * Tool to switch to a different tag context\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { useTagDirect } from '../core/task-master-core.js';\nimport { findTasksPath } 
from '../core/utils/path-utils.js';\n\n/**\n * Register the useTag tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerUseTagTool(server) {\n\tserver.addTool({\n\t\tname: 'use_tag',\n\t\tdescription: 'Switch to a different tag context for task operations',\n\t\tparameters: z.object({\n\t\t\tname: z.string().describe('Name of the tag to switch to'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting use-tag with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function\n\t\t\t\tconst result = await useTagDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tname: args.name,\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error switching tag',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in use-tag tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/profiles/claude.js", "// Claude Code profile 
for rule-transformer\nimport path from 'path';\nimport fs from 'fs';\nimport { isSilentMode, log } from '../../scripts/modules/utils.js';\nimport { createProfile } from './base-profile.js';\n\n// Helper function to recursively copy directory (adopted from Roo profile)\nfunction copyRecursiveSync(src, dest) {\n\tconst exists = fs.existsSync(src);\n\tconst stats = exists && fs.statSync(src);\n\tconst isDirectory = exists && stats.isDirectory();\n\tif (isDirectory) {\n\t\tif (!fs.existsSync(dest)) fs.mkdirSync(dest, { recursive: true });\n\t\tfs.readdirSync(src).forEach((childItemName) => {\n\t\t\tcopyRecursiveSync(\n\t\t\t\tpath.join(src, childItemName),\n\t\t\t\tpath.join(dest, childItemName)\n\t\t\t);\n\t\t});\n\t} else {\n\t\tfs.copyFileSync(src, dest);\n\t}\n}\n\n// Helper function to recursively remove directory\nfunction removeDirectoryRecursive(dirPath) {\n\tif (fs.existsSync(dirPath)) {\n\t\ttry {\n\t\t\tfs.rmSync(dirPath, { recursive: true, force: true });\n\t\t\treturn true;\n\t\t} catch (err) {\n\t\t\tlog('error', `Failed to remove directory ${dirPath}: ${err.message}`);\n\t\t\treturn false;\n\t\t}\n\t}\n\treturn true;\n}\n\n// Lifecycle functions for Claude Code profile\nfunction onAddRulesProfile(targetDir, assetsDir) {\n\t// Copy .claude directory recursively\n\tconst claudeSourceDir = path.join(assetsDir, 'claude');\n\tconst claudeDestDir = path.join(targetDir, '.claude');\n\n\tif (!fs.existsSync(claudeSourceDir)) {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[Claude] Source directory does not exist: ${claudeSourceDir}`\n\t\t);\n\t\treturn;\n\t}\n\n\ttry {\n\t\tcopyRecursiveSync(claudeSourceDir, claudeDestDir);\n\t\tlog('debug', `[Claude] Copied .claude directory to ${claudeDestDir}`);\n\t} catch (err) {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[Claude] An error occurred during directory copy: ${err.message}`\n\t\t);\n\t}\n\n\t// Handle CLAUDE.md import for non-destructive integration\n\tconst sourceFile = path.join(assetsDir, 'AGENTS.md');\n\tconst userClaudeFile = 
path.join(targetDir, 'CLAUDE.md');\n\tconst taskMasterClaudeFile = path.join(targetDir, '.taskmaster', 'CLAUDE.md');\n\tconst importLine = '@./.taskmaster/CLAUDE.md';\n\tconst importSection = `\\n## Task Master AI Instructions\\n**Import Task Master's development workflow commands and guidelines, treat as if import is in the main CLAUDE.md file.**\\n${importLine}`;\n\n\tif (fs.existsSync(sourceFile)) {\n\t\ttry {\n\t\t\t// Ensure .taskmaster directory exists\n\t\t\tconst taskMasterDir = path.join(targetDir, '.taskmaster');\n\t\t\tif (!fs.existsSync(taskMasterDir)) {\n\t\t\t\tfs.mkdirSync(taskMasterDir, { recursive: true });\n\t\t\t}\n\n\t\t\t// Copy Task Master instructions to .taskmaster/CLAUDE.md\n\t\t\tfs.copyFileSync(sourceFile, taskMasterClaudeFile);\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[Claude] Created Task Master instructions at ${taskMasterClaudeFile}`\n\t\t\t);\n\n\t\t\t// Handle user's CLAUDE.md\n\t\t\tif (fs.existsSync(userClaudeFile)) {\n\t\t\t\t// Check if import already exists\n\t\t\t\tconst content = fs.readFileSync(userClaudeFile, 'utf8');\n\t\t\t\tif (!content.includes(importLine)) {\n\t\t\t\t\t// Append import section at the end\n\t\t\t\t\tconst updatedContent = content.trim() + '\\n' + importSection + '\\n';\n\t\t\t\t\tfs.writeFileSync(userClaudeFile, updatedContent);\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'info',\n\t\t\t\t\t\t`[Claude] Added Task Master import to existing ${userClaudeFile}`\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'info',\n\t\t\t\t\t\t`[Claude] Task Master import already present in ${userClaudeFile}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Create minimal CLAUDE.md with the import section\n\t\t\t\tconst minimalContent = `# Claude Code Instructions\\n${importSection}\\n`;\n\t\t\t\tfs.writeFileSync(userClaudeFile, minimalContent);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`[Claude] Created ${userClaudeFile} with Task Master import`\n\t\t\t\t);\n\t\t\t}\n\t\t} catch (err) 
{\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`[Claude] Failed to set up Claude instructions: ${err.message}`\n\t\t\t);\n\t\t}\n\t}\n}\n\nfunction onRemoveRulesProfile(targetDir) {\n\t// Remove .claude directory recursively\n\tconst claudeDir = path.join(targetDir, '.claude');\n\tif (removeDirectoryRecursive(claudeDir)) {\n\t\tlog('debug', `[Claude] Removed .claude directory from ${claudeDir}`);\n\t}\n\n\t// Clean up CLAUDE.md import\n\tconst userClaudeFile = path.join(targetDir, 'CLAUDE.md');\n\tconst taskMasterClaudeFile = path.join(targetDir, '.taskmaster', 'CLAUDE.md');\n\tconst importLine = '@./.taskmaster/CLAUDE.md';\n\n\ttry {\n\t\t// Remove Task Master CLAUDE.md from .taskmaster\n\t\tif (fs.existsSync(taskMasterClaudeFile)) {\n\t\t\tfs.rmSync(taskMasterClaudeFile, { force: true });\n\t\t\tlog('debug', `[Claude] Removed ${taskMasterClaudeFile}`);\n\t\t}\n\n\t\t// Clean up import from user's CLAUDE.md\n\t\tif (fs.existsSync(userClaudeFile)) {\n\t\t\tconst content = fs.readFileSync(userClaudeFile, 'utf8');\n\t\t\tconst lines = content.split('\\n');\n\t\t\tconst filteredLines = [];\n\t\t\tlet skipNextLines = 0;\n\n\t\t\t// Remove the Task Master section\n\t\t\tfor (let i = 0; i < lines.length; i++) {\n\t\t\t\tif (skipNextLines > 0) {\n\t\t\t\t\tskipNextLines--;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t// Check if this is the start of our Task Master section\n\t\t\t\tif (lines[i].includes('## Task Master AI Instructions')) {\n\t\t\t\t\t// Skip this line and the next two lines (bold text and import)\n\t\t\t\t\tskipNextLines = 2;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t// Also remove standalone import lines (for backward compatibility)\n\t\t\t\tif (lines[i].trim() === importLine) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tfilteredLines.push(lines[i]);\n\t\t\t}\n\n\t\t\t// Join back and clean up excessive newlines\n\t\t\tlet updatedContent = filteredLines\n\t\t\t\t.join('\\n')\n\t\t\t\t.replace(/\\n{3,}/g, '\\n\\n')\n\t\t\t\t.trim();\n\n\t\t\t// Check if file 
only contained our minimal template\n\t\t\tif (\n\t\t\t\tupdatedContent === '# Claude Code Instructions' ||\n\t\t\t\tupdatedContent === ''\n\t\t\t) {\n\t\t\t\t// File only contained our import, remove it\n\t\t\t\tfs.rmSync(userClaudeFile, { force: true });\n\t\t\t\tlog('debug', `[Claude] Removed empty ${userClaudeFile}`);\n\t\t\t} else {\n\t\t\t\t// Write back without the import\n\t\t\t\tfs.writeFileSync(userClaudeFile, updatedContent + '\\n');\n\t\t\t\tlog(\n\t\t\t\t\t'debug',\n\t\t\t\t\t`[Claude] Removed Task Master import from ${userClaudeFile}`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t} catch (err) {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[Claude] Failed to remove Claude instructions: ${err.message}`\n\t\t);\n\t}\n}\n\n/**\n * Transform standard MCP config format to Claude format\n * @param {Object} mcpConfig - Standard MCP configuration object\n * @returns {Object} - Transformed Claude configuration object\n */\nfunction transformToClaudeFormat(mcpConfig) {\n\tconst claudeConfig = {};\n\n\t// Transform mcpServers to servers (keeping the same structure but adding type)\n\tif (mcpConfig.mcpServers) {\n\t\tclaudeConfig.mcpServers = {};\n\n\t\tfor (const [serverName, serverConfig] of Object.entries(\n\t\t\tmcpConfig.mcpServers\n\t\t)) {\n\t\t\t// Transform server configuration with type as first key\n\t\t\tconst reorderedServer = {};\n\n\t\t\t// Add type: \"stdio\" as the first key\n\t\t\treorderedServer.type = 'stdio';\n\n\t\t\t// Then add the rest of the properties in order\n\t\t\tif (serverConfig.command) reorderedServer.command = serverConfig.command;\n\t\t\tif (serverConfig.args) reorderedServer.args = serverConfig.args;\n\t\t\tif (serverConfig.env) reorderedServer.env = serverConfig.env;\n\n\t\t\t// Add any other properties that might exist\n\t\t\tObject.keys(serverConfig).forEach((key) => {\n\t\t\t\tif (!['command', 'args', 'env', 'type'].includes(key)) {\n\t\t\t\t\treorderedServer[key] = 
serverConfig[key];\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tclaudeConfig.mcpServers[serverName] = reorderedServer;\n\t\t}\n\t}\n\n\treturn claudeConfig;\n}\n\nfunction onPostConvertRulesProfile(targetDir, assetsDir) {\n\t// For Claude, post-convert is the same as add since we don't transform rules\n\tonAddRulesProfile(targetDir, assetsDir);\n\n\t// Transform MCP configuration to Claude format\n\tconst mcpConfigPath = path.join(targetDir, '.mcp.json');\n\tif (fs.existsSync(mcpConfigPath)) {\n\t\ttry {\n\t\t\tconst mcpConfig = JSON.parse(fs.readFileSync(mcpConfigPath, 'utf8'));\n\t\t\tconst claudeConfig = transformToClaudeFormat(mcpConfig);\n\n\t\t\t// Write back the transformed configuration\n\t\t\tfs.writeFileSync(\n\t\t\t\tmcpConfigPath,\n\t\t\t\tJSON.stringify(claudeConfig, null, '\\t') + '\\n'\n\t\t\t);\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[Claude] Transformed MCP configuration to Claude format at ${mcpConfigPath}`\n\t\t\t);\n\t\t} catch (err) {\n\t\t\tlog(\n\t\t\t\t'error',\n\t\t\t\t`[Claude] Failed to transform MCP configuration: ${err.message}`\n\t\t\t);\n\t\t}\n\t}\n}\n\n// Create and export claude profile using the base factory\nexport const claudeProfile = createProfile({\n\tname: 'claude',\n\tdisplayName: 'Claude Code',\n\turl: 'claude.ai',\n\tdocsUrl: 'docs.anthropic.com/en/docs/claude-code',\n\tprofileDir: '.', // Root directory\n\trulesDir: '.', // No specific rules directory needed\n\tmcpConfigName: '.mcp.json', // Place MCP config in project root\n\tincludeDefaultRules: false,\n\tfileMap: {\n\t\t'AGENTS.md': '.taskmaster/CLAUDE.md'\n\t},\n\tonAdd: onAddRulesProfile,\n\tonRemove: onRemoveRulesProfile,\n\tonPostConvert: onPostConvertRulesProfile\n});\n\n// Export lifecycle functions separately to avoid naming conflicts\nexport { onAddRulesProfile, onRemoveRulesProfile, onPostConvertRulesProfile };\n"], ["/claude-task-master/mcp-server/src/tools/copy-tag.js", "/**\n * tools/copy-tag.js\n * Tool to copy an existing tag to a new tag\n */\n\nimport { z } from 
'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { copyTagDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\n\n/**\n * Register the copyTag tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerCopyTagTool(server) {\n\tserver.addTool({\n\t\tname: 'copy_tag',\n\t\tdescription:\n\t\t\t'Copy an existing tag to create a new tag with all tasks and metadata',\n\t\tparameters: z.object({\n\t\t\tsourceName: z.string().describe('Name of the source tag to copy from'),\n\t\t\ttargetName: z.string().describe('Name of the new tag to create'),\n\t\t\tdescription: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Optional description for the new tag'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting copy-tag with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function\n\t\t\t\tconst result = await copyTagDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tsourceName: args.sourceName,\n\t\t\t\t\t\ttargetName: args.targetName,\n\t\t\t\t\t\tdescription: args.description,\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error copying tag',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in copy-tag tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/research.js", "/**\n * tools/research.js\n * Tool to perform AI-powered research queries with project context\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { researchDirect } from '../core/task-master-core.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the research tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerResearchTool(server) 
{\n\tserver.addTool({\n\t\tname: 'research',\n\t\tdescription: 'Perform AI-powered research queries with project context',\n\n\t\tparameters: z.object({\n\t\t\tquery: z.string().describe('Research query/prompt (required)'),\n\t\t\ttaskIds: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Comma-separated list of task/subtask IDs for context (e.g., \"15,16.2,17\")'\n\t\t\t\t),\n\t\t\tfilePaths: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Comma-separated list of file paths for context (e.g., \"src/api.js,docs/readme.md\")'\n\t\t\t\t),\n\t\t\tcustomContext: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Additional custom context text to include in the research'),\n\t\t\tincludeProjectTree: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Include project file tree structure in context (default: false)'\n\t\t\t\t),\n\t\t\tdetailLevel: z\n\t\t\t\t.enum(['low', 'medium', 'high'])\n\t\t\t\t.optional()\n\t\t\t\t.describe('Detail level for the research response (default: medium)'),\n\t\t\tsaveTo: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Automatically save research results to specified task/subtask ID (e.g., \"15\" or \"15.2\")'\n\t\t\t\t),\n\t\t\tsaveToFile: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Save research results to .taskmaster/docs/research/ directory (default: false)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(\n\t\t\t\t\t`Starting research with query: \"${args.query.substring(0, 100)}${args.query.length > 100 ? 
'...' : ''}\"`\n\t\t\t\t);\n\n\t\t\t\t// Call the direct function\n\t\t\t\tconst result = await researchDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\tquery: args.query,\n\t\t\t\t\t\ttaskIds: args.taskIds,\n\t\t\t\t\t\tfilePaths: args.filePaths,\n\t\t\t\t\t\tcustomContext: args.customContext,\n\t\t\t\t\t\tincludeProjectTree: args.includeProjectTree || false,\n\t\t\t\t\t\tdetailLevel: args.detailLevel || 'medium',\n\t\t\t\t\t\tsaveTo: args.saveTo,\n\t\t\t\t\t\tsaveToFile: args.saveToFile || false,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error performing research',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in research tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/delete-tag.js", "/**\n * tools/delete-tag.js\n * Tool to delete an existing tag\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { deleteTagDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\n\n/**\n * Register the deleteTag tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerDeleteTagTool(server) {\n\tserver.addTool({\n\t\tname: 'delete_tag',\n\t\tdescription: 'Delete an existing tag and all its tasks',\n\t\tparameters: z.object({\n\t\t\tname: z.string().describe('Name of the tag to delete'),\n\t\t\tyes: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Skip confirmation prompts (default: true for MCP)'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: 
tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting delete-tag with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function (always skip confirmation for MCP)\n\t\t\t\tconst result = await deleteTagDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tname: args.name,\n\t\t\t\t\t\tyes: args.yes !== undefined ? 
args.yes : true, // Default to true for MCP\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error deleting tag',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in delete-tag tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/rename-tag.js", "/**\n * tools/rename-tag.js\n * Tool to rename an existing tag\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { renameTagDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\n\n/**\n * Register the renameTag tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerRenameTagTool(server) {\n\tserver.addTool({\n\t\tname: 'rename_tag',\n\t\tdescription: 'Rename an existing tag',\n\t\tparameters: z.object({\n\t\t\toldName: z.string().describe('Current name of the tag to rename'),\n\t\t\tnewName: z.string().describe('New name for the tag'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting rename-tag with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function\n\t\t\t\tconst result = await renameTagDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\toldName: args.oldName,\n\t\t\t\t\t\tnewName: args.newName,\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error renaming tag',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in rename-tag tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/profiles/amp.js", "// Amp profile for rule-transformer\nimport path from 'path';\nimport fs from 'fs';\nimport { isSilentMode, log } from '../../scripts/modules/utils.js';\nimport { createProfile } from './base-profile.js';\n\n/**\n * Transform standard MCP config format to Amp format\n * @param {Object} mcpConfig - Standard MCP configuration object\n * @returns {Object} - Transformed Amp configuration object\n */\nfunction transformToAmpFormat(mcpConfig) {\n\tconst ampConfig = {};\n\n\t// Transform mcpServers to amp.mcpServers\n\tif (mcpConfig.mcpServers) {\n\t\tampConfig['amp.mcpServers'] = 
mcpConfig.mcpServers;\n\t}\n\n\t// Preserve any other existing settings\n\tfor (const [key, value] of Object.entries(mcpConfig)) {\n\t\tif (key !== 'mcpServers') {\n\t\t\tampConfig[key] = value;\n\t\t}\n\t}\n\n\treturn ampConfig;\n}\n\n// Lifecycle functions for Amp profile\nfunction onAddRulesProfile(targetDir, assetsDir) {\n\t// Handle AGENT.md import for non-destructive integration (Amp uses AGENT.md, copies from AGENTS.md)\n\tconst sourceFile = path.join(assetsDir, 'AGENTS.md');\n\tconst userAgentFile = path.join(targetDir, 'AGENT.md');\n\tconst taskMasterAgentFile = path.join(targetDir, '.taskmaster', 'AGENT.md');\n\tconst importLine = '@./.taskmaster/AGENT.md';\n\tconst importSection = `\\n## Task Master AI Instructions\\n**Import Task Master's development workflow commands and guidelines, treat as if import is in the main AGENT.md file.**\\n${importLine}`;\n\n\tif (fs.existsSync(sourceFile)) {\n\t\ttry {\n\t\t\t// Ensure .taskmaster directory exists\n\t\t\tconst taskMasterDir = path.join(targetDir, '.taskmaster');\n\t\t\tif (!fs.existsSync(taskMasterDir)) {\n\t\t\t\tfs.mkdirSync(taskMasterDir, { recursive: true });\n\t\t\t}\n\n\t\t\t// Copy Task Master instructions to .taskmaster/AGENT.md\n\t\t\tfs.copyFileSync(sourceFile, taskMasterAgentFile);\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`[Amp] Created Task Master instructions at ${taskMasterAgentFile}`\n\t\t\t);\n\n\t\t\t// Handle user's AGENT.md\n\t\t\tif (fs.existsSync(userAgentFile)) {\n\t\t\t\t// Check if import already exists\n\t\t\t\tconst content = fs.readFileSync(userAgentFile, 'utf8');\n\t\t\t\tif (!content.includes(importLine)) {\n\t\t\t\t\t// Append import section at the end\n\t\t\t\t\tconst updatedContent = content.trim() + '\\n' + importSection + '\\n';\n\t\t\t\t\tfs.writeFileSync(userAgentFile, updatedContent);\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'info',\n\t\t\t\t\t\t`[Amp] Added Task Master import to existing ${userAgentFile}`\n\t\t\t\t\t);\n\t\t\t\t} else 
{\n\t\t\t\t\tlog(\n\t\t\t\t\t\t'info',\n\t\t\t\t\t\t`[Amp] Task Master import already present in ${userAgentFile}`\n\t\t\t\t\t);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Create minimal AGENT.md with the import section\n\t\t\t\tconst minimalContent = `# Amp Instructions\\n${importSection}\\n`;\n\t\t\t\tfs.writeFileSync(userAgentFile, minimalContent);\n\t\t\t\tlog('info', `[Amp] Created ${userAgentFile} with Task Master import`);\n\t\t\t}\n\t\t} catch (err) {\n\t\t\tlog('error', `[Amp] Failed to set up Amp instructions: ${err.message}`);\n\t\t}\n\t}\n\n\t// MCP transformation will be handled in onPostConvertRulesProfile\n}\n\nfunction onRemoveRulesProfile(targetDir) {\n\t// Clean up AGENT.md import (Amp uses AGENT.md, not AGENTS.md)\n\tconst userAgentFile = path.join(targetDir, 'AGENT.md');\n\tconst taskMasterAgentFile = path.join(targetDir, '.taskmaster', 'AGENT.md');\n\tconst importLine = '@./.taskmaster/AGENT.md';\n\n\ttry {\n\t\t// Remove Task Master AGENT.md from .taskmaster\n\t\tif (fs.existsSync(taskMasterAgentFile)) {\n\t\t\tfs.rmSync(taskMasterAgentFile, { force: true });\n\t\t\tlog('debug', `[Amp] Removed ${taskMasterAgentFile}`);\n\t\t}\n\n\t\t// Clean up import from user's AGENT.md\n\t\tif (fs.existsSync(userAgentFile)) {\n\t\t\tconst content = fs.readFileSync(userAgentFile, 'utf8');\n\t\t\tconst lines = content.split('\\n');\n\t\t\tconst filteredLines = [];\n\t\t\tlet skipNextLines = 0;\n\n\t\t\t// Remove the Task Master section\n\t\t\tfor (let i = 0; i < lines.length; i++) {\n\t\t\t\tif (skipNextLines > 0) {\n\t\t\t\t\tskipNextLines--;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t// Check if this is the start of our Task Master section\n\t\t\t\tif (lines[i].includes('## Task Master AI Instructions')) {\n\t\t\t\t\t// Skip this line and the next two lines (bold text and import)\n\t\t\t\t\tskipNextLines = 2;\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t// Also remove standalone import lines (for backward compatibility)\n\t\t\t\tif (lines[i].trim() === 
importLine) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tfilteredLines.push(lines[i]);\n\t\t\t}\n\n\t\t\t// Join back and clean up excessive newlines\n\t\t\tlet updatedContent = filteredLines\n\t\t\t\t.join('\\n')\n\t\t\t\t.replace(/\\n{3,}/g, '\\n\\n')\n\t\t\t\t.trim();\n\n\t\t\t// Check if file only contained our minimal template\n\t\t\tif (updatedContent === '# Amp Instructions' || updatedContent === '') {\n\t\t\t\t// File only contained our import, remove it\n\t\t\t\tfs.rmSync(userAgentFile, { force: true });\n\t\t\t\tlog('debug', `[Amp] Removed empty ${userAgentFile}`);\n\t\t\t} else {\n\t\t\t\t// Write back without the import\n\t\t\t\tfs.writeFileSync(userAgentFile, updatedContent + '\\n');\n\t\t\t\tlog('debug', `[Amp] Removed Task Master import from ${userAgentFile}`);\n\t\t\t}\n\t\t}\n\t} catch (err) {\n\t\tlog('error', `[Amp] Failed to remove Amp instructions: ${err.message}`);\n\t}\n\n\t// MCP Removal: Remove amp.mcpServers section\n\tconst mcpConfigPath = path.join(targetDir, '.vscode', 'settings.json');\n\n\tif (!fs.existsSync(mcpConfigPath)) {\n\t\tlog('debug', '[Amp] No .vscode/settings.json found to clean up');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the current config\n\t\tconst configContent = fs.readFileSync(mcpConfigPath, 'utf8');\n\t\tconst config = JSON.parse(configContent);\n\n\t\t// Check if it has the amp.mcpServers section and task-master-ai server\n\t\tif (\n\t\t\tconfig['amp.mcpServers'] &&\n\t\t\tconfig['amp.mcpServers']['task-master-ai']\n\t\t) {\n\t\t\t// Remove task-master-ai server\n\t\t\tdelete config['amp.mcpServers']['task-master-ai'];\n\n\t\t\t// Check if there are other MCP servers in amp.mcpServers\n\t\t\tconst remainingServers = Object.keys(config['amp.mcpServers']);\n\n\t\t\tif (remainingServers.length === 0) {\n\t\t\t\t// No other servers, remove entire amp.mcpServers section\n\t\t\t\tdelete config['amp.mcpServers'];\n\t\t\t\tlog('debug', '[Amp] Removed empty amp.mcpServers section');\n\t\t\t}\n\n\t\t\t// Check if config 
is now empty\n\t\t\tconst remainingKeys = Object.keys(config);\n\n\t\t\tif (remainingKeys.length === 0) {\n\t\t\t\t// Config is empty, remove entire file\n\t\t\t\tfs.rmSync(mcpConfigPath, { force: true });\n\t\t\t\tlog('info', '[Amp] Removed empty settings.json file');\n\n\t\t\t\t// Check if .vscode directory is empty\n\t\t\t\tconst vscodeDirPath = path.join(targetDir, '.vscode');\n\t\t\t\tif (fs.existsSync(vscodeDirPath)) {\n\t\t\t\t\tconst remainingContents = fs.readdirSync(vscodeDirPath);\n\t\t\t\t\tif (remainingContents.length === 0) {\n\t\t\t\t\t\tfs.rmSync(vscodeDirPath, { recursive: true, force: true });\n\t\t\t\t\t\tlog('debug', '[Amp] Removed empty .vscode directory');\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Write back the modified config\n\t\t\t\tfs.writeFileSync(\n\t\t\t\t\tmcpConfigPath,\n\t\t\t\t\tJSON.stringify(config, null, '\\t') + '\\n'\n\t\t\t\t);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'[Amp] Removed TaskMaster from settings.json, preserved other configurations'\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\tlog('debug', '[Amp] TaskMaster not found in amp.mcpServers');\n\t\t}\n\t} catch (error) {\n\t\tlog('error', `[Amp] Failed to clean up settings.json: ${error.message}`);\n\t}\n}\n\nfunction onPostConvertRulesProfile(targetDir, assetsDir) {\n\t// Handle AGENT.md setup (same as onAddRulesProfile)\n\tonAddRulesProfile(targetDir, assetsDir);\n\n\t// Transform MCP config to Amp format\n\tconst mcpConfigPath = path.join(targetDir, '.vscode', 'settings.json');\n\n\tif (!fs.existsSync(mcpConfigPath)) {\n\t\tlog('debug', '[Amp] No .vscode/settings.json found to transform');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the generated standard MCP config\n\t\tconst mcpConfigContent = fs.readFileSync(mcpConfigPath, 'utf8');\n\t\tconst mcpConfig = JSON.parse(mcpConfigContent);\n\n\t\t// Check if it's already in Amp format (has amp.mcpServers)\n\t\tif (mcpConfig['amp.mcpServers']) {\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t'[Amp] settings.json already 
in Amp format, skipping transformation'\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\t// Transform to Amp format\n\t\tconst ampConfig = transformToAmpFormat(mcpConfig);\n\n\t\t// Write back the transformed config with proper formatting\n\t\tfs.writeFileSync(\n\t\t\tmcpConfigPath,\n\t\t\tJSON.stringify(ampConfig, null, '\\t') + '\\n'\n\t\t);\n\n\t\tlog('info', '[Amp] Transformed settings.json to Amp format');\n\t\tlog('debug', '[Amp] Renamed mcpServers to amp.mcpServers');\n\t} catch (error) {\n\t\tlog('error', `[Amp] Failed to transform settings.json: ${error.message}`);\n\t}\n}\n\n// Create and export amp profile using the base factory\nexport const ampProfile = createProfile({\n\tname: 'amp',\n\tdisplayName: 'Amp',\n\turl: 'ampcode.com',\n\tdocsUrl: 'ampcode.com/manual',\n\tprofileDir: '.vscode',\n\trulesDir: '.',\n\tmcpConfig: true,\n\tmcpConfigName: 'settings.json',\n\tincludeDefaultRules: false,\n\tfileMap: {\n\t\t'AGENTS.md': '.taskmaster/AGENT.md'\n\t},\n\tonAdd: onAddRulesProfile,\n\tonRemove: onRemoveRulesProfile,\n\tonPostConvert: onPostConvertRulesProfile\n});\n\n// Export lifecycle functions separately to avoid naming conflicts\nexport { onAddRulesProfile, onRemoveRulesProfile, onPostConvertRulesProfile };\n"], ["/claude-task-master/src/profiles/zed.js", "// Zed profile for rule-transformer\nimport path from 'path';\nimport fs from 'fs';\nimport { isSilentMode, log } from '../../scripts/modules/utils.js';\nimport { createProfile } from './base-profile.js';\n\n/**\n * Transform standard MCP config format to Zed format\n * @param {Object} mcpConfig - Standard MCP configuration object\n * @returns {Object} - Transformed Zed configuration object\n */\nfunction transformToZedFormat(mcpConfig) {\n\tconst zedConfig = {};\n\n\t// Transform mcpServers to context_servers\n\tif (mcpConfig.mcpServers) {\n\t\tzedConfig['context_servers'] = mcpConfig.mcpServers;\n\t}\n\n\t// Preserve any other existing settings\n\tfor (const [key, value] of Object.entries(mcpConfig)) 
{\n\t\tif (key !== 'mcpServers') {\n\t\t\tzedConfig[key] = value;\n\t\t}\n\t}\n\n\treturn zedConfig;\n}\n\n// Lifecycle functions for Zed profile\nfunction onAddRulesProfile(targetDir, assetsDir) {\n\t// MCP transformation will be handled in onPostConvertRulesProfile\n\t// File copying is handled by the base profile via fileMap\n}\n\nfunction onRemoveRulesProfile(targetDir) {\n\t// Clean up .rules (Zed uses .rules directly in root)\n\tconst userRulesFile = path.join(targetDir, '.rules');\n\n\ttry {\n\t\t// Remove Task Master .rules\n\t\tif (fs.existsSync(userRulesFile)) {\n\t\t\tfs.rmSync(userRulesFile, { force: true });\n\t\t\tlog('debug', `[Zed] Removed ${userRulesFile}`);\n\t\t}\n\t} catch (err) {\n\t\tlog('error', `[Zed] Failed to remove Zed instructions: ${err.message}`);\n\t}\n\n\t// MCP Removal: Remove context_servers section\n\tconst mcpConfigPath = path.join(targetDir, '.zed', 'settings.json');\n\n\tif (!fs.existsSync(mcpConfigPath)) {\n\t\tlog('debug', '[Zed] No .zed/settings.json found to clean up');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the current config\n\t\tconst configContent = fs.readFileSync(mcpConfigPath, 'utf8');\n\t\tconst config = JSON.parse(configContent);\n\n\t\t// Check if it has the context_servers section and task-master-ai server\n\t\tif (\n\t\t\tconfig['context_servers'] &&\n\t\t\tconfig['context_servers']['task-master-ai']\n\t\t) {\n\t\t\t// Remove task-master-ai server\n\t\t\tdelete config['context_servers']['task-master-ai'];\n\n\t\t\t// Check if there are other MCP servers in context_servers\n\t\t\tconst remainingServers = Object.keys(config['context_servers']);\n\n\t\t\tif (remainingServers.length === 0) {\n\t\t\t\t// No other servers, remove entire context_servers section\n\t\t\t\tdelete config['context_servers'];\n\t\t\t\tlog('debug', '[Zed] Removed empty context_servers section');\n\t\t\t}\n\n\t\t\t// Check if config is now empty\n\t\t\tconst remainingKeys = Object.keys(config);\n\n\t\t\tif (remainingKeys.length === 0) 
{\n\t\t\t\t// Config is empty, remove entire file\n\t\t\t\tfs.rmSync(mcpConfigPath, { force: true });\n\t\t\t\tlog('info', '[Zed] Removed empty settings.json file');\n\n\t\t\t\t// Check if .zed directory is empty\n\t\t\t\tconst zedDirPath = path.join(targetDir, '.zed');\n\t\t\t\tif (fs.existsSync(zedDirPath)) {\n\t\t\t\t\tconst remainingContents = fs.readdirSync(zedDirPath);\n\t\t\t\t\tif (remainingContents.length === 0) {\n\t\t\t\t\t\tfs.rmSync(zedDirPath, { recursive: true, force: true });\n\t\t\t\t\t\tlog('debug', '[Zed] Removed empty .zed directory');\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Write back the modified config\n\t\t\t\tfs.writeFileSync(\n\t\t\t\t\tmcpConfigPath,\n\t\t\t\t\tJSON.stringify(config, null, '\\t') + '\\n'\n\t\t\t\t);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'[Zed] Removed TaskMaster from settings.json, preserved other configurations'\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\tlog('debug', '[Zed] TaskMaster not found in context_servers');\n\t\t}\n\t} catch (error) {\n\t\tlog('error', `[Zed] Failed to clean up settings.json: ${error.message}`);\n\t}\n}\n\nfunction onPostConvertRulesProfile(targetDir, assetsDir) {\n\t// Handle .rules setup (same as onAddRulesProfile)\n\tonAddRulesProfile(targetDir, assetsDir);\n\n\t// Transform MCP config to Zed format\n\tconst mcpConfigPath = path.join(targetDir, '.zed', 'settings.json');\n\n\tif (!fs.existsSync(mcpConfigPath)) {\n\t\tlog('debug', '[Zed] No .zed/settings.json found to transform');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the generated standard MCP config\n\t\tconst mcpConfigContent = fs.readFileSync(mcpConfigPath, 'utf8');\n\t\tconst mcpConfig = JSON.parse(mcpConfigContent);\n\n\t\t// Check if it's already in Zed format (has context_servers)\n\t\tif (mcpConfig['context_servers']) {\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t'[Zed] settings.json already in Zed format, skipping transformation'\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\t// Transform to Zed format\n\t\tconst zedConfig = 
transformToZedFormat(mcpConfig);\n\n\t\t// Write back the transformed config with proper formatting\n\t\tfs.writeFileSync(\n\t\t\tmcpConfigPath,\n\t\t\tJSON.stringify(zedConfig, null, '\\t') + '\\n'\n\t\t);\n\n\t\tlog('info', '[Zed] Transformed settings.json to Zed format');\n\t\tlog('debug', '[Zed] Renamed mcpServers to context_servers');\n\t} catch (error) {\n\t\tlog('error', `[Zed] Failed to transform settings.json: ${error.message}`);\n\t}\n}\n\n// Create and export zed profile using the base factory\nexport const zedProfile = createProfile({\n\tname: 'zed',\n\tdisplayName: 'Zed',\n\turl: 'zed.dev',\n\tdocsUrl: 'zed.dev/docs',\n\tprofileDir: '.zed',\n\trulesDir: '.',\n\tmcpConfig: true,\n\tmcpConfigName: 'settings.json',\n\tincludeDefaultRules: false,\n\tfileMap: {\n\t\t'AGENTS.md': '.rules'\n\t},\n\tonAdd: onAddRulesProfile,\n\tonRemove: onRemoveRulesProfile,\n\tonPostConvert: onPostConvertRulesProfile\n});\n\n// Export lifecycle functions separately to avoid naming conflicts\nexport { onAddRulesProfile, onRemoveRulesProfile, onPostConvertRulesProfile };\n"], ["/claude-task-master/mcp-server/src/tools/add-task.js", "/**\n * tools/add-task.js\n * Tool to add a new task using AI\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { addTaskDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the addTask tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerAddTaskTool(server) {\n\tserver.addTool({\n\t\tname: 'add_task',\n\t\tdescription: 'Add a new task using AI',\n\t\tparameters: z.object({\n\t\t\tprompt: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Description of the task to add (required if not using manual fields)'\n\t\t\t\t),\n\t\t\ttitle: 
z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Task title (for manual task creation)'),\n\t\t\tdescription: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Task description (for manual task creation)'),\n\t\t\tdetails: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Implementation details (for manual task creation)'),\n\t\t\ttestStrategy: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Test strategy (for manual task creation)'),\n\t\t\tdependencies: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Comma-separated list of task IDs this task depends on'),\n\t\t\tpriority: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Task priority (high, medium, low)'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Path to the tasks file (default: tasks/tasks.json)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on'),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Whether to use research capabilities for task creation')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting add-task with args: ${JSON.stringify(args)}`);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: 
${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct functionP\n\t\t\t\tconst result = await addTaskDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tprompt: args.prompt,\n\t\t\t\t\t\ttitle: args.title,\n\t\t\t\t\t\tdescription: args.description,\n\t\t\t\t\t\tdetails: args.details,\n\t\t\t\t\t\ttestStrategy: args.testStrategy,\n\t\t\t\t\t\tdependencies: args.dependencies,\n\t\t\t\t\t\tpriority: args.priority,\n\t\t\t\t\t\tresearch: args.research,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error adding task',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in add-task tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/validate-dependencies.js", "/**\n * tools/validate-dependencies.js\n * Tool for validating task dependencies\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { validateDependenciesDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the validateDependencies tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerValidateDependenciesTool(server) {\n\tserver.addTool({\n\t\tname: 'validate_dependencies',\n\t\tdescription:\n\t\t\t'Check tasks for dependency issues (like circular references or links to non-existent tasks) without making changes.',\n\t\tparameters: z.object({\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tprojectRoot: 
z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(`Validating dependencies with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await validateDependenciesDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(\n\t\t\t\t\t\t`Successfully validated dependencies: ${result.data.message}`\n\t\t\t\t\t);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to validate dependencies: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error validating dependencies',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in validateDependencies tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/add-dependency.js", "/**\n * tools/add-dependency.js\n * Tool for adding a dependency to a task\n */\n\nimport { z } from 'zod';\nimport 
{\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { addDependencyDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the addDependency tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerAddDependencyTool(server) {\n\tserver.addTool({\n\t\tname: 'add_dependency',\n\t\tdescription: 'Add a dependency relationship between two tasks',\n\t\tparameters: z.object({\n\t\t\tid: z.string().describe('ID of task that will depend on another task'),\n\t\t\tdependsOn: z\n\t\t\t\t.string()\n\t\t\t\t.describe('ID of task that will become a dependency'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the tasks file (default: tasks/tasks.json)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Adding dependency for task ${args.id} to depend on ${args.dependsOn}`\n\t\t\t\t);\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\t// Call the direct function with the resolved path\n\t\t\t\tconst result = await addDependencyDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\t// Pass the explicitly resolved path\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\t// Pass other relevant args\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tdependsOn: args.dependsOn,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog\n\t\t\t\t\t// Remove context object\n\t\t\t\t);\n\n\t\t\t\t// Log result\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully added dependency: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to add dependency: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\t// Use handleApiResult to format the response\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error adding dependency',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in addDependency tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], 
["/claude-task-master/mcp-server/src/tools/remove-dependency.js", "/**\n * tools/remove-dependency.js\n * Tool for removing a dependency from a task\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { removeDependencyDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the removeDependency tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerRemoveDependencyTool(server) {\n\tserver.addTool({\n\t\tname: 'remove_dependency',\n\t\tdescription: 'Remove a dependency from a task',\n\t\tparameters: z.object({\n\t\t\tid: z.string().describe('Task ID to remove dependency from'),\n\t\t\tdependsOn: z.string().describe('Task ID to remove as a dependency'),\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Absolute path to the tasks file (default: tasks/tasks.json)'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tlog.info(\n\t\t\t\t\t`Removing dependency for task ${args.id} from ${args.dependsOn} with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await removeDependencyDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tid: args.id,\n\t\t\t\t\t\tdependsOn: args.dependsOn,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully removed dependency: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to remove dependency: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error removing dependency',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in removeDependency tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/scripts/modules/task-manager/response-language.js", "import {\n\tgetConfig,\n\tisConfigFilePresent,\n\twriteConfig\n} from '../config-manager.js';\nimport { 
findConfigPath } from '../../../src/utils/path-utils.js';\nimport { log } from '../utils.js';\n\nfunction setResponseLanguage(lang, options = {}) {\n\tconst { mcpLog, projectRoot } = options;\n\n\tconst report = (level, ...args) => {\n\t\tif (mcpLog && typeof mcpLog[level] === 'function') {\n\t\t\tmcpLog[level](...args);\n\t\t}\n\t};\n\n\t// Use centralized config path finding instead of hardcoded path\n\tconst configPath = findConfigPath(null, { projectRoot });\n\tconst configExists = isConfigFilePresent(projectRoot);\n\n\tlog(\n\t\t'debug',\n\t\t`Checking for config file using findConfigPath, found: ${configPath}`\n\t);\n\tlog(\n\t\t'debug',\n\t\t`Checking config file using isConfigFilePresent(), exists: ${configExists}`\n\t);\n\n\tif (!configExists) {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CONFIG_MISSING',\n\t\t\t\tmessage:\n\t\t\t\t\t'The configuration file is missing. Run \"task-master init\" to create it.'\n\t\t\t}\n\t\t};\n\t}\n\n\t// Validate response language\n\tif (typeof lang !== 'string' || lang.trim() === '') {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'INVALID_RESPONSE_LANGUAGE',\n\t\t\t\tmessage: `Invalid response language: ${lang}. 
Must be a non-empty string.`\n\t\t\t}\n\t\t};\n\t}\n\n\ttry {\n\t\tconst currentConfig = getConfig(projectRoot);\n\t\tcurrentConfig.global.responseLanguage = lang;\n\t\tconst writeResult = writeConfig(currentConfig, projectRoot);\n\n\t\tif (!writeResult) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'WRITE_ERROR',\n\t\t\t\t\tmessage: 'Error writing updated configuration to configuration file'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tconst successMessage = `Successfully set response language to: ${lang}`;\n\t\treport('info', successMessage);\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\tresponseLanguage: lang,\n\t\t\t\tmessage: successMessage\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\treport('error', `Error setting response language: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'SET_RESPONSE_LANGUAGE_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}\n\nexport default setResponseLanguage;\n"], ["/claude-task-master/mcp-server/src/tools/fix-dependencies.js", "/**\n * tools/fix-dependencies.js\n * Tool for automatically fixing invalid task dependencies\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { fixDependenciesDirect } from '../core/task-master-core.js';\nimport { findTasksPath } from '../core/utils/path-utils.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n/**\n * Register the fixDependencies tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerFixDependenciesTool(server) {\n\tserver.addTool({\n\t\tname: 'fix_dependencies',\n\t\tdescription: 'Fix invalid dependencies in tasks automatically',\n\t\tparameters: z.object({\n\t\t\tfile: z.string().optional().describe('Absolute path to the tasks file'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Fixing dependencies with args: ${JSON.stringify(args)}`);\n\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tlet tasksJsonPath;\n\t\t\t\ttry {\n\t\t\t\t\ttasksJsonPath = findTasksPath(\n\t\t\t\t\t\t{ projectRoot: args.projectRoot, file: args.file },\n\t\t\t\t\t\tlog\n\t\t\t\t\t);\n\t\t\t\t} catch (error) {\n\t\t\t\t\tlog.error(`Error finding tasks.json: ${error.message}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t`Failed to find tasks.json: ${error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await fixDependenciesDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\ttasksJsonPath: tasksJsonPath,\n\t\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info(`Successfully fixed dependencies: ${result.data.message}`);\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(`Failed to fix dependencies: ${result.error.message}`);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error fixing dependencies',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in fixDependencies tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/src/profiles/base-profile.js", "// Base profile factory for rule-transformer\nimport path from 'path';\n\n/**\n * Creates a standardized profile configuration for different editors\n * @param {Object} editorConfig - Editor-specific configuration\n * @param {string} editorConfig.name - Profile 
name (e.g., 'cursor', 'vscode')\n * @param {string} [editorConfig.displayName] - Display name for the editor (defaults to name)\n * @param {string} editorConfig.url - Editor website URL\n * @param {string} editorConfig.docsUrl - Editor documentation URL\n * @param {string} editorConfig.profileDir - Directory for profile configuration\n * @param {string} [editorConfig.rulesDir] - Directory for rules files (defaults to profileDir/rules)\n * @param {boolean} [editorConfig.mcpConfig=true] - Whether to create MCP configuration\n * @param {string} [editorConfig.mcpConfigName='mcp.json'] - Name of MCP config file\n * @param {string} [editorConfig.fileExtension='.mdc'] - Source file extension\n * @param {string} [editorConfig.targetExtension='.md'] - Target file extension\n * @param {Object} [editorConfig.toolMappings={}] - Tool name mappings\n * @param {Array} [editorConfig.customReplacements=[]] - Custom text replacements\n * @param {Object} [editorConfig.fileMap={}] - Custom file name mappings\n * @param {boolean} [editorConfig.supportsRulesSubdirectories=false] - Whether to use taskmaster/ subdirectory for taskmaster-specific rules (only Cursor uses this by default)\n * @param {boolean} [editorConfig.includeDefaultRules=true] - Whether to include default rule files\n * @param {Function} [editorConfig.onAdd] - Lifecycle hook for profile addition\n * @param {Function} [editorConfig.onRemove] - Lifecycle hook for profile removal\n * @param {Function} [editorConfig.onPostConvert] - Lifecycle hook for post-conversion\n * @returns {Object} - Complete profile configuration\n */\nexport function createProfile(editorConfig) {\n\tconst {\n\t\tname,\n\t\tdisplayName = name,\n\t\turl,\n\t\tdocsUrl,\n\t\tprofileDir = `.${name.toLowerCase()}`,\n\t\trulesDir = `${profileDir}/rules`,\n\t\tmcpConfig = true,\n\t\tmcpConfigName = mcpConfig ? 
'mcp.json' : null,\n\t\tfileExtension = '.mdc',\n\t\ttargetExtension = '.md',\n\t\ttoolMappings = {},\n\t\tcustomReplacements = [],\n\t\tfileMap = {},\n\t\tsupportsRulesSubdirectories = false,\n\t\tincludeDefaultRules = true,\n\t\tonAdd,\n\t\tonRemove,\n\t\tonPostConvert\n\t} = editorConfig;\n\n\tconst mcpConfigPath = mcpConfigName\n\t\t? path.join(profileDir, mcpConfigName)\n\t\t: null;\n\n\t// Standard file mapping with custom overrides\n\t// Use taskmaster subdirectory only if profile supports it\n\tconst taskmasterPrefix = supportsRulesSubdirectories ? 'taskmaster/' : '';\n\tconst defaultFileMap = {\n\t\t'rules/cursor_rules.mdc': `${name.toLowerCase()}_rules${targetExtension}`,\n\t\t'rules/dev_workflow.mdc': `${taskmasterPrefix}dev_workflow${targetExtension}`,\n\t\t'rules/self_improve.mdc': `self_improve${targetExtension}`,\n\t\t'rules/taskmaster.mdc': `${taskmasterPrefix}taskmaster${targetExtension}`\n\t};\n\n\t// Build final fileMap - merge defaults with custom entries when includeDefaultRules is true\n\tconst finalFileMap = includeDefaultRules\n\t\t? { ...defaultFileMap, ...fileMap }\n\t\t: fileMap;\n\n\t// Base global replacements that work for all editors\n\tconst baseGlobalReplacements = [\n\t\t// Handle URLs in any context\n\t\t{ from: /cursor\\.so/gi, to: url },\n\t\t{ from: /cursor\\s*\\.\\s*so/gi, to: url },\n\t\t{ from: /https?:\\/\\/cursor\\.so/gi, to: `https://${url}` },\n\t\t{ from: /https?:\\/\\/www\\.cursor\\.so/gi, to: `https://www.${url}` },\n\n\t\t// Handle tool references\n\t\t{ from: /\\bedit_file\\b/gi, to: toolMappings.edit_file || 'edit_file' },\n\t\t{\n\t\t\tfrom: /\\bsearch tool\\b/gi,\n\t\t\tto: `${toolMappings.search || 'search'} tool`\n\t\t},\n\t\t{ from: /\\bSearch Tool\\b/g, to: `${toolMappings.search || 'Search'} Tool` },\n\n\t\t// Handle basic terms with proper case handling\n\t\t{\n\t\t\tfrom: /\\bcursor\\b/gi,\n\t\t\tto: (match) =>\n\t\t\t\tmatch.charAt(0) === 'C' ? 
displayName : name.toLowerCase()\n\t\t},\n\t\t{ from: /Cursor/g, to: displayName },\n\t\t{ from: /CURSOR/g, to: displayName.toUpperCase() },\n\n\t\t// Handle file extensions if different\n\t\t...(targetExtension !== fileExtension\n\t\t\t? [\n\t\t\t\t\t{\n\t\t\t\t\t\tfrom: new RegExp(`\\\\${fileExtension}(?!\\\\])\\\\b`, 'g'),\n\t\t\t\t\t\tto: targetExtension\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t: []),\n\n\t\t// Handle documentation URLs\n\t\t{ from: /docs\\.cursor\\.com/gi, to: docsUrl },\n\n\t\t// Custom editor-specific replacements\n\t\t...customReplacements\n\t];\n\n\t// Standard tool mappings\n\tconst defaultToolMappings = {\n\t\tsearch: 'search',\n\t\tread_file: 'read_file',\n\t\tedit_file: 'edit_file',\n\t\tcreate_file: 'create_file',\n\t\trun_command: 'run_command',\n\t\tterminal_command: 'terminal_command',\n\t\tuse_mcp: 'use_mcp',\n\t\tswitch_mode: 'switch_mode',\n\t\t...toolMappings\n\t};\n\n\t// Create conversion config\n\tconst conversionConfig = {\n\t\t// Profile name replacements\n\t\tprofileTerms: [\n\t\t\t{ from: /cursor\\.so/g, to: url },\n\t\t\t{ from: /\\[cursor\\.so\\]/g, to: `[${url}]` },\n\t\t\t{ from: /href=\"https:\\/\\/cursor\\.so/g, to: `href=\"https://${url}` },\n\t\t\t{ from: /\\(https:\\/\\/cursor\\.so/g, to: `(https://${url}` },\n\t\t\t{\n\t\t\t\tfrom: /\\bcursor\\b/gi,\n\t\t\t\tto: (match) => (match === 'Cursor' ? displayName : name.toLowerCase())\n\t\t\t},\n\t\t\t{ from: /Cursor/g, to: displayName }\n\t\t],\n\n\t\t// File extension replacements\n\t\tfileExtensions:\n\t\t\ttargetExtension !== fileExtension\n\t\t\t\t? 
[\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfrom: new RegExp(`\\\\${fileExtension}\\\\b`, 'g'),\n\t\t\t\t\t\t\tto: targetExtension\n\t\t\t\t\t\t}\n\t\t\t\t\t]\n\t\t\t\t: [],\n\n\t\t// Documentation URL replacements\n\t\tdocUrls: [\n\t\t\t{\n\t\t\t\tfrom: new RegExp(`https:\\\\/\\\\/docs\\\\.cursor\\\\.com\\\\/[^\\\\s)'\\\"]+`, 'g'),\n\t\t\t\tto: (match) => match.replace('docs.cursor.com', docsUrl)\n\t\t\t},\n\t\t\t{\n\t\t\t\tfrom: new RegExp(`https:\\\\/\\\\/${docsUrl}\\\\/`, 'g'),\n\t\t\t\tto: `https://${docsUrl}/`\n\t\t\t}\n\t\t],\n\n\t\t// Tool references - direct replacements\n\t\ttoolNames: defaultToolMappings,\n\n\t\t// Tool references in context - more specific replacements\n\t\ttoolContexts: Object.entries(defaultToolMappings).flatMap(\n\t\t\t([original, mapped]) => [\n\t\t\t\t{\n\t\t\t\t\tfrom: new RegExp(`\\\\b${original} tool\\\\b`, 'g'),\n\t\t\t\t\tto: `${mapped} tool`\n\t\t\t\t},\n\t\t\t\t{ from: new RegExp(`\\\\bthe ${original}\\\\b`, 'g'), to: `the ${mapped}` },\n\t\t\t\t{ from: new RegExp(`\\\\bThe ${original}\\\\b`, 'g'), to: `The ${mapped}` },\n\t\t\t\t{\n\t\t\t\t\tfrom: new RegExp(`\\\\bCursor ${original}\\\\b`, 'g'),\n\t\t\t\t\tto: `${displayName} ${mapped}`\n\t\t\t\t}\n\t\t\t]\n\t\t),\n\n\t\t// Tool group and category names\n\t\ttoolGroups: [\n\t\t\t{ from: /\\bSearch tools\\b/g, to: 'Read Group tools' },\n\t\t\t{ from: /\\bEdit tools\\b/g, to: 'Edit Group tools' },\n\t\t\t{ from: /\\bRun tools\\b/g, to: 'Command Group tools' },\n\t\t\t{ from: /\\bMCP servers\\b/g, to: 'MCP Group tools' },\n\t\t\t{ from: /\\bSearch Group\\b/g, to: 'Read Group' },\n\t\t\t{ from: /\\bEdit Group\\b/g, to: 'Edit Group' },\n\t\t\t{ from: /\\bRun Group\\b/g, to: 'Command Group' }\n\t\t],\n\n\t\t// File references in markdown links\n\t\tfileReferences: {\n\t\t\tpathPattern: /\\[(.+?)\\]\\(mdc:\\.cursor\\/rules\\/(.+?)\\.mdc\\)/g,\n\t\t\treplacement: (match, text, filePath) => {\n\t\t\t\tconst baseName = path.basename(filePath, '.mdc');\n\t\t\t\tconst newFileName 
=\n\t\t\t\t\tfinalFileMap[`rules/${baseName}.mdc`] ||\n\t\t\t\t\t`${baseName}${targetExtension}`;\n\t\t\t\t// Update the link text to match the new filename (strip directory path for display)\n\t\t\t\tconst newLinkText = path.basename(newFileName);\n\t\t\t\t// For Cursor, keep the mdc: protocol; for others, use standard relative paths\n\t\t\t\tif (name.toLowerCase() === 'cursor') {\n\t\t\t\t\treturn `[${newLinkText}](mdc:${rulesDir}/${newFileName})`;\n\t\t\t\t} else {\n\t\t\t\t\treturn `[${newLinkText}](${rulesDir}/${newFileName})`;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t};\n\n\tfunction getTargetRuleFilename(sourceFilename) {\n\t\tif (finalFileMap[sourceFilename]) {\n\t\t\treturn finalFileMap[sourceFilename];\n\t\t}\n\t\treturn targetExtension !== fileExtension\n\t\t\t? sourceFilename.replace(\n\t\t\t\t\tnew RegExp(`\\\\${fileExtension}$`),\n\t\t\t\t\ttargetExtension\n\t\t\t\t)\n\t\t\t: sourceFilename;\n\t}\n\n\treturn {\n\t\tprofileName: name, // Use name for programmatic access (tests expect this)\n\t\tdisplayName: displayName, // Keep displayName for UI purposes\n\t\tprofileDir,\n\t\trulesDir,\n\t\tmcpConfig,\n\t\tmcpConfigName,\n\t\tmcpConfigPath,\n\t\tsupportsRulesSubdirectories,\n\t\tincludeDefaultRules,\n\t\tfileMap: finalFileMap,\n\t\tglobalReplacements: baseGlobalReplacements,\n\t\tconversionConfig,\n\t\tgetTargetRuleFilename,\n\t\ttargetExtension,\n\t\t// Optional lifecycle hooks\n\t\t...(onAdd && { onAddRulesProfile: onAdd }),\n\t\t...(onRemove && { onRemoveRulesProfile: onRemove }),\n\t\t...(onPostConvert && { onPostConvertRulesProfile: onPostConvert })\n\t};\n}\n\n// Common tool mappings for editors that share similar tool sets\nexport const COMMON_TOOL_MAPPINGS = {\n\t// Most editors (Cursor, Cline, Windsurf) keep original tool names\n\tSTANDARD: {},\n\n\t// Roo Code uses different tool names\n\tROO_STYLE: {\n\t\tedit_file: 'apply_diff',\n\t\tsearch: 'search_files',\n\t\tcreate_file: 'write_to_file',\n\t\trun_command: 'execute_command',\n\t\tterminal_command: 
'execute_command',\n\t\tuse_mcp: 'use_mcp_tool'\n\t}\n};\n"], ["/claude-task-master/src/profiles/vscode.js", "// VS Code conversion profile for rule-transformer\nimport path from 'path';\nimport fs from 'fs';\nimport { log } from '../../scripts/modules/utils.js';\nimport { createProfile, COMMON_TOOL_MAPPINGS } from './base-profile.js';\n\n/**\n * Transform standard MCP config format to VS Code format\n * @param {Object} mcpConfig - Standard MCP configuration object\n * @returns {Object} - Transformed VS Code configuration object\n */\nfunction transformToVSCodeFormat(mcpConfig) {\n\tconst vscodeConfig = {};\n\n\t// Transform mcpServers to servers\n\tif (mcpConfig.mcpServers) {\n\t\tvscodeConfig.servers = {};\n\n\t\tfor (const [serverName, serverConfig] of Object.entries(\n\t\t\tmcpConfig.mcpServers\n\t\t)) {\n\t\t\t// Transform server configuration\n\t\t\tconst transformedServer = {\n\t\t\t\t...serverConfig\n\t\t\t};\n\n\t\t\t// Add type: \"stdio\" after the env block\n\t\t\tif (transformedServer.env) {\n\t\t\t\t// Reorder properties: keep command, args, env, then add type\n\t\t\t\tconst reorderedServer = {};\n\t\t\t\tif (transformedServer.command)\n\t\t\t\t\treorderedServer.command = transformedServer.command;\n\t\t\t\tif (transformedServer.args)\n\t\t\t\t\treorderedServer.args = transformedServer.args;\n\t\t\t\tif (transformedServer.env) reorderedServer.env = transformedServer.env;\n\t\t\t\treorderedServer.type = 'stdio';\n\n\t\t\t\t// Add any other properties that might exist\n\t\t\t\tObject.keys(transformedServer).forEach((key) => {\n\t\t\t\t\tif (!['command', 'args', 'env', 'type'].includes(key)) {\n\t\t\t\t\t\treorderedServer[key] = transformedServer[key];\n\t\t\t\t\t}\n\t\t\t\t});\n\n\t\t\t\tvscodeConfig.servers[serverName] = reorderedServer;\n\t\t\t} else {\n\t\t\t\t// If no env block, just add type at the end\n\t\t\t\ttransformedServer.type = 'stdio';\n\t\t\t\tvscodeConfig.servers[serverName] = transformedServer;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 
vscodeConfig;\n}\n\n/**\n * Lifecycle function called after MCP config generation to transform to VS Code format\n * @param {string} targetDir - Target project directory\n * @param {string} assetsDir - Assets directory (unused for VS Code)\n */\nfunction onPostConvertRulesProfile(targetDir, assetsDir) {\n\tconst vscodeConfigPath = path.join(targetDir, '.vscode', 'mcp.json');\n\n\tif (!fs.existsSync(vscodeConfigPath)) {\n\t\tlog('debug', '[VS Code] No .vscode/mcp.json found to transform');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the generated standard MCP config\n\t\tconst mcpConfigContent = fs.readFileSync(vscodeConfigPath, 'utf8');\n\t\tconst mcpConfig = JSON.parse(mcpConfigContent);\n\n\t\t// Check if it's already in VS Code format (has servers instead of mcpServers)\n\t\tif (mcpConfig.servers) {\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t'[VS Code] mcp.json already in VS Code format, skipping transformation'\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\t// Transform to VS Code format\n\t\tconst vscodeConfig = transformToVSCodeFormat(mcpConfig);\n\n\t\t// Write back the transformed config with proper formatting\n\t\tfs.writeFileSync(\n\t\t\tvscodeConfigPath,\n\t\t\tJSON.stringify(vscodeConfig, null, 2) + '\\n'\n\t\t);\n\n\t\tlog('info', '[VS Code] Transformed mcp.json to VS Code format');\n\t\tlog('debug', `[VS Code] Renamed mcpServers->servers, added type: \"stdio\"`);\n\t} catch (error) {\n\t\tlog('error', `[VS Code] Failed to transform mcp.json: ${error.message}`);\n\t}\n}\n\n/**\n * Lifecycle function called when removing VS Code profile\n * @param {string} targetDir - Target project directory\n */\nfunction onRemoveRulesProfile(targetDir) {\n\tconst vscodeConfigPath = path.join(targetDir, '.vscode', 'mcp.json');\n\n\tif (!fs.existsSync(vscodeConfigPath)) {\n\t\tlog('debug', '[VS Code] No .vscode/mcp.json found to clean up');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the current config\n\t\tconst configContent = fs.readFileSync(vscodeConfigPath, 'utf8');\n\t\tconst 
config = JSON.parse(configContent);\n\n\t\t// Check if it has the servers section and task-master-ai server\n\t\tif (config.servers && config.servers['task-master-ai']) {\n\t\t\t// Remove task-master-ai server\n\t\t\tdelete config.servers['task-master-ai'];\n\n\t\t\t// Check if there are other MCP servers\n\t\t\tconst remainingServers = Object.keys(config.servers);\n\n\t\t\tif (remainingServers.length === 0) {\n\t\t\t\t// No other servers, remove entire file\n\t\t\t\tfs.rmSync(vscodeConfigPath, { force: true });\n\t\t\t\tlog('info', '[VS Code] Removed empty mcp.json file');\n\n\t\t\t\t// Also remove .vscode directory if it's empty\n\t\t\t\tconst vscodeDir = path.dirname(vscodeConfigPath);\n\t\t\t\ttry {\n\t\t\t\t\tconst dirContents = fs.readdirSync(vscodeDir);\n\t\t\t\t\tif (dirContents.length === 0) {\n\t\t\t\t\t\tfs.rmSync(vscodeDir, { recursive: true, force: true });\n\t\t\t\t\t\tlog('debug', '[VS Code] Removed empty .vscode directory');\n\t\t\t\t\t}\n\t\t\t\t} catch (err) {\n\t\t\t\t\t// Directory might not be empty or might not exist, that's fine\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Write back the modified config\n\t\t\t\tfs.writeFileSync(\n\t\t\t\t\tvscodeConfigPath,\n\t\t\t\t\tJSON.stringify(config, null, 2) + '\\n'\n\t\t\t\t);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'[VS Code] Removed TaskMaster from mcp.json, preserved other configurations'\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\tlog('debug', '[VS Code] TaskMaster not found in mcp.json');\n\t\t}\n\t} catch (error) {\n\t\tlog('error', `[VS Code] Failed to clean up mcp.json: ${error.message}`);\n\t}\n}\n\n// Create and export vscode profile using the base factory\nexport const vscodeProfile = createProfile({\n\tname: 'vscode',\n\tdisplayName: 'VS Code',\n\turl: 'code.visualstudio.com',\n\tdocsUrl: 'code.visualstudio.com/docs',\n\trulesDir: '.github/instructions', // VS Code instructions location\n\tprofileDir: '.vscode', // VS Code configuration directory\n\tmcpConfigName: 'mcp.json', // VS Code uses 
mcp.json in .vscode directory\n\ttargetExtension: '.instructions.md',\n\tcustomReplacements: [\n\t\t// Core VS Code directory structure changes\n\t\t{ from: /\\.cursor\\/rules/g, to: '.github/instructions' },\n\t\t{ from: /\\.cursor\\/mcp\\.json/g, to: '.vscode/mcp.json' },\n\n\t\t// Fix any remaining vscode/rules references that might be created during transformation\n\t\t{ from: /\\.vscode\\/rules/g, to: '.github/instructions' },\n\n\t\t// VS Code custom instructions format - use applyTo with quoted patterns instead of globs\n\t\t{ from: /^globs:\\s*(.+)$/gm, to: 'applyTo: \"$1\"' },\n\n\t\t// Remove unsupported property - alwaysApply\n\t\t{ from: /^alwaysApply:\\s*(true|false)\\s*\\n?/gm, to: '' },\n\n\t\t// Essential markdown link transformations for VS Code structure\n\t\t{\n\t\t\tfrom: /\\[(.+?)\\]\\(mdc:\\.cursor\\/rules\\/(.+?)\\.mdc\\)/g,\n\t\t\tto: '[$1](.github/instructions/$2.instructions.md)'\n\t\t},\n\n\t\t// VS Code specific terminology\n\t\t{ from: /rules directory/g, to: 'instructions directory' },\n\t\t{ from: /cursor rules/gi, to: 'VS Code instructions' }\n\t],\n\tonPostConvert: onPostConvertRulesProfile,\n\tonRemove: onRemoveRulesProfile\n});\n\n// Export lifecycle functions separately to avoid naming conflicts\nexport { onPostConvertRulesProfile, onRemoveRulesProfile };\n"], ["/claude-task-master/mcp-server/src/core/utils/path-utils.js", "import path from 'path';\nimport {\n\tfindTasksPath as coreFindTasksPath,\n\tfindPRDPath as coreFindPrdPath,\n\tfindComplexityReportPath as coreFindComplexityReportPath,\n\tfindProjectRoot as coreFindProjectRoot,\n\tnormalizeProjectRoot\n} from '../../../../src/utils/path-utils.js';\nimport { PROJECT_MARKERS } from '../../../../src/constants/paths.js';\n\n/**\n * MCP-specific path utilities that extend core path utilities with session support\n * This module handles session-specific path resolution for the MCP server\n */\n\n/**\n * Silent logger for MCP context to prevent console output\n */\nconst 
silentLogger = {\n\tinfo: () => {},\n\twarn: () => {},\n\terror: () => {},\n\tdebug: () => {},\n\tsuccess: () => {}\n};\n\n/**\n * Cache for last found project root to improve performance\n */\nexport const lastFoundProjectRoot = null;\n\n/**\n * Find PRD file with MCP support\n * @param {string} [explicitPath] - Explicit path to PRD file (highest priority)\n * @param {Object} [args] - Arguments object for context\n * @param {Object} [log] - Logger object to prevent console logging\n * @returns {string|null} - Resolved path to PRD file or null if not found\n */\nexport function findPrdPath(explicitPath, args = null, log = silentLogger) {\n\treturn coreFindPrdPath(explicitPath, args, log);\n}\n\n/**\n * Resolve tasks.json path from arguments\n * Prioritizes explicit path parameter, then uses fallback logic\n * @param {Object} args - Arguments object containing projectRoot and optional file path\n * @param {Object} [log] - Logger object to prevent console logging\n * @returns {string|null} - Resolved path to tasks.json or null if not found\n */\nexport function resolveTasksPath(args, log = silentLogger) {\n\t// Get explicit path from args.file if provided\n\tconst explicitPath = args?.file;\n\tconst rawProjectRoot = args?.projectRoot;\n\n\t// If explicit path is provided and absolute, use it directly\n\tif (explicitPath && path.isAbsolute(explicitPath)) {\n\t\treturn explicitPath;\n\t}\n\n\t// Normalize project root if provided\n\tconst projectRoot = rawProjectRoot\n\t\t? 
normalizeProjectRoot(rawProjectRoot)\n\t\t: null;\n\n\t// If explicit path is relative, resolve it relative to normalized projectRoot\n\tif (explicitPath && projectRoot) {\n\t\treturn path.resolve(projectRoot, explicitPath);\n\t}\n\n\t// Use core findTasksPath with explicit path and normalized projectRoot context\n\tif (projectRoot) {\n\t\treturn coreFindTasksPath(explicitPath, { projectRoot }, log);\n\t}\n\n\t// Fallback to core function without projectRoot context\n\treturn coreFindTasksPath(explicitPath, null, log);\n}\n\n/**\n * Resolve PRD path from arguments\n * @param {Object} args - Arguments object containing projectRoot and optional input path\n * @param {Object} [log] - Logger object to prevent console logging\n * @returns {string|null} - Resolved path to PRD file or null if not found\n */\nexport function resolvePrdPath(args, log = silentLogger) {\n\t// Get explicit path from args.input if provided\n\tconst explicitPath = args?.input;\n\tconst rawProjectRoot = args?.projectRoot;\n\n\t// If explicit path is provided and absolute, use it directly\n\tif (explicitPath && path.isAbsolute(explicitPath)) {\n\t\treturn explicitPath;\n\t}\n\n\t// Normalize project root if provided\n\tconst projectRoot = rawProjectRoot\n\t\t? 
normalizeProjectRoot(rawProjectRoot)\n\t\t: null;\n\n\t// If explicit path is relative, resolve it relative to normalized projectRoot\n\tif (explicitPath && projectRoot) {\n\t\treturn path.resolve(projectRoot, explicitPath);\n\t}\n\n\t// Use core findPRDPath with explicit path and normalized projectRoot context\n\tif (projectRoot) {\n\t\treturn coreFindPrdPath(explicitPath, { projectRoot }, log);\n\t}\n\n\t// Fallback to core function without projectRoot context\n\treturn coreFindPrdPath(explicitPath, null, log);\n}\n\n/**\n * Resolve complexity report path from arguments\n * @param {Object} args - Arguments object containing projectRoot and optional complexityReport path\n * @param {Object} [log] - Logger object to prevent console logging\n * @returns {string|null} - Resolved path to complexity report or null if not found\n */\nexport function resolveComplexityReportPath(args, log = silentLogger) {\n\t// Get explicit path from args.complexityReport if provided\n\tconst explicitPath = args?.complexityReport;\n\tconst rawProjectRoot = args?.projectRoot;\n\tconst tag = args?.tag;\n\n\t// If explicit path is provided and absolute, use it directly\n\tif (explicitPath && path.isAbsolute(explicitPath)) {\n\t\treturn explicitPath;\n\t}\n\n\t// Normalize project root if provided\n\tconst projectRoot = rawProjectRoot\n\t\t? 
normalizeProjectRoot(rawProjectRoot)\n\t\t: null;\n\n\t// If explicit path is relative, resolve it relative to normalized projectRoot\n\tif (explicitPath && projectRoot) {\n\t\treturn path.resolve(projectRoot, explicitPath);\n\t}\n\n\t// Use core findComplexityReportPath with explicit path and normalized projectRoot context\n\tif (projectRoot) {\n\t\treturn coreFindComplexityReportPath(\n\t\t\texplicitPath,\n\t\t\t{ projectRoot, tag },\n\t\t\tlog\n\t\t);\n\t}\n\n\t// Fallback to core function without projectRoot context\n\treturn coreFindComplexityReportPath(explicitPath, null, log);\n}\n\n/**\n * Resolve any project-relative path from arguments\n * @param {string} relativePath - Relative path to resolve\n * @param {Object} args - Arguments object containing projectRoot\n * @returns {string} - Resolved absolute path\n */\nexport function resolveProjectPath(relativePath, args) {\n\t// Ensure we have a projectRoot from args\n\tif (!args?.projectRoot) {\n\t\tthrow new Error('projectRoot is required in args to resolve project paths');\n\t}\n\n\t// Normalize the project root to prevent double .taskmaster paths\n\tconst projectRoot = normalizeProjectRoot(args.projectRoot);\n\n\t// If already absolute, return as-is\n\tif (path.isAbsolute(relativePath)) {\n\t\treturn relativePath;\n\t}\n\n\t// Resolve relative to normalized projectRoot\n\treturn path.resolve(projectRoot, relativePath);\n}\n\n/**\n * Find project root using core utility\n * @param {string} [startDir] - Directory to start searching from\n * @returns {string|null} - Project root path or null if not found\n */\nexport function findProjectRoot(startDir) {\n\treturn coreFindProjectRoot(startDir);\n}\n\n// MAIN EXPORTS FOR MCP TOOLS - these are the functions MCP tools should use\n\n/**\n * Find tasks.json path from arguments - primary MCP function\n * @param {Object} args - Arguments object containing projectRoot and optional file path\n * @param {Object} [log] - Log function to prevent console logging\n * 
@returns {string|null} - Resolved path to tasks.json or null if not found\n */\nexport function findTasksPath(args, log = silentLogger) {\n\treturn resolveTasksPath(args, log);\n}\n\n/**\n * Find complexity report path from arguments - primary MCP function\n * @param {Object} args - Arguments object containing projectRoot and optional complexityReport path\n * @param {Object} [log] - Log function to prevent console logging\n * @returns {string|null} - Resolved path to complexity report or null if not found\n */\nexport function findComplexityReportPath(args, log = silentLogger) {\n\treturn resolveComplexityReportPath(args, log);\n}\n\n/**\n * Find PRD path - primary MCP function\n * @param {string} [explicitPath] - Explicit path to PRD file\n * @param {Object} [args] - Arguments object for context (not used in current implementation)\n * @param {Object} [log] - Logger object to prevent console logging\n * @returns {string|null} - Resolved path to PRD file or null if not found\n */\nexport function findPRDPath(explicitPath, args = null, log = silentLogger) {\n\treturn findPrdPath(explicitPath, args, log);\n}\n\n// Legacy aliases for backward compatibility - DEPRECATED\nexport const findTasksJsonPath = findTasksPath;\nexport const findComplexityReportJsonPath = findComplexityReportPath;\n\n// Re-export PROJECT_MARKERS for MCP tools that import it from this module\nexport { PROJECT_MARKERS };\n"], ["/claude-task-master/src/ai-providers/custom-sdk/claude-code/message-converter.js", "/**\n * @fileoverview Converts AI SDK prompt format to Claude Code message format\n */\n\n/**\n * Convert AI SDK prompt to Claude Code messages format\n * @param {Array} prompt - AI SDK prompt array\n * @param {Object} [mode] - Generation mode\n * @param {string} mode.type - Mode type ('regular', 'object-json', 'object-tool')\n * @returns {{messagesPrompt: string, systemPrompt?: string}}\n */\nexport function convertToClaudeCodeMessages(prompt, mode) {\n\tconst messages = [];\n\tlet 
systemPrompt;\n\n\tfor (const message of prompt) {\n\t\tswitch (message.role) {\n\t\t\tcase 'system':\n\t\t\t\tsystemPrompt = message.content;\n\t\t\t\tbreak;\n\n\t\t\tcase 'user':\n\t\t\t\tif (typeof message.content === 'string') {\n\t\t\t\t\tmessages.push(message.content);\n\t\t\t\t} else {\n\t\t\t\t\t// Handle multi-part content\n\t\t\t\t\tconst textParts = message.content\n\t\t\t\t\t\t.filter((part) => part.type === 'text')\n\t\t\t\t\t\t.map((part) => part.text)\n\t\t\t\t\t\t.join('\\n');\n\n\t\t\t\t\tif (textParts) {\n\t\t\t\t\t\tmessages.push(textParts);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Note: Image parts are not supported by Claude Code CLI\n\t\t\t\t\tconst imageParts = message.content.filter(\n\t\t\t\t\t\t(part) => part.type === 'image'\n\t\t\t\t\t);\n\t\t\t\t\tif (imageParts.length > 0) {\n\t\t\t\t\t\tconsole.warn(\n\t\t\t\t\t\t\t'Claude Code CLI does not support image inputs. Images will be ignored.'\n\t\t\t\t\t\t);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase 'assistant':\n\t\t\t\tif (typeof message.content === 'string') {\n\t\t\t\t\tmessages.push(`Assistant: ${message.content}`);\n\t\t\t\t} else {\n\t\t\t\t\tconst textParts = message.content\n\t\t\t\t\t\t.filter((part) => part.type === 'text')\n\t\t\t\t\t\t.map((part) => part.text)\n\t\t\t\t\t\t.join('\\n');\n\n\t\t\t\t\tif (textParts) {\n\t\t\t\t\t\tmessages.push(`Assistant: ${textParts}`);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Handle tool calls if present\n\t\t\t\t\tconst toolCalls = message.content.filter(\n\t\t\t\t\t\t(part) => part.type === 'tool-call'\n\t\t\t\t\t);\n\t\t\t\t\tif (toolCalls.length > 0) {\n\t\t\t\t\t\t// For now, we'll just note that tool calls were made\n\t\t\t\t\t\tmessages.push(`Assistant: [Tool calls made]`);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak;\n\n\t\t\tcase 'tool':\n\t\t\t\t// Tool results could be included in the conversation\n\t\t\t\tmessages.push(\n\t\t\t\t\t`Tool Result (${message.content[0].toolName}): 
${JSON.stringify(\n\t\t\t\t\t\tmessage.content[0].result\n\t\t\t\t\t)}`\n\t\t\t\t);\n\t\t\t\tbreak;\n\t\t}\n\t}\n\n\t// For the SDK, we need to provide a single prompt string\n\t// Format the conversation history properly\n\n\t// Combine system prompt with messages\n\tlet finalPrompt = '';\n\n\t// Add system prompt at the beginning if present\n\tif (systemPrompt) {\n\t\tfinalPrompt = systemPrompt;\n\t}\n\n\tif (messages.length === 0) {\n\t\treturn { messagesPrompt: finalPrompt, systemPrompt };\n\t}\n\n\t// Format messages\n\tconst formattedMessages = [];\n\tfor (let i = 0; i < messages.length; i++) {\n\t\tconst msg = messages[i];\n\t\t// Check if this is a user or assistant message based on content\n\t\tif (msg.startsWith('Assistant:') || msg.startsWith('Tool Result')) {\n\t\t\tformattedMessages.push(msg);\n\t\t} else {\n\t\t\t// User messages\n\t\t\tformattedMessages.push(`Human: ${msg}`);\n\t\t}\n\t}\n\n\t// Combine system prompt with messages\n\tif (finalPrompt) {\n\t\tfinalPrompt = finalPrompt + '\\n\\n' + formattedMessages.join('\\n\\n');\n\t} else {\n\t\tfinalPrompt = formattedMessages.join('\\n\\n');\n\t}\n\n\t// For JSON mode, add explicit instruction to ensure JSON output\n\tif (mode?.type === 'object-json') {\n\t\t// Make the JSON instruction even more explicit\n\t\tfinalPrompt = `${finalPrompt}\n\nCRITICAL INSTRUCTION: You MUST respond with ONLY valid JSON. Follow these rules EXACTLY:\n1. Start your response with an opening brace {\n2. End your response with a closing brace }\n3. Do NOT include any text before the opening brace\n4. Do NOT include any text after the closing brace\n5. Do NOT use markdown code blocks or backticks\n6. Do NOT include explanations or commentary\n7. 
The ENTIRE response must be valid JSON that can be parsed with JSON.parse()\n\nBegin your response with { and end with }`;\n\t}\n\n\treturn {\n\t\tmessagesPrompt: finalPrompt,\n\t\tsystemPrompt\n\t};\n}\n"], ["/claude-task-master/index.js", "#!/usr/bin/env node\n\n/**\n * Task Master\n * Copyright (c) 2025 Eyal Toledano, Ralph Khreish\n *\n * This software is licensed under the MIT License with Commons Clause.\n * You may use this software for any purpose, including commercial applications,\n * and modify and redistribute it freely, subject to the following restrictions:\n *\n * 1. You may not sell this software or offer it as a service.\n * 2. The origin of this software must not be misrepresented.\n * 3. Altered source versions must be plainly marked as such.\n *\n * For the full license text, see the LICENSE file in the root directory.\n */\n\n/**\n * Claude Task Master\n * A task management system for AI-driven development with Claude\n */\n\n// This file serves as the main entry point for the package\n// The primary functionality is provided through the CLI commands\n\nimport { fileURLToPath } from 'url';\nimport { dirname, resolve } from 'path';\nimport { createRequire } from 'module';\nimport { spawn } from 'child_process';\nimport { Command } from 'commander';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = dirname(__filename);\nconst require = createRequire(import.meta.url);\n\n// Get package information\nconst packageJson = require('./package.json');\n\n// Export the path to the dev.js script for programmatic usage\nexport const devScriptPath = resolve(__dirname, './scripts/dev.js');\n\n// Export a function to initialize a new project programmatically\nexport const initProject = async (options = {}) => {\n\tconst init = await import('./scripts/init.js');\n\treturn init.initializeProject(options);\n};\n\n// Export a function to run init as a CLI command\nexport const runInitCLI = async (options = {}) => {\n\ttry {\n\t\tconst init = 
await import('./scripts/init.js');\n\t\tconst result = await init.initializeProject(options);\n\t\treturn result;\n\t} catch (error) {\n\t\tconsole.error('Initialization failed:', error.message);\n\t\tif (process.env.DEBUG === 'true') {\n\t\t\tconsole.error('Debug stack trace:', error.stack);\n\t\t}\n\t\tthrow error; // Re-throw to be handled by the command handler\n\t}\n};\n\n// Export version information\nexport const version = packageJson.version;\n\n// CLI implementation\nif (import.meta.url === `file://${process.argv[1]}`) {\n\tconst program = new Command();\n\n\tprogram\n\t\t.name('task-master')\n\t\t.description('Claude Task Master CLI')\n\t\t.version(version);\n\n\tprogram\n\t\t.command('init')\n\t\t.description('Initialize a new project')\n\t\t.option('-y, --yes', 'Skip prompts and use default values')\n\t\t.option('-n, --name <n>', 'Project name')\n\t\t.option('-d, --description <description>', 'Project description')\n\t\t.option('-v, --version <version>', 'Project version', '0.1.0')\n\t\t.option('-a, --author <author>', 'Author name')\n\t\t.option('--skip-install', 'Skip installing dependencies')\n\t\t.option('--dry-run', 'Show what would be done without making changes')\n\t\t.option('--aliases', 'Add shell aliases (tm, taskmaster)')\n\t\t.option('--no-aliases', 'Skip shell aliases (tm, taskmaster)')\n\t\t.option('--git', 'Initialize Git repository')\n\t\t.option('--no-git', 'Skip Git repository initialization')\n\t\t.option('--git-tasks', 'Store tasks in Git')\n\t\t.option('--no-git-tasks', 'No Git storage of tasks')\n\t\t.action(async (cmdOptions) => {\n\t\t\ttry {\n\t\t\t\tawait runInitCLI(cmdOptions);\n\t\t\t} catch (err) {\n\t\t\t\tconsole.error('Init failed:', err.message);\n\t\t\t\tprocess.exit(1);\n\t\t\t}\n\t\t});\n\n\tprogram\n\t\t.command('dev')\n\t\t.description('Run the dev.js script')\n\t\t.allowUnknownOption(true)\n\t\t.action(() => {\n\t\t\tconst args = process.argv.slice(process.argv.indexOf('dev') + 1);\n\t\t\tconst child = 
spawn('node', [devScriptPath, ...args], {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: process.cwd()\n\t\t\t});\n\n\t\t\tchild.on('close', (code) => {\n\t\t\t\tprocess.exit(code);\n\t\t\t});\n\t\t});\n\n\t// Add shortcuts for common dev.js commands\n\tprogram\n\t\t.command('list')\n\t\t.description('List all tasks')\n\t\t.action(() => {\n\t\t\tconst child = spawn('node', [devScriptPath, 'list'], {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: process.cwd()\n\t\t\t});\n\n\t\t\tchild.on('close', (code) => {\n\t\t\t\tprocess.exit(code);\n\t\t\t});\n\t\t});\n\n\tprogram\n\t\t.command('next')\n\t\t.description('Show the next task to work on')\n\t\t.action(() => {\n\t\t\tconst child = spawn('node', [devScriptPath, 'next'], {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: process.cwd()\n\t\t\t});\n\n\t\t\tchild.on('close', (code) => {\n\t\t\t\tprocess.exit(code);\n\t\t\t});\n\t\t});\n\n\tprogram\n\t\t.command('generate')\n\t\t.description('Generate task files')\n\t\t.action(() => {\n\t\t\tconst child = spawn('node', [devScriptPath, 'generate'], {\n\t\t\t\tstdio: 'inherit',\n\t\t\t\tcwd: process.cwd()\n\t\t\t});\n\n\t\t\tchild.on('close', (code) => {\n\t\t\t\tprocess.exit(code);\n\t\t\t});\n\t\t});\n\n\tprogram.parse(process.argv);\n}\n"], ["/claude-task-master/src/profiles/opencode.js", "// Opencode profile for rule-transformer\nimport path from 'path';\nimport fs from 'fs';\nimport { log } from '../../scripts/modules/utils.js';\nimport { createProfile } from './base-profile.js';\n\n/**\n * Transform standard MCP config format to OpenCode format\n * @param {Object} mcpConfig - Standard MCP configuration object\n * @returns {Object} - Transformed OpenCode configuration object\n */\nfunction transformToOpenCodeFormat(mcpConfig) {\n\tconst openCodeConfig = {\n\t\t$schema: 'https://opencode.ai/config.json'\n\t};\n\n\t// Transform mcpServers to mcp\n\tif (mcpConfig.mcpServers) {\n\t\topenCodeConfig.mcp = {};\n\n\t\tfor (const [serverName, serverConfig] of 
Object.entries(\n\t\t\tmcpConfig.mcpServers\n\t\t)) {\n\t\t\t// Transform server configuration\n\t\t\tconst transformedServer = {\n\t\t\t\ttype: 'local'\n\t\t\t};\n\n\t\t\t// Combine command and args into single command array\n\t\t\tif (serverConfig.command && serverConfig.args) {\n\t\t\t\ttransformedServer.command = [\n\t\t\t\t\tserverConfig.command,\n\t\t\t\t\t...serverConfig.args\n\t\t\t\t];\n\t\t\t} else if (serverConfig.command) {\n\t\t\t\ttransformedServer.command = [serverConfig.command];\n\t\t\t}\n\n\t\t\t// Add enabled flag\n\t\t\ttransformedServer.enabled = true;\n\n\t\t\t// Transform env to environment\n\t\t\tif (serverConfig.env) {\n\t\t\t\ttransformedServer.environment = serverConfig.env;\n\t\t\t}\n\n\t\t\t// update with transformed config\n\t\t\topenCodeConfig.mcp[serverName] = transformedServer;\n\t\t}\n\t}\n\n\treturn openCodeConfig;\n}\n\n/**\n * Lifecycle function called after MCP config generation to transform to OpenCode format\n * @param {string} targetDir - Target project directory\n * @param {string} assetsDir - Assets directory (unused for OpenCode)\n */\nfunction onPostConvertRulesProfile(targetDir, assetsDir) {\n\tconst openCodeConfigPath = path.join(targetDir, 'opencode.json');\n\n\tif (!fs.existsSync(openCodeConfigPath)) {\n\t\tlog('debug', '[OpenCode] No opencode.json found to transform');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the generated standard MCP config\n\t\tconst mcpConfigContent = fs.readFileSync(openCodeConfigPath, 'utf8');\n\t\tconst mcpConfig = JSON.parse(mcpConfigContent);\n\n\t\t// Check if it's already in OpenCode format (has $schema)\n\t\tif (mcpConfig.$schema) {\n\t\t\tlog(\n\t\t\t\t'info',\n\t\t\t\t'[OpenCode] opencode.json already in OpenCode format, skipping transformation'\n\t\t\t);\n\t\t\treturn;\n\t\t}\n\n\t\t// Transform to OpenCode format\n\t\tconst openCodeConfig = transformToOpenCodeFormat(mcpConfig);\n\n\t\t// Write back the transformed config with proper 
formatting\n\t\tfs.writeFileSync(\n\t\t\topenCodeConfigPath,\n\t\t\tJSON.stringify(openCodeConfig, null, 2) + '\\n'\n\t\t);\n\n\t\tlog('info', '[OpenCode] Transformed opencode.json to OpenCode format');\n\t\tlog(\n\t\t\t'debug',\n\t\t\t`[OpenCode] Added schema, renamed mcpServers->mcp, combined command+args, added type/enabled, renamed env->environment`\n\t\t);\n\t} catch (error) {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[OpenCode] Failed to transform opencode.json: ${error.message}`\n\t\t);\n\t}\n}\n\n/**\n * Lifecycle function called when removing OpenCode profile\n * @param {string} targetDir - Target project directory\n */\nfunction onRemoveRulesProfile(targetDir) {\n\tconst openCodeConfigPath = path.join(targetDir, 'opencode.json');\n\n\tif (!fs.existsSync(openCodeConfigPath)) {\n\t\tlog('debug', '[OpenCode] No opencode.json found to clean up');\n\t\treturn;\n\t}\n\n\ttry {\n\t\t// Read the current config\n\t\tconst configContent = fs.readFileSync(openCodeConfigPath, 'utf8');\n\t\tconst config = JSON.parse(configContent);\n\n\t\t// Check if it has the mcp section and taskmaster-ai server\n\t\tif (config.mcp && config.mcp['taskmaster-ai']) {\n\t\t\t// Remove taskmaster-ai server\n\t\t\tdelete config.mcp['taskmaster-ai'];\n\n\t\t\t// Check if there are other MCP servers\n\t\t\tconst remainingServers = Object.keys(config.mcp);\n\n\t\t\tif (remainingServers.length === 0) {\n\t\t\t\t// No other servers, remove entire mcp section\n\t\t\t\tdelete config.mcp;\n\t\t\t}\n\n\t\t\t// Check if config is now empty (only has $schema)\n\t\t\tconst remainingKeys = Object.keys(config).filter(\n\t\t\t\t(key) => key !== '$schema'\n\t\t\t);\n\n\t\t\tif (remainingKeys.length === 0) {\n\t\t\t\t// Config only has schema left, remove entire file\n\t\t\t\tfs.rmSync(openCodeConfigPath, { force: true });\n\t\t\t\tlog('info', '[OpenCode] Removed empty opencode.json file');\n\t\t\t} else {\n\t\t\t\t// Write back the modified 
config\n\t\t\t\tfs.writeFileSync(\n\t\t\t\t\topenCodeConfigPath,\n\t\t\t\t\tJSON.stringify(config, null, 2) + '\\n'\n\t\t\t\t);\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t'[OpenCode] Removed TaskMaster from opencode.json, preserved other configurations'\n\t\t\t\t);\n\t\t\t}\n\t\t} else {\n\t\t\tlog('debug', '[OpenCode] TaskMaster not found in opencode.json');\n\t\t}\n\t} catch (error) {\n\t\tlog(\n\t\t\t'error',\n\t\t\t`[OpenCode] Failed to clean up opencode.json: ${error.message}`\n\t\t);\n\t}\n}\n\n// Create and export opencode profile using the base factory\nexport const opencodeProfile = createProfile({\n\tname: 'opencode',\n\tdisplayName: 'OpenCode',\n\turl: 'opencode.ai',\n\tdocsUrl: 'opencode.ai/docs/',\n\tprofileDir: '.', // Root directory\n\trulesDir: '.', // Root directory for AGENTS.md\n\tmcpConfigName: 'opencode.json', // Override default 'mcp.json'\n\tincludeDefaultRules: false,\n\tfileMap: {\n\t\t'AGENTS.md': 'AGENTS.md'\n\t},\n\tonPostConvert: onPostConvertRulesProfile,\n\tonRemove: onRemoveRulesProfile\n});\n\n// Export lifecycle functions separately to avoid naming conflicts\nexport { onPostConvertRulesProfile, onRemoveRulesProfile };\n"], ["/claude-task-master/src/ai-providers/google-vertex.js", "/**\n * google-vertex.js\n * AI provider implementation for Google Vertex AI models using Vercel AI SDK.\n */\n\nimport { createVertex } from '@ai-sdk/google-vertex';\nimport { BaseAIProvider } from './base-provider.js';\nimport { resolveEnvVariable } from '../../scripts/modules/utils.js';\nimport { log } from '../../scripts/modules/utils.js';\n\n// Vertex-specific error classes\nclass VertexAuthError extends Error {\n\tconstructor(message) {\n\t\tsuper(message);\n\t\tthis.name = 'VertexAuthError';\n\t\tthis.code = 'vertex_auth_error';\n\t}\n}\n\nclass VertexConfigError extends Error {\n\tconstructor(message) {\n\t\tsuper(message);\n\t\tthis.name = 'VertexConfigError';\n\t\tthis.code = 'vertex_config_error';\n\t}\n}\n\nclass VertexApiError extends Error 
{\n\tconstructor(message, statusCode) {\n\t\tsuper(message);\n\t\tthis.name = 'VertexApiError';\n\t\tthis.code = 'vertex_api_error';\n\t\tthis.statusCode = statusCode;\n\t}\n}\n\nexport class VertexAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Google Vertex AI';\n\t}\n\n\t/**\n\t * Returns the required API key environment variable name for Google Vertex AI.\n\t * @returns {string} The environment variable name\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'GOOGLE_API_KEY';\n\t}\n\n\t/**\n\t * Validates Vertex AI-specific authentication parameters\n\t * @param {object} params - Parameters to validate\n\t * @throws {Error} If required parameters are missing\n\t */\n\tvalidateAuth(params) {\n\t\tconst { apiKey, projectId, location, credentials } = params;\n\n\t\t// Check for API key OR service account credentials\n\t\tif (!apiKey && !credentials) {\n\t\t\tthrow new VertexAuthError(\n\t\t\t\t'Either Google API key (GOOGLE_API_KEY) or service account credentials (GOOGLE_APPLICATION_CREDENTIALS) is required for Vertex AI'\n\t\t\t);\n\t\t}\n\n\t\t// Project ID is required for Vertex AI\n\t\tif (!projectId) {\n\t\t\tthrow new VertexConfigError(\n\t\t\t\t'Google Cloud project ID is required for Vertex AI. Set VERTEX_PROJECT_ID environment variable.'\n\t\t\t);\n\t\t}\n\n\t\t// Location is required for Vertex AI\n\t\tif (!location) {\n\t\t\tthrow new VertexConfigError(\n\t\t\t\t'Google Cloud location is required for Vertex AI. 
Set VERTEX_LOCATION environment variable (e.g., \"us-central1\").'\n\t\t\t);\n\t\t}\n\t}\n\n\t/**\n\t * Creates and returns a Google Vertex AI client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} [params.apiKey] - Google API key\n\t * @param {string} params.projectId - Google Cloud project ID\n\t * @param {string} params.location - Google Cloud location (e.g., \"us-central1\")\n\t * @param {object} [params.credentials] - Service account credentials object\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} Google Vertex AI client function\n\t * @throws {Error} If required parameters are missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\t// Validate required parameters\n\t\t\tthis.validateAuth(params);\n\n\t\t\tconst { apiKey, projectId, location, credentials, baseURL } = params;\n\n\t\t\t// Configure auth options - either API key or service account\n\t\t\tconst authOptions = {};\n\t\t\tif (apiKey) {\n\t\t\t\tauthOptions.apiKey = apiKey;\n\t\t\t} else if (credentials) {\n\t\t\t\tauthOptions.googleAuthOptions = credentials;\n\t\t\t}\n\n\t\t\t// Return Vertex AI client\n\t\t\treturn createVertex({\n\t\t\t\t...authOptions,\n\t\t\t\tprojectId,\n\t\t\t\tlocation,\n\t\t\t\t...(baseURL && { baseURL })\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n\n\t/**\n\t * Handle errors from Vertex AI\n\t * @param {string} operation - Description of the operation that failed\n\t * @param {Error} error - The error object\n\t * @throws {Error} Rethrows the error with additional context\n\t */\n\thandleError(operation, error) {\n\t\tlog('error', `Vertex AI ${operation} error:`, error);\n\n\t\t// Handle known error types\n\t\tif (\n\t\t\terror.name === 'VertexAuthError' ||\n\t\t\terror.name === 'VertexConfigError' ||\n\t\t\terror.name === 'VertexApiError'\n\t\t) {\n\t\t\tthrow error;\n\t\t}\n\n\t\t// Handle 
network/API errors\n\t\tif (error.response) {\n\t\t\tconst statusCode = error.response.status;\n\t\t\tconst errorMessage = error.response.data?.error?.message || error.message;\n\n\t\t\t// Categorize by status code\n\t\t\tif (statusCode === 401 || statusCode === 403) {\n\t\t\t\tthrow new VertexAuthError(`Authentication failed: ${errorMessage}`);\n\t\t\t} else if (statusCode === 400) {\n\t\t\t\tthrow new VertexConfigError(`Invalid request: ${errorMessage}`);\n\t\t\t} else {\n\t\t\t\tthrow new VertexApiError(\n\t\t\t\t\t`API error (${statusCode}): ${errorMessage}`,\n\t\t\t\t\tstatusCode\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\n\t\t// Generic error handling\n\t\tthrow new Error(`Vertex AI ${operation} failed: ${error.message}`);\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/custom-sdk/claude-code/errors.js", "/**\n * @fileoverview Error handling utilities for Claude Code provider\n */\n\nimport { APICallError, LoadAPIKeyError } from '@ai-sdk/provider';\n\n/**\n * @typedef {import('./types.js').ClaudeCodeErrorMetadata} ClaudeCodeErrorMetadata\n */\n\n/**\n * Create an API call error with Claude Code specific metadata\n * @param {Object} params - Error parameters\n * @param {string} params.message - Error message\n * @param {string} [params.code] - Error code\n * @param {number} [params.exitCode] - Process exit code\n * @param {string} [params.stderr] - Standard error output\n * @param {string} [params.promptExcerpt] - Excerpt of the prompt\n * @param {boolean} [params.isRetryable=false] - Whether the error is retryable\n * @returns {APICallError}\n */\nexport function createAPICallError({\n\tmessage,\n\tcode,\n\texitCode,\n\tstderr,\n\tpromptExcerpt,\n\tisRetryable = false\n}) {\n\t/** @type {ClaudeCodeErrorMetadata} */\n\tconst metadata = {\n\t\tcode,\n\t\texitCode,\n\t\tstderr,\n\t\tpromptExcerpt\n\t};\n\n\treturn new APICallError({\n\t\tmessage,\n\t\tisRetryable,\n\t\turl: 'claude-code-cli://command',\n\t\trequestBodyValues: promptExcerpt ? 
{ prompt: promptExcerpt } : undefined,\n\t\tdata: metadata\n\t});\n}\n\n/**\n * Create an authentication error\n * @param {Object} params - Error parameters\n * @param {string} params.message - Error message\n * @returns {LoadAPIKeyError}\n */\nexport function createAuthenticationError({ message }) {\n\treturn new LoadAPIKeyError({\n\t\tmessage:\n\t\t\tmessage ||\n\t\t\t'Authentication failed. Please ensure Claude Code CLI is properly authenticated.'\n\t});\n}\n\n/**\n * Create a timeout error\n * @param {Object} params - Error parameters\n * @param {string} params.message - Error message\n * @param {string} [params.promptExcerpt] - Excerpt of the prompt\n * @param {number} params.timeoutMs - Timeout in milliseconds\n * @returns {APICallError}\n */\nexport function createTimeoutError({ message, promptExcerpt, timeoutMs }) {\n\t// Store timeoutMs in metadata for potential use by error handlers\n\t/** @type {ClaudeCodeErrorMetadata & { timeoutMs: number }} */\n\tconst metadata = {\n\t\tcode: 'TIMEOUT',\n\t\tpromptExcerpt,\n\t\ttimeoutMs\n\t};\n\n\treturn new APICallError({\n\t\tmessage,\n\t\tisRetryable: true,\n\t\turl: 'claude-code-cli://command',\n\t\trequestBodyValues: promptExcerpt ? 
{ prompt: promptExcerpt } : undefined,\n\t\tdata: metadata\n\t});\n}\n\n/**\n * Check if an error is an authentication error\n * @param {unknown} error - Error to check\n * @returns {boolean}\n */\nexport function isAuthenticationError(error) {\n\tif (error instanceof LoadAPIKeyError) return true;\n\tif (\n\t\terror instanceof APICallError &&\n\t\t/** @type {ClaudeCodeErrorMetadata} */ (error.data)?.exitCode === 401\n\t)\n\t\treturn true;\n\treturn false;\n}\n\n/**\n * Check if an error is a timeout error\n * @param {unknown} error - Error to check\n * @returns {boolean}\n */\nexport function isTimeoutError(error) {\n\tif (\n\t\terror instanceof APICallError &&\n\t\t/** @type {ClaudeCodeErrorMetadata} */ (error.data)?.code === 'TIMEOUT'\n\t)\n\t\treturn true;\n\treturn false;\n}\n\n/**\n * Get error metadata from an error\n * @param {unknown} error - Error to extract metadata from\n * @returns {ClaudeCodeErrorMetadata|undefined}\n */\nexport function getErrorMetadata(error) {\n\tif (error instanceof APICallError && error.data) {\n\t\treturn /** @type {ClaudeCodeErrorMetadata} */ (error.data);\n\t}\n\treturn undefined;\n}\n"], ["/claude-task-master/scripts/test-claude-errors.js", "#!/usr/bin/env node\n\n/**\n * test-claude-errors.js\n *\n * A test script to verify the error handling and retry logic in the callClaude function.\n * This script creates a modified version of dev.js that simulates different error scenarios.\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport dotenv from 'dotenv';\nimport { fileURLToPath } from 'url';\nimport { dirname } from 'path';\nimport { execSync, spawn } from 'child_process';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = dirname(__filename);\n\n// Load environment variables from .env file\ndotenv.config();\n\n// Create a simple PRD for testing\nconst createTestPRD = () => {\n\treturn `# Test PRD for Error Handling\n\n## Overview\nThis is a simple test PRD to verify the error handling in the 
callClaude function.\n\n## Requirements\n1. Create a simple web application\n2. Implement user authentication\n3. Add a dashboard for users\n`;\n};\n\n// Create a modified version of dev.js that simulates errors\nfunction createErrorSimulationScript(errorType, failureCount = 2) {\n\t// Read the original dev.js file\n\tconst devJsPath = path.join(__dirname, 'dev.js');\n\tconst devJsContent = fs.readFileSync(devJsPath, 'utf8');\n\n\t// Create a modified version that simulates errors\n\tlet modifiedContent = devJsContent;\n\n\t// Find the anthropic.messages.create call and replace it with our mock\n\tconst anthropicCallRegex =\n\t\t/const response = await anthropic\\.messages\\.create\\(/;\n\n\tlet mockCode = '';\n\n\tswitch (errorType) {\n\t\tcase 'network':\n\t\t\tmockCode = `\n // Mock for network error simulation\n let currentAttempt = 0;\n const failureCount = ${failureCount};\n \n // Simulate network error for the first few attempts\n currentAttempt++;\n console.log(\\`[Mock] API call attempt \\${currentAttempt}\\`);\n \n if (currentAttempt <= failureCount) {\n console.log(\\`[Mock] Simulating network error (attempt \\${currentAttempt}/\\${failureCount})\\`);\n throw new Error('Network error: Connection refused');\n }\n \n const response = await anthropic.messages.create(`;\n\t\t\tbreak;\n\n\t\tcase 'timeout':\n\t\t\tmockCode = `\n // Mock for timeout error simulation\n let currentAttempt = 0;\n const failureCount = ${failureCount};\n \n // Simulate timeout error for the first few attempts\n currentAttempt++;\n console.log(\\`[Mock] API call attempt \\${currentAttempt}\\`);\n \n if (currentAttempt <= failureCount) {\n console.log(\\`[Mock] Simulating timeout error (attempt \\${currentAttempt}/\\${failureCount})\\`);\n throw new Error('Request timed out after 60000ms');\n }\n \n const response = await anthropic.messages.create(`;\n\t\t\tbreak;\n\n\t\tcase 'invalid-json':\n\t\t\tmockCode = `\n // Mock for invalid JSON response\n let currentAttempt = 0;\n const 
failureCount = ${failureCount};\n \n // Simulate invalid JSON for the first few attempts\n currentAttempt++;\n console.log(\\`[Mock] API call attempt \\${currentAttempt}\\`);\n \n if (currentAttempt <= failureCount) {\n console.log(\\`[Mock] Simulating invalid JSON response (attempt \\${currentAttempt}/\\${failureCount})\\`);\n return {\n content: [\n {\n text: \\`\\`\\`json\\\\n{\"meta\": {\"projectName\": \"Test Project\"}, \"tasks\": [{\"id\": 1, \"title\": \"Task 1\"\\`\n }\n ]\n };\n }\n \n const response = await anthropic.messages.create(`;\n\t\t\tbreak;\n\n\t\tcase 'empty-tasks':\n\t\t\tmockCode = `\n // Mock for empty tasks array\n let currentAttempt = 0;\n const failureCount = ${failureCount};\n \n // Simulate empty tasks array for the first few attempts\n currentAttempt++;\n console.log(\\`[Mock] API call attempt \\${currentAttempt}\\`);\n \n if (currentAttempt <= failureCount) {\n console.log(\\`[Mock] Simulating empty tasks array (attempt \\${currentAttempt}/\\${failureCount})\\`);\n return {\n content: [\n {\n text: \\`\\`\\`json\\\\n{\"meta\": {\"projectName\": \"Test Project\"}, \"tasks\": []}\\\\n\\`\\`\\`\n }\n ]\n };\n }\n \n const response = await anthropic.messages.create(`;\n\t\t\tbreak;\n\n\t\tdefault:\n\t\t\t// No modification\n\t\t\tmockCode = `const response = await anthropic.messages.create(`;\n\t}\n\n\t// Replace the anthropic call with our mock\n\tmodifiedContent = modifiedContent.replace(anthropicCallRegex, mockCode);\n\n\t// Write the modified script to a temporary file\n\tconst tempScriptPath = path.join(__dirname, `temp-dev-${errorType}.js`);\n\tfs.writeFileSync(tempScriptPath, modifiedContent, 'utf8');\n\n\treturn tempScriptPath;\n}\n\n// Function to run a test with a specific error type\nasync function runErrorTest(errorType, numTasks = 5, failureCount = 2) {\n\tconsole.log(`\\n=== Test: ${errorType.toUpperCase()} Error Simulation ===`);\n\n\t// Create a test PRD\n\tconst testPRD = createTestPRD();\n\tconst testPRDPath = 
path.join(__dirname, `test-prd-${errorType}.txt`);\n\tfs.writeFileSync(testPRDPath, testPRD, 'utf8');\n\n\t// Create a modified dev.js that simulates the specified error\n\tconst tempScriptPath = createErrorSimulationScript(errorType, failureCount);\n\n\tconsole.log(`Created test PRD at ${testPRDPath}`);\n\tconsole.log(`Created error simulation script at ${tempScriptPath}`);\n\tconsole.log(\n\t\t`Running with error type: ${errorType}, failure count: ${failureCount}, tasks: ${numTasks}`\n\t);\n\n\ttry {\n\t\t// Run the modified script\n\t\texecSync(\n\t\t\t`node ${tempScriptPath} parse-prd --input=${testPRDPath} --tasks=${numTasks}`,\n\t\t\t{\n\t\t\t\tstdio: 'inherit'\n\t\t\t}\n\t\t);\n\t\tconsole.log(`${errorType} error test completed successfully`);\n\t} catch (error) {\n\t\tconsole.error(`${errorType} error test failed:`, error.message);\n\t} finally {\n\t\t// Clean up temporary files\n\t\tif (fs.existsSync(tempScriptPath)) {\n\t\t\tfs.unlinkSync(tempScriptPath);\n\t\t}\n\t\tif (fs.existsSync(testPRDPath)) {\n\t\t\tfs.unlinkSync(testPRDPath);\n\t\t}\n\t}\n}\n\n// Function to run all error tests\nasync function runAllErrorTests() {\n\tconsole.log('Starting error handling tests for callClaude function...');\n\n\t// Test 1: Network error with automatic retry\n\tawait runErrorTest('network', 5, 2);\n\n\t// Test 2: Timeout error with automatic retry\n\tawait runErrorTest('timeout', 5, 2);\n\n\t// Test 3: Invalid JSON response with task reduction\n\tawait runErrorTest('invalid-json', 10, 2);\n\n\t// Test 4: Empty tasks array with task reduction\n\tawait runErrorTest('empty-tasks', 15, 2);\n\n\t// Test 5: Exhausted retries (more failures than MAX_RETRIES)\n\tawait runErrorTest('network', 5, 4);\n\n\tconsole.log('\\nAll error tests completed!');\n}\n\n// Run the tests\nrunAllErrorTests().catch((error) => {\n\tconsole.error('Error running tests:', error);\n\tprocess.exit(1);\n});\n"], ["/claude-task-master/mcp-server/src/tools/parse-prd.js", "/**\n * tools/parsePRD.js\n * 
Tool to parse PRD document and generate tasks\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\twithNormalizedProjectRoot,\n\tcreateErrorResponse\n} from './utils.js';\nimport { parsePRDDirect } from '../core/task-master-core.js';\nimport {\n\tPRD_FILE,\n\tTASKMASTER_DOCS_DIR,\n\tTASKMASTER_TASKS_FILE\n} from '../../../src/constants/paths.js';\nimport { resolveTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the parse_prd tool\n * @param {Object} server - FastMCP server instance\n */\nexport function registerParsePRDTool(server) {\n\tserver.addTool({\n\t\tname: 'parse_prd',\n\t\tdescription: `Parse a Product Requirements Document (PRD) text file to automatically generate initial tasks. Reinitializing the project is not necessary to run this tool. It is recommended to run parse-prd after initializing the project and creating/importing a prd.txt file in the project root's ${TASKMASTER_DOCS_DIR} directory.`,\n\n\t\tparameters: z.object({\n\t\t\tinput: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.default(PRD_FILE)\n\t\t\t\t.describe('Absolute path to the PRD document file (.txt, .md, etc.)'),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.'),\n\t\t\ttag: z.string().optional().describe('Tag context to operate on'),\n\t\t\toutput: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t`Output path for tasks.json file (default: ${TASKMASTER_TASKS_FILE})`\n\t\t\t\t),\n\t\t\tnumTasks: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Approximate number of top-level tasks to generate (default: 10). As the agent, if you have enough information, ensure to enter a number of tasks that would logically scale with project complexity. Setting to 0 will allow Taskmaster to determine the appropriate number of tasks based on the complexity of the PRD. 
Avoid entering numbers above 50 due to context window limitations.'\n\t\t\t\t),\n\t\t\tforce: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(false)\n\t\t\t\t.describe('Overwrite existing output file without prompting.'),\n\t\t\tresearch: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Enable Taskmaster to use the research role for potentially more informed task generation. Requires appropriate API key.'\n\t\t\t\t),\n\t\t\tappend: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Append generated tasks to existing file.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tconst resolvedTag = resolveTag({\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\ttag: args.tag\n\t\t\t\t});\n\t\t\t\tconst result = await parsePRDDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\t...args,\n\t\t\t\t\t\ttag: resolvedTag\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error parsing PRD',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in parse_prd: ${error.message}`);\n\t\t\t\treturn createErrorResponse(`Failed to parse PRD: ${error.message}`);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/get-operation-status.js", "// mcp-server/src/tools/get-operation-status.js\nimport { z } from 'zod';\nimport { createErrorResponse, createContentResponse } from './utils.js'; // Assuming these utils exist\n\n/**\n * Register the get_operation_status tool.\n * @param {FastMCP} server - FastMCP server instance.\n * @param {AsyncOperationManager} asyncManager - The async operation manager.\n */\nexport function registerGetOperationStatusTool(server, asyncManager) {\n\tserver.addTool({\n\t\tname: 'get_operation_status',\n\t\tdescription:\n\t\t\t'Retrieves the status and result/error of a background 
operation.',\n\t\tparameters: z.object({\n\t\t\toperationId: z.string().describe('The ID of the operation to check.')\n\t\t}),\n\t\texecute: async (args, { log }) => {\n\t\t\ttry {\n\t\t\t\tconst { operationId } = args;\n\t\t\t\tlog.info(`Checking status for operation ID: ${operationId}`);\n\n\t\t\t\tconst status = asyncManager.getStatus(operationId);\n\n\t\t\t\t// Status will now always return an object, but it might have status='not_found'\n\t\t\t\tif (status.status === 'not_found') {\n\t\t\t\t\tlog.warn(`Operation ID not found: ${operationId}`);\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\tstatus.error?.message || `Operation ID not found: ${operationId}`,\n\t\t\t\t\t\tstatus.error?.code || 'OPERATION_NOT_FOUND'\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tlog.info(`Status for ${operationId}: ${status.status}`);\n\t\t\t\treturn createContentResponse(status);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in get_operation_status tool: ${error.message}`, {\n\t\t\t\t\tstack: error.stack\n\t\t\t\t});\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Failed to get operation status: ${error.message}`,\n\t\t\t\t\t'GET_STATUS_ERROR'\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t});\n}\n"], ["/claude-task-master/src/ai-providers/base-provider.js", "import { generateObject, generateText, streamText } from 'ai';\nimport { log } from '../../scripts/modules/utils.js';\n\n/**\n * Base class for all AI providers\n */\nexport class BaseAIProvider {\n\tconstructor() {\n\t\tif (this.constructor === BaseAIProvider) {\n\t\t\tthrow new Error('BaseAIProvider cannot be instantiated directly');\n\t\t}\n\n\t\t// Each provider must set their name\n\t\tthis.name = this.constructor.name;\n\t}\n\n\t/**\n\t * Validates authentication parameters - can be overridden by providers\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateAuth(params) {\n\t\t// Default: require API key (most providers need this)\n\t\tif (!params.apiKey) {\n\t\t\tthrow new Error(`${this.name} API key is 
required`);\n\t\t}\n\t}\n\n\t/**\n\t * Validates common parameters across all methods\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateParams(params) {\n\t\t// Validate authentication (can be overridden by providers)\n\t\tthis.validateAuth(params);\n\n\t\t// Validate required model ID\n\t\tif (!params.modelId) {\n\t\t\tthrow new Error(`${this.name} Model ID is required`);\n\t\t}\n\n\t\t// Validate optional parameters\n\t\tthis.validateOptionalParams(params);\n\t}\n\n\t/**\n\t * Validates optional parameters like temperature and maxTokens\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateOptionalParams(params) {\n\t\tif (\n\t\t\tparams.temperature !== undefined &&\n\t\t\t(params.temperature < 0 || params.temperature > 1)\n\t\t) {\n\t\t\tthrow new Error('Temperature must be between 0 and 1');\n\t\t}\n\t\tif (params.maxTokens !== undefined && params.maxTokens <= 0) {\n\t\t\tthrow new Error('maxTokens must be greater than 0');\n\t\t}\n\t}\n\n\t/**\n\t * Validates message array structure\n\t */\n\tvalidateMessages(messages) {\n\t\tif (!messages || !Array.isArray(messages) || messages.length === 0) {\n\t\t\tthrow new Error('Invalid or empty messages array provided');\n\t\t}\n\n\t\tfor (const msg of messages) {\n\t\t\tif (!msg.role || !msg.content) {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t'Invalid message format. 
Each message must have role and content'\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t * Common error handler\n\t */\n\thandleError(operation, error) {\n\t\tconst errorMessage = error.message || 'Unknown error occurred';\n\t\tlog('error', `${this.name} ${operation} failed: ${errorMessage}`, {\n\t\t\terror\n\t\t});\n\t\tthrow new Error(\n\t\t\t`${this.name} API error during ${operation}: ${errorMessage}`\n\t\t);\n\t}\n\n\t/**\n\t * Creates and returns a client instance for the provider\n\t * @abstract\n\t */\n\tgetClient(params) {\n\t\tthrow new Error('getClient must be implemented by provider');\n\t}\n\n\t/**\n\t * Returns if the API key is required\n\t * @abstract\n\t * @returns {boolean} if the API key is required, defaults to true\n\t */\n\tisRequiredApiKey() {\n\t\treturn true;\n\t}\n\n\t/**\n\t * Returns the required API key environment variable name\n\t * @abstract\n\t * @returns {string|null} The environment variable name, or null if no API key is required\n\t */\n\tgetRequiredApiKeyName() {\n\t\tthrow new Error('getRequiredApiKeyName must be implemented by provider');\n\t}\n\n\t/**\n\t * Generates text using the provider's model\n\t */\n\tasync generateText(params) {\n\t\ttry {\n\t\t\tthis.validateParams(params);\n\t\t\tthis.validateMessages(params.messages);\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`Generating ${this.name} text with model: ${params.modelId}`\n\t\t\t);\n\n\t\t\tconst client = await this.getClient(params);\n\t\t\tconst result = await generateText({\n\t\t\t\tmodel: client(params.modelId),\n\t\t\t\tmessages: params.messages,\n\t\t\t\tmaxTokens: params.maxTokens,\n\t\t\t\ttemperature: params.temperature\n\t\t\t});\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`${this.name} generateText completed successfully for model: ${params.modelId}`\n\t\t\t);\n\n\t\t\treturn {\n\t\t\t\ttext: result.text,\n\t\t\t\tusage: {\n\t\t\t\t\tinputTokens: result.usage?.promptTokens,\n\t\t\t\t\toutputTokens: result.usage?.completionTokens,\n\t\t\t\t\ttotalTokens: 
result.usage?.totalTokens\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tthis.handleError('text generation', error);\n\t\t}\n\t}\n\n\t/**\n\t * Streams text using the provider's model\n\t */\n\tasync streamText(params) {\n\t\ttry {\n\t\t\tthis.validateParams(params);\n\t\t\tthis.validateMessages(params.messages);\n\n\t\t\tlog('debug', `Streaming ${this.name} text with model: ${params.modelId}`);\n\n\t\t\tconst client = await this.getClient(params);\n\t\t\tconst stream = await streamText({\n\t\t\t\tmodel: client(params.modelId),\n\t\t\t\tmessages: params.messages,\n\t\t\t\tmaxTokens: params.maxTokens,\n\t\t\t\ttemperature: params.temperature\n\t\t\t});\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`${this.name} streamText initiated successfully for model: ${params.modelId}`\n\t\t\t);\n\n\t\t\treturn stream;\n\t\t} catch (error) {\n\t\t\tthis.handleError('text streaming', error);\n\t\t}\n\t}\n\n\t/**\n\t * Generates a structured object using the provider's model\n\t */\n\tasync generateObject(params) {\n\t\ttry {\n\t\t\tthis.validateParams(params);\n\t\t\tthis.validateMessages(params.messages);\n\n\t\t\tif (!params.schema) {\n\t\t\t\tthrow new Error('Schema is required for object generation');\n\t\t\t}\n\t\t\tif (!params.objectName) {\n\t\t\t\tthrow new Error('Object name is required for object generation');\n\t\t\t}\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`Generating ${this.name} object ('${params.objectName}') with model: ${params.modelId}`\n\t\t\t);\n\n\t\t\tconst client = await this.getClient(params);\n\t\t\tconst result = await generateObject({\n\t\t\t\tmodel: client(params.modelId),\n\t\t\t\tmessages: params.messages,\n\t\t\t\tschema: params.schema,\n\t\t\t\tmode: 'auto',\n\t\t\t\tmaxTokens: params.maxTokens,\n\t\t\t\ttemperature: params.temperature\n\t\t\t});\n\n\t\t\tlog(\n\t\t\t\t'debug',\n\t\t\t\t`${this.name} generateObject completed successfully for model: ${params.modelId}`\n\t\t\t);\n\n\t\t\treturn {\n\t\t\t\tobject: result.object,\n\t\t\t\tusage: 
{\n\t\t\t\t\tinputTokens: result.usage?.promptTokens,\n\t\t\t\t\toutputTokens: result.usage?.completionTokens,\n\t\t\t\t\ttotalTokens: result.usage?.totalTokens\n\t\t\t\t}\n\t\t\t};\n\t\t} catch (error) {\n\t\t\tthis.handleError('object generation', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/index.js", "import { FastMCP } from 'fastmcp';\nimport path from 'path';\nimport dotenv from 'dotenv';\nimport { fileURLToPath } from 'url';\nimport fs from 'fs';\nimport logger from './logger.js';\nimport { registerTaskMasterTools } from './tools/index.js';\nimport ProviderRegistry from '../../src/provider-registry/index.js';\nimport { MCPProvider } from './providers/mcp-provider.js';\n\n// Load environment variables\ndotenv.config();\n\n// Constants\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = path.dirname(__filename);\n\n/**\n * Main MCP server class that integrates with Task Master\n */\nclass TaskMasterMCPServer {\n\tconstructor() {\n\t\t// Get version from package.json using synchronous fs\n\t\tconst packagePath = path.join(__dirname, '../../package.json');\n\t\tconst packageJson = JSON.parse(fs.readFileSync(packagePath, 'utf8'));\n\n\t\tthis.options = {\n\t\t\tname: 'Task Master MCP Server',\n\t\t\tversion: packageJson.version\n\t\t};\n\n\t\tthis.server = new FastMCP(this.options);\n\t\tthis.initialized = false;\n\n\t\t// Bind methods\n\t\tthis.init = this.init.bind(this);\n\t\tthis.start = this.start.bind(this);\n\t\tthis.stop = this.stop.bind(this);\n\n\t\t// Setup logging\n\t\tthis.logger = logger;\n\t}\n\n\t/**\n\t * Initialize the MCP server with necessary tools and routes\n\t */\n\tasync init() {\n\t\tif (this.initialized) return;\n\n\t\t// Pass the manager instance to the tool registration function\n\t\tregisterTaskMasterTools(this.server, this.asyncManager);\n\n\t\tthis.initialized = true;\n\n\t\treturn this;\n\t}\n\n\t/**\n\t * Start the MCP server\n\t */\n\tasync start() {\n\t\tif (!this.initialized) 
{\n\t\t\tawait this.init();\n\t\t}\n\n\t\tthis.server.on('connect', (event) => {\n\t\t\tevent.session.server.sendLoggingMessage({\n\t\t\t\tdata: {\n\t\t\t\t\tcontext: event.session.context,\n\t\t\t\t\tmessage: `MCP Server connected: ${event.session.name}`\n\t\t\t\t},\n\t\t\t\tlevel: 'info'\n\t\t\t});\n\t\t\tthis.registerRemoteProvider(event.session);\n\t\t});\n\n\t\t// Start the FastMCP server with increased timeout\n\t\tawait this.server.start({\n\t\t\ttransportType: 'stdio',\n\t\t\ttimeout: 120000 // 2 minutes timeout (in milliseconds)\n\t\t});\n\n\t\treturn this;\n\t}\n\n\t/**\n\t * Register both MCP providers with the provider registry\n\t */\n\tregisterRemoteProvider(session) {\n\t\t// Check if the server has at least one session\n\t\tif (session) {\n\t\t\t// Make sure session has required capabilities\n\t\t\tif (!session.clientCapabilities || !session.clientCapabilities.sampling) {\n\t\t\t\tsession.server.sendLoggingMessage({\n\t\t\t\t\tdata: {\n\t\t\t\t\t\tcontext: session.context,\n\t\t\t\t\t\tmessage: `MCP session missing required sampling capabilities, providers not registered`\n\t\t\t\t\t},\n\t\t\t\t\tlevel: 'info'\n\t\t\t\t});\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Register MCP provider with the Provider Registry\n\n\t\t\t// Register the unified MCP provider\n\t\t\tconst mcpProvider = new MCPProvider();\n\t\t\tmcpProvider.setSession(session);\n\n\t\t\t// Register provider with the registry\n\t\t\tconst providerRegistry = ProviderRegistry.getInstance();\n\t\t\tproviderRegistry.registerProvider('mcp', mcpProvider);\n\n\t\t\tsession.server.sendLoggingMessage({\n\t\t\t\tdata: {\n\t\t\t\t\tcontext: session.context,\n\t\t\t\t\tmessage: `MCP Server connected`\n\t\t\t\t},\n\t\t\t\tlevel: 'info'\n\t\t\t});\n\t\t} else {\n\t\t\tsession.server.sendLoggingMessage({\n\t\t\t\tdata: {\n\t\t\t\t\tcontext: session.context,\n\t\t\t\t\tmessage: `No MCP sessions available, providers not registered`\n\t\t\t\t},\n\t\t\t\tlevel: 'warn'\n\t\t\t});\n\t\t}\n\t}\n\n\t/**\n\t * 
Stop the MCP server\n\t */\n\tasync stop() {\n\t\tif (this.server) {\n\t\t\tawait this.server.stop();\n\t\t}\n\t}\n}\n\nexport default TaskMasterMCPServer;\n"], ["/claude-task-master/mcp-server/src/tools/initialize-project.js", "import { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { initializeProjectDirect } from '../core/task-master-core.js';\nimport { RULE_PROFILES } from '../../../src/constants/profiles.js';\n\nexport function registerInitializeProjectTool(server) {\n\tserver.addTool({\n\t\tname: 'initialize_project',\n\t\tdescription:\n\t\t\t'Initializes a new Task Master project structure by calling the core initialization logic. Creates necessary folders and configuration files for Task Master in the current directory.',\n\t\tparameters: z.object({\n\t\t\tskipInstall: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(false)\n\t\t\t\t.describe(\n\t\t\t\t\t'Skip installing dependencies automatically. Never do this unless you are sure the project is already installed.'\n\t\t\t\t),\n\t\t\taddAliases: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(true)\n\t\t\t\t.describe('Add shell aliases (tm, taskmaster) to shell config file.'),\n\t\t\tinitGit: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(true)\n\t\t\t\t.describe('Initialize Git repository in project root.'),\n\t\t\tstoreTasksInGit: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(true)\n\t\t\t\t.describe('Store tasks in Git (tasks.json and tasks/ directory).'),\n\t\t\tyes: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(true)\n\t\t\t\t.describe(\n\t\t\t\t\t'Skip prompts and use default values. Always set to true for MCP tools.'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'The root directory for the project. ALWAYS SET THIS TO THE PROJECT ROOT DIRECTORY. 
IF NOT SET, THE TOOL WILL NOT WORK.'\n\t\t\t\t),\n\t\t\trules: z\n\t\t\t\t.array(z.enum(RULE_PROFILES))\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t`List of rule profiles to include at initialization. If omitted, defaults to Cursor profile only. Available options: ${RULE_PROFILES.join(', ')}`\n\t\t\t\t)\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, context) => {\n\t\t\tconst { log } = context;\n\t\t\tconst session = context.session;\n\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Executing initialize_project tool with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\tconst result = await initializeProjectDirect(args, log, { session });\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Initialization failed',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tconst errorMessage = `Project initialization tool failed: ${error.message || 'Unknown error'}`;\n\t\t\t\tlog.error(errorMessage, error);\n\t\t\t\treturn createErrorResponse(errorMessage, { details: error.stack });\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/tools/complexity-report.js", "/**\n * tools/complexity-report.js\n * Tool for displaying the complexity analysis report\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { complexityReportDirect } from '../core/task-master-core.js';\nimport { COMPLEXITY_REPORT_FILE } from '../../../src/constants/paths.js';\nimport { findComplexityReportPath } from '../core/utils/path-utils.js';\nimport { getCurrentTag } from '../../../scripts/modules/utils.js';\n\n/**\n * Register the complexityReport tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerComplexityReportTool(server) {\n\tserver.addTool({\n\t\tname: 'complexity_report',\n\t\tdescription: 'Display the complexity analysis report in a 
readable format',\n\t\tparameters: z.object({\n\t\t\tfile: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t`Path to the report file (default: ${COMPLEXITY_REPORT_FILE})`\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. Must be an absolute path.')\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Getting complexity report with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\tconst resolvedTag = getCurrentTag(args.projectRoot);\n\n\t\t\t\tconst pathArgs = {\n\t\t\t\t\tprojectRoot: args.projectRoot,\n\t\t\t\t\tcomplexityReport: args.file,\n\t\t\t\t\ttag: resolvedTag\n\t\t\t\t};\n\n\t\t\t\tconst reportPath = findComplexityReportPath(pathArgs, log);\n\t\t\t\tlog.info('Reading complexity report from path: ', reportPath);\n\n\t\t\t\tif (!reportPath) {\n\t\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t\t'No complexity report found. Run task-master analyze-complexity first.'\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\tconst result = await complexityReportDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\treportPath: reportPath\n\t\t\t\t\t},\n\t\t\t\t\tlog\n\t\t\t\t);\n\n\t\t\t\tif (result.success) {\n\t\t\t\t\tlog.info('Successfully retrieved complexity report');\n\t\t\t\t} else {\n\t\t\t\t\tlog.error(\n\t\t\t\t\t\t`Failed to retrieve complexity report: ${result.error.message}`\n\t\t\t\t\t);\n\t\t\t\t}\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error retrieving complexity report',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in complexity-report tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(\n\t\t\t\t\t`Failed to retrieve complexity report: ${error.message}`\n\t\t\t\t);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/mcp-server/src/core/direct-functions/cache-stats.js", "/**\n * cache-stats.js\n * Direct 
function implementation for retrieving cache statistics\n */\n\nimport { contextManager } from '../context-manager.js';\n\n/**\n * Get cache statistics for monitoring\n * @param {Object} args - Command arguments\n * @param {Object} log - Logger object\n * @returns {Object} - Cache statistics\n */\nexport async function getCacheStatsDirect(args, log) {\n\ttry {\n\t\tlog.info('Retrieving cache statistics');\n\t\tconst stats = contextManager.getStats();\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: stats\n\t\t};\n\t} catch (error) {\n\t\tlog.error(`Error getting cache stats: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'CACHE_STATS_ERROR',\n\t\t\t\tmessage: error.message || 'Unknown error occurred'\n\t\t\t}\n\t\t};\n\t}\n}\n"], ["/claude-task-master/src/profiles/kiro.js", "// Kiro profile for rule-transformer\nimport { createProfile } from './base-profile.js';\nimport fs from 'fs';\nimport path from 'path';\nimport { log } from '../../scripts/modules/utils.js';\n\n// Create and export kiro profile using the base factory\nexport const kiroProfile = createProfile({\n\tname: 'kiro',\n\tdisplayName: 'Kiro',\n\turl: 'kiro.dev',\n\tdocsUrl: 'kiro.dev/docs',\n\tprofileDir: '.kiro',\n\trulesDir: '.kiro/steering', // Kiro rules location (full path)\n\tmcpConfig: true,\n\tmcpConfigName: 'settings/mcp.json', // Create directly in settings subdirectory\n\tincludeDefaultRules: true, // Include default rules to get all the standard files\n\ttargetExtension: '.md',\n\tfileMap: {\n\t\t// Override specific mappings - the base profile will create:\n\t\t// 'rules/cursor_rules.mdc': 'kiro_rules.md'\n\t\t// 'rules/dev_workflow.mdc': 'dev_workflow.md'\n\t\t// 'rules/self_improve.mdc': 'self_improve.md'\n\t\t// 'rules/taskmaster.mdc': 'taskmaster.md'\n\t\t// We can add additional custom mappings here if needed\n\t\t'rules/taskmaster_hooks_workflow.mdc': 'taskmaster_hooks_workflow.md'\n\t},\n\tcustomReplacements: [\n\t\t// Core Kiro directory 
structure changes\n\t\t{ from: /\\.cursor\\/rules/g, to: '.kiro/steering' },\n\t\t{ from: /\\.cursor\\/mcp\\.json/g, to: '.kiro/settings/mcp.json' },\n\n\t\t// Fix any remaining kiro/rules references that might be created during transformation\n\t\t{ from: /\\.kiro\\/rules/g, to: '.kiro/steering' },\n\n\t\t// Essential markdown link transformations for Kiro structure\n\t\t{\n\t\t\tfrom: /\\[(.+?)\\]\\(mdc:\\.cursor\\/rules\\/(.+?)\\.mdc\\)/g,\n\t\t\tto: '[$1](.kiro/steering/$2.md)'\n\t\t},\n\n\t\t// Kiro specific terminology\n\t\t{ from: /rules directory/g, to: 'steering directory' },\n\t\t{ from: /cursor rules/gi, to: 'Kiro steering files' },\n\n\t\t// Transform frontmatter to Kiro format\n\t\t// This regex matches the entire frontmatter block and replaces it\n\t\t{\n\t\t\tfrom: /^---\\n(?:description:\\s*[^\\n]*\\n)?(?:globs:\\s*[^\\n]*\\n)?(?:alwaysApply:\\s*true\\n)?---/m,\n\t\t\tto: '---\\ninclusion: always\\n---'\n\t\t}\n\t],\n\n\t// Add lifecycle hook to copy Kiro hooks\n\tonPostConvert: (projectRoot, assetsDir) => {\n\t\tconst hooksSourceDir = path.join(assetsDir, 'kiro-hooks');\n\t\tconst hooksTargetDir = path.join(projectRoot, '.kiro', 'hooks');\n\n\t\t// Create hooks directory if it doesn't exist\n\t\tif (!fs.existsSync(hooksTargetDir)) {\n\t\t\tfs.mkdirSync(hooksTargetDir, { recursive: true });\n\t\t}\n\n\t\t// Copy all .kiro.hook files\n\t\tif (fs.existsSync(hooksSourceDir)) {\n\t\t\tconst hookFiles = fs\n\t\t\t\t.readdirSync(hooksSourceDir)\n\t\t\t\t.filter((f) => f.endsWith('.kiro.hook'));\n\n\t\t\thookFiles.forEach((file) => {\n\t\t\t\tconst sourcePath = path.join(hooksSourceDir, file);\n\t\t\t\tconst targetPath = path.join(hooksTargetDir, file);\n\n\t\t\t\tfs.copyFileSync(sourcePath, targetPath);\n\t\t\t});\n\n\t\t\tif (hookFiles.length > 0) {\n\t\t\t\tlog(\n\t\t\t\t\t'info',\n\t\t\t\t\t`[Kiro] Installed ${hookFiles.length} Taskmaster hooks in .kiro/hooks/`\n\t\t\t\t);\n\t\t\t}\n\t\t}\n\t}\n});\n"], 
["/claude-task-master/mcp-server/src/custom-sdk/schema-converter.js", "/**\n * @fileoverview Schema conversion utilities for MCP AI SDK provider\n */\n\n/**\n * Convert Zod schema to human-readable JSON instructions\n * @param {import('zod').ZodSchema} schema - Zod schema object\n * @param {string} [objectName='result'] - Name of the object being generated\n * @returns {string} Instructions for JSON generation\n */\nexport function convertSchemaToInstructions(schema, objectName = 'result') {\n\ttry {\n\t\t// Generate example structure from schema\n\t\tconst exampleStructure = generateExampleFromSchema(schema);\n\n\t\treturn `\nCRITICAL JSON GENERATION INSTRUCTIONS:\n\nYou must respond with ONLY valid JSON that matches this exact structure for \"${objectName}\":\n\n${JSON.stringify(exampleStructure, null, 2)}\n\nSTRICT REQUIREMENTS:\n1. Response must start with { and end with }\n2. Use double quotes for all strings and property names\n3. Do not include any text before or after the JSON\n4. Do not wrap in markdown code blocks\n5. Do not include explanations or comments\n6. Follow the exact property names and types shown above\n7. All required fields must be present\n\nBegin your response immediately with the opening brace {`;\n\t} catch (error) {\n\t\t// Fallback to basic JSON instructions if schema parsing fails\n\t\treturn `\nCRITICAL JSON GENERATION INSTRUCTIONS:\n\nYou must respond with ONLY valid JSON for \"${objectName}\".\n\nSTRICT REQUIREMENTS:\n1. Response must start with { and end with }\n2. Use double quotes for all strings and property names \n3. Do not include any text before or after the JSON\n4. Do not wrap in markdown code blocks\n5. 
Do not include explanations or comments\n\nBegin your response immediately with the opening brace {`;\n\t}\n}\n\n/**\n * Generate example structure from Zod schema\n * @param {import('zod').ZodSchema} schema - Zod schema\n * @returns {any} Example object matching the schema\n */\nfunction generateExampleFromSchema(schema) {\n\t// This is a simplified schema-to-example converter\n\t// For production, you might want to use a more sophisticated library\n\n\tif (!schema || typeof schema._def === 'undefined') {\n\t\treturn {};\n\t}\n\n\tconst def = schema._def;\n\n\tswitch (def.typeName) {\n\t\tcase 'ZodObject':\n\t\t\tconst result = {};\n\t\t\tconst shape = def.shape();\n\n\t\t\tfor (const [key, fieldSchema] of Object.entries(shape)) {\n\t\t\t\tresult[key] = generateExampleFromSchema(fieldSchema);\n\t\t\t}\n\n\t\t\treturn result;\n\n\t\tcase 'ZodString':\n\t\t\treturn 'string';\n\n\t\tcase 'ZodNumber':\n\t\t\treturn 0;\n\n\t\tcase 'ZodBoolean':\n\t\t\treturn false;\n\n\t\tcase 'ZodArray':\n\t\t\tconst elementExample = generateExampleFromSchema(def.type);\n\t\t\treturn [elementExample];\n\n\t\tcase 'ZodOptional':\n\t\t\treturn generateExampleFromSchema(def.innerType);\n\n\t\tcase 'ZodNullable':\n\t\t\treturn generateExampleFromSchema(def.innerType);\n\n\t\tcase 'ZodEnum':\n\t\t\treturn def.values[0] || 'enum_value';\n\n\t\tcase 'ZodLiteral':\n\t\t\treturn def.value;\n\n\t\tcase 'ZodUnion':\n\t\t\t// Use the first option from the union\n\t\t\tif (def.options && def.options.length > 0) {\n\t\t\t\treturn generateExampleFromSchema(def.options[0]);\n\t\t\t}\n\t\t\treturn 'union_value';\n\n\t\tcase 'ZodRecord':\n\t\t\treturn {\n\t\t\t\tkey: generateExampleFromSchema(def.valueType)\n\t\t\t};\n\n\t\tdefault:\n\t\t\t// For unknown types, return a placeholder\n\t\t\treturn `<${def.typeName || 'unknown'}>`;\n\t}\n}\n\n/**\n * Enhance prompt with JSON generation instructions\n * @param {Array} prompt - AI SDK prompt array\n * @param {string} jsonInstructions - JSON generation 
instructions\n * @returns {Array} Enhanced prompt array\n */\nexport function enhancePromptForJSON(prompt, jsonInstructions) {\n\tconst enhancedPrompt = [...prompt];\n\n\t// Find system message or create one\n\tlet systemMessageIndex = enhancedPrompt.findIndex(\n\t\t(msg) => msg.role === 'system'\n\t);\n\n\tif (systemMessageIndex >= 0) {\n\t\t// Append to existing system message\n\t\tconst currentContent = enhancedPrompt[systemMessageIndex].content;\n\t\tenhancedPrompt[systemMessageIndex] = {\n\t\t\t...enhancedPrompt[systemMessageIndex],\n\t\t\tcontent: currentContent + '\\n\\n' + jsonInstructions\n\t\t};\n\t} else {\n\t\t// Add new system message at the beginning\n\t\tenhancedPrompt.unshift({\n\t\t\trole: 'system',\n\t\t\tcontent: jsonInstructions\n\t\t});\n\t}\n\n\treturn enhancedPrompt;\n}\n"], ["/claude-task-master/mcp-server/src/core/task-master-core.js", "/**\n * task-master-core.js\n * Central module that imports and re-exports all direct function implementations\n * for improved organization and maintainability.\n */\n\n// Import direct function implementations\nimport { listTasksDirect } from './direct-functions/list-tasks.js';\nimport { getCacheStatsDirect } from './direct-functions/cache-stats.js';\nimport { parsePRDDirect } from './direct-functions/parse-prd.js';\nimport { updateTasksDirect } from './direct-functions/update-tasks.js';\nimport { updateTaskByIdDirect } from './direct-functions/update-task-by-id.js';\nimport { updateSubtaskByIdDirect } from './direct-functions/update-subtask-by-id.js';\nimport { generateTaskFilesDirect } from './direct-functions/generate-task-files.js';\nimport { setTaskStatusDirect } from './direct-functions/set-task-status.js';\nimport { showTaskDirect } from './direct-functions/show-task.js';\nimport { nextTaskDirect } from './direct-functions/next-task.js';\nimport { expandTaskDirect } from './direct-functions/expand-task.js';\nimport { addTaskDirect } from './direct-functions/add-task.js';\nimport { addSubtaskDirect } 
from './direct-functions/add-subtask.js';\nimport { removeSubtaskDirect } from './direct-functions/remove-subtask.js';\nimport { analyzeTaskComplexityDirect } from './direct-functions/analyze-task-complexity.js';\nimport { clearSubtasksDirect } from './direct-functions/clear-subtasks.js';\nimport { expandAllTasksDirect } from './direct-functions/expand-all-tasks.js';\nimport { removeDependencyDirect } from './direct-functions/remove-dependency.js';\nimport { validateDependenciesDirect } from './direct-functions/validate-dependencies.js';\nimport { fixDependenciesDirect } from './direct-functions/fix-dependencies.js';\nimport { complexityReportDirect } from './direct-functions/complexity-report.js';\nimport { addDependencyDirect } from './direct-functions/add-dependency.js';\nimport { removeTaskDirect } from './direct-functions/remove-task.js';\nimport { initializeProjectDirect } from './direct-functions/initialize-project.js';\nimport { modelsDirect } from './direct-functions/models.js';\nimport { moveTaskDirect } from './direct-functions/move-task.js';\nimport { researchDirect } from './direct-functions/research.js';\nimport { addTagDirect } from './direct-functions/add-tag.js';\nimport { deleteTagDirect } from './direct-functions/delete-tag.js';\nimport { listTagsDirect } from './direct-functions/list-tags.js';\nimport { useTagDirect } from './direct-functions/use-tag.js';\nimport { renameTagDirect } from './direct-functions/rename-tag.js';\nimport { copyTagDirect } from './direct-functions/copy-tag.js';\n\n// Re-export utility functions\nexport { findTasksPath } from './utils/path-utils.js';\n\n// Use Map for potential future enhancements like introspection or dynamic dispatch\nexport const directFunctions = new Map([\n\t['listTasksDirect', listTasksDirect],\n\t['getCacheStatsDirect', getCacheStatsDirect],\n\t['parsePRDDirect', parsePRDDirect],\n\t['updateTasksDirect', updateTasksDirect],\n\t['updateTaskByIdDirect', 
updateTaskByIdDirect],\n\t['updateSubtaskByIdDirect', updateSubtaskByIdDirect],\n\t['generateTaskFilesDirect', generateTaskFilesDirect],\n\t['setTaskStatusDirect', setTaskStatusDirect],\n\t['showTaskDirect', showTaskDirect],\n\t['nextTaskDirect', nextTaskDirect],\n\t['expandTaskDirect', expandTaskDirect],\n\t['addTaskDirect', addTaskDirect],\n\t['addSubtaskDirect', addSubtaskDirect],\n\t['removeSubtaskDirect', removeSubtaskDirect],\n\t['analyzeTaskComplexityDirect', analyzeTaskComplexityDirect],\n\t['clearSubtasksDirect', clearSubtasksDirect],\n\t['expandAllTasksDirect', expandAllTasksDirect],\n\t['removeDependencyDirect', removeDependencyDirect],\n\t['validateDependenciesDirect', validateDependenciesDirect],\n\t['fixDependenciesDirect', fixDependenciesDirect],\n\t['complexityReportDirect', complexityReportDirect],\n\t['addDependencyDirect', addDependencyDirect],\n\t['removeTaskDirect', removeTaskDirect],\n\t['initializeProjectDirect', initializeProjectDirect],\n\t['modelsDirect', modelsDirect],\n\t['moveTaskDirect', moveTaskDirect],\n\t['researchDirect', researchDirect],\n\t['addTagDirect', addTagDirect],\n\t['deleteTagDirect', deleteTagDirect],\n\t['listTagsDirect', listTagsDirect],\n\t['useTagDirect', useTagDirect],\n\t['renameTagDirect', renameTagDirect],\n\t['copyTagDirect', copyTagDirect]\n]);\n\n// Re-export all direct function implementations\nexport 
{\n\tlistTasksDirect,\n\tgetCacheStatsDirect,\n\tparsePRDDirect,\n\tupdateTasksDirect,\n\tupdateTaskByIdDirect,\n\tupdateSubtaskByIdDirect,\n\tgenerateTaskFilesDirect,\n\tsetTaskStatusDirect,\n\tshowTaskDirect,\n\tnextTaskDirect,\n\texpandTaskDirect,\n\taddTaskDirect,\n\taddSubtaskDirect,\n\tremoveSubtaskDirect,\n\tanalyzeTaskComplexityDirect,\n\tclearSubtasksDirect,\n\texpandAllTasksDirect,\n\tremoveDependencyDirect,\n\tvalidateDependenciesDirect,\n\tfixDependenciesDirect,\n\tcomplexityReportDirect,\n\taddDependencyDirect,\n\tremoveTaskDirect,\n\tinitializeProjectDirect,\n\tmodelsDirect,\n\tmoveTaskDirect,\n\tresearchDirect,\n\taddTagDirect,\n\tdeleteTagDirect,\n\tlistTagsDirect,\n\tuseTagDirect,\n\trenameTagDirect,\n\tcopyTagDirect\n};\n"], ["/claude-task-master/src/profiles/roo.js", "// Roo Code conversion profile for rule-transformer\nimport path from 'path';\nimport fs from 'fs';\nimport { isSilentMode, log } from '../../scripts/modules/utils.js';\nimport { createProfile, COMMON_TOOL_MAPPINGS } from './base-profile.js';\nimport { ROO_MODES } from '../constants/profiles.js';\n\n// Lifecycle functions for Roo profile\nfunction onAddRulesProfile(targetDir, assetsDir) {\n\t// Use the provided assets directory to find the roocode directory\n\tconst sourceDir = path.join(assetsDir, 'roocode');\n\n\tif (!fs.existsSync(sourceDir)) {\n\t\tlog('error', `[Roo] Source directory does not exist: ${sourceDir}`);\n\t\treturn;\n\t}\n\n\tcopyRecursiveSync(sourceDir, targetDir);\n\tlog('debug', `[Roo] Copied roocode directory to ${targetDir}`);\n\n\tconst rooModesDir = path.join(sourceDir, '.roo');\n\n\t// Copy .roomodes to project root\n\tconst roomodesSrc = path.join(sourceDir, '.roomodes');\n\tconst roomodesDest = path.join(targetDir, '.roomodes');\n\tif (fs.existsSync(roomodesSrc)) {\n\t\ttry {\n\t\t\tfs.copyFileSync(roomodesSrc, roomodesDest);\n\t\t\tlog('debug', `[Roo] Copied .roomodes to ${roomodesDest}`);\n\t\t} catch (err) {\n\t\t\tlog('error', `[Roo] Failed to copy 
.roomodes: ${err.message}`);\n\t\t}\n\t}\n\n\tfor (const mode of ROO_MODES) {\n\t\tconst src = path.join(rooModesDir, `rules-${mode}`, `${mode}-rules`);\n\t\tconst dest = path.join(targetDir, '.roo', `rules-${mode}`, `${mode}-rules`);\n\t\tif (fs.existsSync(src)) {\n\t\t\ttry {\n\t\t\t\tconst destDir = path.dirname(dest);\n\t\t\t\tif (!fs.existsSync(destDir)) fs.mkdirSync(destDir, { recursive: true });\n\t\t\t\tfs.copyFileSync(src, dest);\n\t\t\t\tlog('debug', `[Roo] Copied ${mode}-rules to ${dest}`);\n\t\t\t} catch (err) {\n\t\t\t\tlog('error', `[Roo] Failed to copy ${src} to ${dest}: ${err.message}`);\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunction copyRecursiveSync(src, dest) {\n\tconst exists = fs.existsSync(src);\n\tconst stats = exists && fs.statSync(src);\n\tconst isDirectory = exists && stats.isDirectory();\n\tif (isDirectory) {\n\t\tif (!fs.existsSync(dest)) fs.mkdirSync(dest, { recursive: true });\n\t\tfs.readdirSync(src).forEach((childItemName) => {\n\t\t\tcopyRecursiveSync(\n\t\t\t\tpath.join(src, childItemName),\n\t\t\t\tpath.join(dest, childItemName)\n\t\t\t);\n\t\t});\n\t} else {\n\t\tfs.copyFileSync(src, dest);\n\t}\n}\n\nfunction onRemoveRulesProfile(targetDir) {\n\tconst roomodesPath = path.join(targetDir, '.roomodes');\n\tif (fs.existsSync(roomodesPath)) {\n\t\ttry {\n\t\t\tfs.rmSync(roomodesPath, { force: true });\n\t\t\tlog('debug', `[Roo] Removed .roomodes from ${roomodesPath}`);\n\t\t} catch (err) {\n\t\t\tlog('error', `[Roo] Failed to remove .roomodes: ${err.message}`);\n\t\t}\n\t}\n\n\tconst rooDir = path.join(targetDir, '.roo');\n\tif (fs.existsSync(rooDir)) {\n\t\tfs.readdirSync(rooDir).forEach((entry) => {\n\t\t\tif (entry.startsWith('rules-')) {\n\t\t\t\tconst modeDir = path.join(rooDir, entry);\n\t\t\t\ttry {\n\t\t\t\t\tfs.rmSync(modeDir, { recursive: true, force: true });\n\t\t\t\t\tlog('debug', `[Roo] Removed ${entry} directory from ${modeDir}`);\n\t\t\t\t} catch (err) {\n\t\t\t\t\tlog('error', `[Roo] Failed to remove ${modeDir}: 
${err.message}`);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t\tif (fs.readdirSync(rooDir).length === 0) {\n\t\t\ttry {\n\t\t\t\tfs.rmSync(rooDir, { recursive: true, force: true });\n\t\t\t\tlog('debug', `[Roo] Removed empty .roo directory from ${rooDir}`);\n\t\t\t} catch (err) {\n\t\t\t\tlog('error', `[Roo] Failed to remove .roo directory: ${err.message}`);\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunction onPostConvertRulesProfile(targetDir, assetsDir) {\n\tonAddRulesProfile(targetDir, assetsDir);\n}\n\n// Create and export roo profile using the base factory\nexport const rooProfile = createProfile({\n\tname: 'roo',\n\tdisplayName: 'Roo Code',\n\turl: 'roocode.com',\n\tdocsUrl: 'docs.roocode.com',\n\ttoolMappings: COMMON_TOOL_MAPPINGS.ROO_STYLE,\n\tonAdd: onAddRulesProfile,\n\tonRemove: onRemoveRulesProfile,\n\tonPostConvert: onPostConvertRulesProfile\n});\n\n// Export lifecycle functions separately to avoid naming conflicts\nexport { onAddRulesProfile, onRemoveRulesProfile, onPostConvertRulesProfile };\n"], ["/claude-task-master/mcp-server/src/tools/index.js", "/**\n * tools/index.js\n * Export all Task Master CLI tools for MCP server\n */\n\nimport { registerListTasksTool } from './get-tasks.js';\nimport logger from '../logger.js';\nimport { registerSetTaskStatusTool } from './set-task-status.js';\nimport { registerParsePRDTool } from './parse-prd.js';\nimport { registerUpdateTool } from './update.js';\nimport { registerUpdateTaskTool } from './update-task.js';\nimport { registerUpdateSubtaskTool } from './update-subtask.js';\nimport { registerGenerateTool } from './generate.js';\nimport { registerShowTaskTool } from './get-task.js';\nimport { registerNextTaskTool } from './next-task.js';\nimport { registerExpandTaskTool } from './expand-task.js';\nimport { registerAddTaskTool } from './add-task.js';\nimport { registerAddSubtaskTool } from './add-subtask.js';\nimport { registerRemoveSubtaskTool } from './remove-subtask.js';\nimport { registerAnalyzeProjectComplexityTool } from 
'./analyze.js';\nimport { registerClearSubtasksTool } from './clear-subtasks.js';\nimport { registerExpandAllTool } from './expand-all.js';\nimport { registerRemoveDependencyTool } from './remove-dependency.js';\nimport { registerValidateDependenciesTool } from './validate-dependencies.js';\nimport { registerFixDependenciesTool } from './fix-dependencies.js';\nimport { registerComplexityReportTool } from './complexity-report.js';\nimport { registerAddDependencyTool } from './add-dependency.js';\nimport { registerRemoveTaskTool } from './remove-task.js';\nimport { registerInitializeProjectTool } from './initialize-project.js';\nimport { registerModelsTool } from './models.js';\nimport { registerMoveTaskTool } from './move-task.js';\nimport { registerResponseLanguageTool } from './response-language.js';\nimport { registerAddTagTool } from './add-tag.js';\nimport { registerDeleteTagTool } from './delete-tag.js';\nimport { registerListTagsTool } from './list-tags.js';\nimport { registerUseTagTool } from './use-tag.js';\nimport { registerRenameTagTool } from './rename-tag.js';\nimport { registerCopyTagTool } from './copy-tag.js';\nimport { registerResearchTool } from './research.js';\nimport { registerRulesTool } from './rules.js';\n\n/**\n * Register all Task Master tools with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerTaskMasterTools(server) {\n\ttry {\n\t\t// Register each tool in a logical workflow order\n\n\t\t// Group 1: Initialization & Setup\n\t\tregisterInitializeProjectTool(server);\n\t\tregisterModelsTool(server);\n\t\tregisterRulesTool(server);\n\t\tregisterParsePRDTool(server);\n\n\t\t// Group 2: Task Analysis & Expansion\n\t\tregisterAnalyzeProjectComplexityTool(server);\n\t\tregisterExpandTaskTool(server);\n\t\tregisterExpandAllTool(server);\n\n\t\t// Group 3: Task Listing & 
Viewing\n\t\tregisterListTasksTool(server);\n\t\tregisterShowTaskTool(server);\n\t\tregisterNextTaskTool(server);\n\t\tregisterComplexityReportTool(server);\n\n\t\t// Group 4: Task Status & Management\n\t\tregisterSetTaskStatusTool(server);\n\t\tregisterGenerateTool(server);\n\n\t\t// Group 5: Task Creation & Modification\n\t\tregisterAddTaskTool(server);\n\t\tregisterAddSubtaskTool(server);\n\t\tregisterUpdateTool(server);\n\t\tregisterUpdateTaskTool(server);\n\t\tregisterUpdateSubtaskTool(server);\n\t\tregisterRemoveTaskTool(server);\n\t\tregisterRemoveSubtaskTool(server);\n\t\tregisterClearSubtasksTool(server);\n\t\tregisterMoveTaskTool(server);\n\n\t\t// Group 6: Dependency Management\n\t\tregisterAddDependencyTool(server);\n\t\tregisterRemoveDependencyTool(server);\n\t\tregisterValidateDependenciesTool(server);\n\t\tregisterFixDependenciesTool(server);\n\t\tregisterResponseLanguageTool(server);\n\n\t\t// Group 7: Tag Management\n\t\tregisterListTagsTool(server);\n\t\tregisterAddTagTool(server);\n\t\tregisterDeleteTagTool(server);\n\t\tregisterUseTagTool(server);\n\t\tregisterRenameTagTool(server);\n\t\tregisterCopyTagTool(server);\n\n\t\t// Group 8: Research Features\n\t\tregisterResearchTool(server);\n\t} catch (error) {\n\t\tlogger.error(`Error registering Task Master tools: ${error.message}`);\n\t\tthrow error;\n\t}\n}\n\nexport default {\n\tregisterTaskMasterTools\n};\n"], ["/claude-task-master/mcp-server/src/tools/models.js", "/**\n * models.js\n * MCP tool for managing AI model configurations\n */\n\nimport { z } from 'zod';\nimport {\n\thandleApiResult,\n\tcreateErrorResponse,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { modelsDirect } from '../core/task-master-core.js';\n\n/**\n * Register the models tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerModelsTool(server) {\n\tserver.addTool({\n\t\tname: 'models',\n\t\tdescription:\n\t\t\t'Get information about available AI models or 
set model configurations. Run without arguments to get the current model configuration and API key status for the selected model providers.',\n\t\tparameters: z.object({\n\t\t\tsetMain: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Set the primary model for task generation/updates. Model provider API key is required in the MCP config ENV.'\n\t\t\t\t),\n\t\t\tsetResearch: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Set the model for research-backed operations. Model provider API key is required in the MCP config ENV.'\n\t\t\t\t),\n\t\t\tsetFallback: z\n\t\t\t\t.string()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Set the model to use if the primary fails. Model provider API key is required in the MCP config ENV.'\n\t\t\t\t),\n\t\t\tlistAvailableModels: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'List all available models not currently in use. Input/output costs values are in dollars (3 is $3.00).'\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe('The directory of the project. 
Must be an absolute path.'),\n\t\t\topenrouter: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Indicates the set model ID is a custom OpenRouter model.'),\n\t\t\tollama: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Indicates the set model ID is a custom Ollama model.'),\n\t\t\tbedrock: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Indicates the set model ID is a custom AWS Bedrock model.'),\n\t\t\tazure: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe('Indicates the set model ID is a custom Azure OpenAI model.'),\n\t\t\tvertex: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.describe(\n\t\t\t\t\t'Indicates the set model ID is a custom Google Vertex AI model.'\n\t\t\t\t)\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(`Starting models tool with args: ${JSON.stringify(args)}`);\n\n\t\t\t\t// Use args.projectRoot directly (guaranteed by withNormalizedProjectRoot)\n\t\t\t\tconst result = await modelsDirect(\n\t\t\t\t\t{ ...args, projectRoot: args.projectRoot },\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\n\t\t\t\treturn handleApiResult(\n\t\t\t\t\tresult,\n\t\t\t\t\tlog,\n\t\t\t\t\t'Error managing models',\n\t\t\t\t\tundefined,\n\t\t\t\t\targs.projectRoot\n\t\t\t\t);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in models tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/scripts/modules/task-manager/task-exists.js", "/**\n * Checks if a task with the given ID exists\n * @param {Array} tasks - Array of tasks to search\n * @param {string|number} taskId - ID of task or subtask to check\n * @returns {boolean} Whether the task exists\n */\nfunction taskExists(tasks, taskId) {\n\t// Handle subtask IDs (e.g., \"1.2\")\n\tif (typeof taskId === 'string' && taskId.includes('.')) {\n\t\tconst [parentIdStr, subtaskIdStr] = 
taskId.split('.');\n\t\tconst parentId = parseInt(parentIdStr, 10);\n\t\tconst subtaskId = parseInt(subtaskIdStr, 10);\n\n\t\t// Find the parent task\n\t\tconst parentTask = tasks.find((t) => t.id === parentId);\n\n\t\t// If parent exists, check if subtask exists\n\t\treturn (\n\t\t\tparentTask &&\n\t\t\tparentTask.subtasks &&\n\t\t\tparentTask.subtasks.some((st) => st.id === subtaskId)\n\t\t);\n\t}\n\n\t// Handle regular task IDs\n\tconst id = parseInt(taskId, 10);\n\treturn tasks.some((t) => t.id === id);\n}\n\nexport default taskExists;\n"], ["/claude-task-master/src/ui/confirm.js", "import chalk from 'chalk';\nimport boxen from 'boxen';\n\n/**\n * Confirm removing profile rules (destructive operation)\n * @param {string[]} profiles - Array of profile names to remove\n * @returns {Promise<boolean>} - Promise resolving to true if user confirms, false otherwise\n */\nasync function confirmProfilesRemove(profiles) {\n\tconst profileList = profiles\n\t\t.map((b) => b.charAt(0).toUpperCase() + b.slice(1))\n\t\t.join(', ');\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.yellow(\n\t\t\t\t`WARNING: This will selectively remove Task Master components for: ${profileList}.\n\nWhat will be removed:\n• Task Master specific rule files (e.g., cursor_rules.mdc, taskmaster.mdc, etc.)\n• Task Master MCP server configuration (if no other MCP servers exist)\n\nWhat will be preserved:\n• Your existing custom rule files\n• Other MCP server configurations\n• The profile directory itself (unless completely empty after removal)\n\nThe .[profile] directory will only be removed if ALL of the following are true:\n• All rules in the directory were Task Master rules (no custom rules)\n• No other files or folders exist in the profile directory\n• The MCP configuration was completely removed (no other servers)\n\nAre you sure you want to proceed?`\n\t\t\t),\n\t\t\t{ padding: 1, borderColor: 'yellow', borderStyle: 'round' }\n\t\t)\n\t);\n\tconst inquirer = await import('inquirer');\n\tconst { confirm 
} = await inquirer.default.prompt([\n\t\t{\n\t\t\ttype: 'confirm',\n\t\t\tname: 'confirm',\n\t\t\tmessage: 'Type y to confirm selective removal, or n to abort:',\n\t\t\tdefault: false\n\t\t}\n\t]);\n\treturn confirm;\n}\n\n/**\n * Confirm removing ALL remaining profile rules (extremely critical operation)\n * @param {string[]} profiles - Array of profile names to remove\n * @param {string[]} remainingProfiles - Array of profiles that would be left after removal\n * @returns {Promise<boolean>} - Promise resolving to true if user confirms, false otherwise\n */\nasync function confirmRemoveAllRemainingProfiles(profiles, remainingProfiles) {\n\tconst profileList = profiles\n\t\t.map((p) => p.charAt(0).toUpperCase() + p.slice(1))\n\t\t.join(', ');\n\n\tconsole.log(\n\t\tboxen(\n\t\t\tchalk.red.bold(\n\t\t\t\t`⚠️ CRITICAL WARNING: REMOVING ALL TASK MASTER RULE PROFILES ⚠️\\n\\n` +\n\t\t\t\t\t`You are about to remove Task Master components for: ${profileList}\\n` +\n\t\t\t\t\t`This will leave your project with NO Task Master rule profiles remaining!\\n\\n` +\n\t\t\t\t\t`What will be removed:\\n` +\n\t\t\t\t\t`• All Task Master specific rule files\\n` +\n\t\t\t\t\t`• Task Master MCP server configurations\\n` +\n\t\t\t\t\t`• Profile directories (only if completely empty after removal)\\n\\n` +\n\t\t\t\t\t`What will be preserved:\\n` +\n\t\t\t\t\t`• Your existing custom rule files\\n` +\n\t\t\t\t\t`• Other MCP server configurations\\n` +\n\t\t\t\t\t`• Profile directories with custom content\\n\\n` +\n\t\t\t\t\t`This could impact Task Master functionality but will preserve your custom configurations.\\n\\n` +\n\t\t\t\t\t`Are you absolutely sure you want to proceed?`\n\t\t\t),\n\t\t\t{\n\t\t\t\tpadding: 1,\n\t\t\t\tborderColor: 'red',\n\t\t\t\tborderStyle: 'double',\n\t\t\t\ttitle: '🚨 CRITICAL OPERATION',\n\t\t\t\ttitleAlignment: 'center'\n\t\t\t}\n\t\t)\n\t);\n\n\tconst inquirer = await import('inquirer');\n\tconst { confirm } = await 
inquirer.default.prompt([\n\t\t{\n\t\t\ttype: 'confirm',\n\t\t\tname: 'confirm',\n\t\t\tmessage:\n\t\t\t\t'Type y to confirm removing ALL Task Master rule profiles, or n to abort:',\n\t\t\tdefault: false\n\t\t}\n\t]);\n\treturn confirm;\n}\n\nexport { confirmProfilesRemove, confirmRemoveAllRemainingProfiles };\n"], ["/claude-task-master/mcp-server/src/tools/rules.js", "/**\n * tools/rules.js\n * Tool to add or remove rules from a project (MCP server)\n */\n\nimport { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { rulesDirect } from '../core/direct-functions/rules.js';\nimport { RULE_PROFILES } from '../../../src/constants/profiles.js';\n\n/**\n * Register the rules tool with the MCP server\n * @param {Object} server - FastMCP server instance\n */\nexport function registerRulesTool(server) {\n\tserver.addTool({\n\t\tname: 'rules',\n\t\tdescription: 'Add or remove rule profiles from the project.',\n\t\tparameters: z.object({\n\t\t\taction: z\n\t\t\t\t.enum(['add', 'remove'])\n\t\t\t\t.describe('Whether to add or remove rule profiles.'),\n\t\t\tprofiles: z\n\t\t\t\t.array(z.enum(RULE_PROFILES))\n\t\t\t\t.min(1)\n\t\t\t\t.describe(\n\t\t\t\t\t`List of rule profiles to add or remove (e.g., [\\\"cursor\\\", \\\"roo\\\"]). Available options: ${RULE_PROFILES.join(', ')}`\n\t\t\t\t),\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'The root directory of the project. Must be an absolute path.'\n\t\t\t\t),\n\t\t\tforce: z\n\t\t\t\t.boolean()\n\t\t\t\t.optional()\n\t\t\t\t.default(false)\n\t\t\t\t.describe(\n\t\t\t\t\t'DANGEROUS: Force removal even if it would leave no rule profiles. 
Only use if you are absolutely certain.'\n\t\t\t\t)\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`[rules tool] Executing action: ${args.action} for profiles: ${args.profiles.join(', ')} in ${args.projectRoot}`\n\t\t\t\t);\n\t\t\t\tconst result = await rulesDirect(args, log, { session });\n\t\t\t\treturn handleApiResult(result, log);\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`[rules tool] Error: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message, { details: error.stack });\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/scripts/modules/task-manager/is-task-dependent.js", "/**\n * Check if a task is dependent on another task (directly or indirectly)\n * Used to prevent circular dependencies\n * @param {Array} allTasks - Array of all tasks\n * @param {Object} task - The task to check\n * @param {number} targetTaskId - The task ID to check dependency against\n * @returns {boolean} Whether the task depends on the target task\n */\nfunction isTaskDependentOn(allTasks, task, targetTaskId) {\n\t// If the task is a subtask, check if its parent is the target\n\tif (task.parentTaskId === targetTaskId) {\n\t\treturn true;\n\t}\n\n\t// Check direct dependencies\n\tif (task.dependencies && task.dependencies.includes(targetTaskId)) {\n\t\treturn true;\n\t}\n\n\t// Check dependencies of dependencies (recursive)\n\tif (task.dependencies) {\n\t\tfor (const depId of task.dependencies) {\n\t\t\tconst depTask = allTasks.find((t) => t.id === depId);\n\t\t\tif (depTask && isTaskDependentOn(allTasks, depTask, targetTaskId)) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check subtasks for dependencies\n\tif (task.subtasks) {\n\t\tfor (const subtask of task.subtasks) {\n\t\t\tif (isTaskDependentOn(allTasks, subtask, targetTaskId)) {\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false;\n}\n\nexport default isTaskDependentOn;\n"], 
["/claude-task-master/scripts/test-claude.js", "#!/usr/bin/env node\n\n/**\n * test-claude.js\n *\n * A simple test script to verify the improvements to the callClaude function.\n * This script tests different scenarios:\n * 1. Normal operation with a small PRD\n * 2. Testing with a large number of tasks (to potentially trigger task reduction)\n * 3. Simulating a failure to test retry logic\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport dotenv from 'dotenv';\nimport { fileURLToPath } from 'url';\nimport { dirname } from 'path';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = dirname(__filename);\n\n// Load environment variables from .env file\ndotenv.config();\n\n// Create a simple PRD for testing\nconst createTestPRD = (size = 'small', taskComplexity = 'simple') => {\n\tlet content = `# Test PRD - ${size.toUpperCase()} SIZE, ${taskComplexity.toUpperCase()} COMPLEXITY\\n\\n`;\n\n\t// Add more content based on size\n\tif (size === 'small') {\n\t\tcontent += `\n## Overview\nThis is a small test PRD to verify the callClaude function improvements.\n\n## Requirements\n1. Create a simple web application\n2. Implement user authentication\n3. Add a dashboard for users\n4. Create an admin panel\n5. Implement data visualization\n\n## Technical Stack\n- Frontend: React\n- Backend: Node.js\n- Database: MongoDB\n`;\n\t} else if (size === 'medium') {\n\t\t// Medium-sized PRD with more requirements\n\t\tcontent += `\n## Overview\nThis is a medium-sized test PRD to verify the callClaude function improvements.\n\n## Requirements\n1. Create a web application with multiple pages\n2. Implement user authentication with OAuth\n3. Add a dashboard for users with customizable widgets\n4. Create an admin panel with user management\n5. Implement data visualization with charts and graphs\n6. Add real-time notifications\n7. Implement a search feature\n8. Add user profile management\n9. Implement role-based access control\n10. Add a reporting system\n11. 
Implement file uploads and management\n12. Add a commenting system\n13. Implement a rating system\n14. Add a recommendation engine\n15. Implement a payment system\n\n## Technical Stack\n- Frontend: React with TypeScript\n- Backend: Node.js with Express\n- Database: MongoDB with Mongoose\n- Authentication: JWT and OAuth\n- Deployment: Docker and Kubernetes\n- CI/CD: GitHub Actions\n- Monitoring: Prometheus and Grafana\n`;\n\t} else if (size === 'large') {\n\t\t// Large PRD with many requirements\n\t\tcontent += `\n## Overview\nThis is a large test PRD to verify the callClaude function improvements.\n\n## Requirements\n`;\n\t\t// Generate 30 requirements\n\t\tfor (let i = 1; i <= 30; i++) {\n\t\t\tcontent += `${i}. Requirement ${i} - This is a detailed description of requirement ${i}.\\n`;\n\t\t}\n\n\t\tcontent += `\n## Technical Stack\n- Frontend: React with TypeScript\n- Backend: Node.js with Express\n- Database: MongoDB with Mongoose\n- Authentication: JWT and OAuth\n- Deployment: Docker and Kubernetes\n- CI/CD: GitHub Actions\n- Monitoring: Prometheus and Grafana\n\n## User Stories\n`;\n\t\t// Generate 20 user stories\n\t\tfor (let i = 1; i <= 20; i++) {\n\t\t\tcontent += `- As a user, I want to be able to ${i} so that I can achieve benefit ${i}.\\n`;\n\t\t}\n\n\t\tcontent += `\n## Non-Functional Requirements\n- Performance: The system should respond within 200ms\n- Scalability: The system should handle 10,000 concurrent users\n- Availability: The system should have 99.9% uptime\n- Security: The system should comply with OWASP top 10\n- Accessibility: The system should comply with WCAG 2.1 AA\n`;\n\t}\n\n\t// Add complexity if needed\n\tif (taskComplexity === 'complex') {\n\t\tcontent += `\n## Complex Requirements\n- Implement a real-time collaboration system\n- Add a machine learning-based recommendation engine\n- Implement a distributed caching system\n- Add a microservices architecture\n- Implement a custom analytics engine\n- Add support for multiple 
languages and locales\n- Implement a custom search engine with advanced filtering\n- Add a custom workflow engine\n- Implement a custom reporting system\n- Add a custom dashboard builder\n`;\n\t}\n\n\treturn content;\n};\n\n// Function to run the tests\nasync function runTests() {\n\tconsole.log('Starting tests for callClaude function improvements...');\n\n\ttry {\n\t\t// Instead of importing the callClaude function directly, we'll use the dev.js script\n\t\t// with our test PRDs by running it as a child process\n\n\t\t// Test 1: Small PRD, 5 tasks\n\t\tconsole.log('\\n=== Test 1: Small PRD, 5 tasks ===');\n\t\tconst smallPRD = createTestPRD('small', 'simple');\n\t\tconst smallPRDPath = path.join(__dirname, 'test-small-prd.txt');\n\t\tfs.writeFileSync(smallPRDPath, smallPRD, 'utf8');\n\n\t\tconsole.log(`Created test PRD at ${smallPRDPath}`);\n\t\tconsole.log('Running dev.js with small PRD...');\n\n\t\t// Use the child_process module to run the dev.js script\n\t\tconst { execSync } = await import('child_process');\n\n\t\ttry {\n\t\t\tconst smallResult = execSync(\n\t\t\t\t`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${smallPRDPath} --num-tasks=5`,\n\t\t\t\t{\n\t\t\t\t\tstdio: 'inherit'\n\t\t\t\t}\n\t\t\t);\n\t\t\tconsole.log('Small PRD test completed successfully');\n\t\t} catch (error) {\n\t\t\tconsole.error('Small PRD test failed:', error.message);\n\t\t}\n\n\t\t// Test 2: Medium PRD, 15 tasks\n\t\tconsole.log('\\n=== Test 2: Medium PRD, 15 tasks ===');\n\t\tconst mediumPRD = createTestPRD('medium', 'simple');\n\t\tconst mediumPRDPath = path.join(__dirname, 'test-medium-prd.txt');\n\t\tfs.writeFileSync(mediumPRDPath, mediumPRD, 'utf8');\n\n\t\tconsole.log(`Created test PRD at ${mediumPRDPath}`);\n\t\tconsole.log('Running dev.js with medium PRD...');\n\n\t\ttry {\n\t\t\tconst mediumResult = execSync(\n\t\t\t\t`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${mediumPRDPath} --num-tasks=15`,\n\t\t\t\t{\n\t\t\t\t\tstdio: 
'inherit'\n\t\t\t\t}\n\t\t\t);\n\t\t\tconsole.log('Medium PRD test completed successfully');\n\t\t} catch (error) {\n\t\t\tconsole.error('Medium PRD test failed:', error.message);\n\t\t}\n\n\t\t// Test 3: Large PRD, 25 tasks\n\t\tconsole.log('\\n=== Test 3: Large PRD, 25 tasks ===');\n\t\tconst largePRD = createTestPRD('large', 'complex');\n\t\tconst largePRDPath = path.join(__dirname, 'test-large-prd.txt');\n\t\tfs.writeFileSync(largePRDPath, largePRD, 'utf8');\n\n\t\tconsole.log(`Created test PRD at ${largePRDPath}`);\n\t\tconsole.log('Running dev.js with large PRD...');\n\n\t\ttry {\n\t\t\tconst largeResult = execSync(\n\t\t\t\t`node ${path.join(__dirname, 'dev.js')} parse-prd --input=${largePRDPath} --num-tasks=25`,\n\t\t\t\t{\n\t\t\t\t\tstdio: 'inherit'\n\t\t\t\t}\n\t\t\t);\n\t\t\tconsole.log('Large PRD test completed successfully');\n\t\t} catch (error) {\n\t\t\tconsole.error('Large PRD test failed:', error.message);\n\t\t}\n\n\t\tconsole.log('\\nAll tests completed!');\n\t} catch (error) {\n\t\tconsole.error('Test failed:', error);\n\t} finally {\n\t\t// Clean up test files\n\t\tconsole.log('\\nCleaning up test files...');\n\t\tconst testFiles = [\n\t\t\tpath.join(__dirname, 'test-small-prd.txt'),\n\t\t\tpath.join(__dirname, 'test-medium-prd.txt'),\n\t\t\tpath.join(__dirname, 'test-large-prd.txt')\n\t\t];\n\n\t\ttestFiles.forEach((file) => {\n\t\t\tif (fs.existsSync(file)) {\n\t\t\t\tfs.unlinkSync(file);\n\t\t\t\tconsole.log(`Deleted ${file}`);\n\t\t\t}\n\t\t});\n\n\t\tconsole.log('Cleanup complete.');\n\t}\n}\n\n// Run the tests\nrunTests().catch((error) => {\n\tconsole.error('Error running tests:', error);\n\tprocess.exit(1);\n});\n"], ["/claude-task-master/mcp-server/src/core/context-manager.js", "/**\n * context-manager.js\n * Context and cache management for Task Master MCP Server\n */\n\nimport { FastMCP } from 'fastmcp';\nimport { LRUCache } from 'lru-cache';\n\n/**\n * Configuration options for the ContextManager\n * @typedef {Object} 
ContextManagerConfig\n * @property {number} maxCacheSize - Maximum number of items in the cache\n * @property {number} ttl - Time to live for cached items in milliseconds\n * @property {number} maxContextSize - Maximum size of context window in tokens\n */\n\nexport class ContextManager {\n\t/**\n\t * Create a new ContextManager instance\n\t * @param {ContextManagerConfig} config - Configuration options\n\t */\n\tconstructor(config = {}) {\n\t\tthis.config = {\n\t\t\tmaxCacheSize: config.maxCacheSize || 1000,\n\t\t\tttl: config.ttl || 1000 * 60 * 5, // 5 minutes default\n\t\t\tmaxContextSize: config.maxContextSize || 4000\n\t\t};\n\n\t\t// Initialize LRU cache for context data\n\t\tthis.cache = new LRUCache({\n\t\t\tmax: this.config.maxCacheSize,\n\t\t\tttl: this.config.ttl,\n\t\t\tupdateAgeOnGet: true\n\t\t});\n\n\t\t// Cache statistics\n\t\tthis.stats = {\n\t\t\thits: 0,\n\t\t\tmisses: 0,\n\t\t\tinvalidations: 0\n\t\t};\n\t}\n\n\t/**\n\t * Create a new context or retrieve from cache\n\t * @param {string} contextId - Unique identifier for the context\n\t * @param {Object} metadata - Additional metadata for the context\n\t * @returns {Object} Context object with metadata\n\t */\n\tasync getContext(contextId, metadata = {}) {\n\t\tconst cacheKey = this._getCacheKey(contextId, metadata);\n\n\t\t// Try to get from cache first\n\t\tconst cached = this.cache.get(cacheKey);\n\t\tif (cached) {\n\t\t\tthis.stats.hits++;\n\t\t\treturn cached;\n\t\t}\n\n\t\tthis.stats.misses++;\n\n\t\t// Create new context if not in cache\n\t\tconst context = {\n\t\t\tid: contextId,\n\t\t\tmetadata: {\n\t\t\t\t...metadata,\n\t\t\t\tcreated: new Date().toISOString()\n\t\t\t}\n\t\t};\n\n\t\t// Cache the new context\n\t\tthis.cache.set(cacheKey, context);\n\n\t\treturn context;\n\t}\n\n\t/**\n\t * Update an existing context\n\t * @param {string} contextId - Context identifier\n\t * @param {Object} updates - Updates to apply to the context\n\t * @returns {Object} Updated context\n\t */\n\tasync 
updateContext(contextId, updates) {\n\t\tconst context = await this.getContext(contextId);\n\n\t\t// Apply updates to context\n\t\tObject.assign(context.metadata, updates);\n\n\t\t// Update cache\n\t\tconst cacheKey = this._getCacheKey(contextId, context.metadata);\n\t\tthis.cache.set(cacheKey, context);\n\n\t\treturn context;\n\t}\n\n\t/**\n\t * Invalidate a context in the cache\n\t * @param {string} contextId - Context identifier\n\t * @param {Object} metadata - Metadata used in the cache key\n\t */\n\tinvalidateContext(contextId, metadata = {}) {\n\t\tconst cacheKey = this._getCacheKey(contextId, metadata);\n\t\tthis.cache.delete(cacheKey);\n\t\tthis.stats.invalidations++;\n\t}\n\n\t/**\n\t * Get cached data associated with a specific key.\n\t * Increments cache hit stats if found.\n\t * @param {string} key - The cache key.\n\t * @returns {any | undefined} The cached data or undefined if not found/expired.\n\t */\n\tgetCachedData(key) {\n\t\tconst cached = this.cache.get(key);\n\t\tif (cached !== undefined) {\n\t\t\t// Check for undefined specifically, as null/false might be valid cached values\n\t\t\tthis.stats.hits++;\n\t\t\treturn cached;\n\t\t}\n\t\tthis.stats.misses++;\n\t\treturn undefined;\n\t}\n\n\t/**\n\t * Set data in the cache with a specific key.\n\t * @param {string} key - The cache key.\n\t * @param {any} data - The data to cache.\n\t */\n\tsetCachedData(key, data) {\n\t\tthis.cache.set(key, data);\n\t}\n\n\t/**\n\t * Invalidate a specific cache key.\n\t * Increments invalidation stats.\n\t * @param {string} key - The cache key to invalidate.\n\t */\n\tinvalidateCacheKey(key) {\n\t\tthis.cache.delete(key);\n\t\tthis.stats.invalidations++;\n\t}\n\n\t/**\n\t * Get cache statistics\n\t * @returns {Object} Cache statistics\n\t */\n\tgetStats() {\n\t\treturn {\n\t\t\thits: this.stats.hits,\n\t\t\tmisses: this.stats.misses,\n\t\t\tinvalidations: this.stats.invalidations,\n\t\t\tsize: this.cache.size,\n\t\t\tmaxSize: this.config.maxCacheSize,\n\t\t\tttl: 
this.config.ttl\n\t\t};\n\t}\n\n\t/**\n\t * Generate a cache key from context ID and metadata\n\t * @private\n\t * @deprecated No longer used for direct cache key generation outside the manager.\n\t * Prefer generating specific keys in calling functions.\n\t */\n\t_getCacheKey(contextId, metadata) {\n\t\t// Kept for potential backward compatibility or internal use if needed later.\n\t\treturn `${contextId}:${JSON.stringify(metadata)}`;\n\t}\n}\n\n// Export a singleton instance with default config\nexport const contextManager = new ContextManager();\n"], ["/claude-task-master/mcp-server/src/custom-sdk/errors.js", "/**\n * src/ai-providers/custom-sdk/mcp/errors.js\n *\n * Error handling utilities for MCP AI SDK provider.\n * Maps MCP errors to AI SDK compatible error types.\n */\n\n/**\n * MCP-specific error class\n */\nexport class MCPError extends Error {\n\tconstructor(message, options = {}) {\n\t\tsuper(message);\n\t\tthis.name = 'MCPError';\n\t\tthis.code = options.code;\n\t\tthis.cause = options.cause;\n\t\tthis.mcpResponse = options.mcpResponse;\n\t}\n}\n\n/**\n * Session-related error\n */\nexport class MCPSessionError extends MCPError {\n\tconstructor(message, options = {}) {\n\t\tsuper(message, options);\n\t\tthis.name = 'MCPSessionError';\n\t}\n}\n\n/**\n * Sampling-related error\n */\nexport class MCPSamplingError extends MCPError {\n\tconstructor(message, options = {}) {\n\t\tsuper(message, options);\n\t\tthis.name = 'MCPSamplingError';\n\t}\n}\n\n/**\n * Map MCP errors to AI SDK compatible error types\n * @param {Error} error - Original error\n * @returns {Error} Mapped error\n */\nexport function mapMCPError(error) {\n\t// If already an MCP error, return as-is\n\tif (error instanceof MCPError) {\n\t\treturn error;\n\t}\n\n\tconst message = error.message || 'Unknown MCP error';\n\tconst originalError = error;\n\n\t// Map common error patterns\n\tif (message.includes('session') || message.includes('connection')) {\n\t\treturn new MCPSessionError(message, 
{\n\t\t\tcause: originalError,\n\t\t\tcode: 'SESSION_ERROR'\n\t\t});\n\t}\n\n\tif (message.includes('sampling') || message.includes('timeout')) {\n\t\treturn new MCPSamplingError(message, {\n\t\t\tcause: originalError,\n\t\t\tcode: 'SAMPLING_ERROR'\n\t\t});\n\t}\n\n\tif (message.includes('capabilities') || message.includes('not supported')) {\n\t\treturn new MCPSessionError(message, {\n\t\t\tcause: originalError,\n\t\t\tcode: 'CAPABILITY_ERROR'\n\t\t});\n\t}\n\n\t// Default to generic MCP error\n\treturn new MCPError(message, {\n\t\tcause: originalError,\n\t\tcode: 'UNKNOWN_ERROR'\n\t});\n}\n\n/**\n * Check if error is retryable\n * @param {Error} error - Error to check\n * @returns {boolean} True if error might be retryable\n */\nexport function isRetryableError(error) {\n\tif (error instanceof MCPSamplingError && error.code === 'SAMPLING_ERROR') {\n\t\treturn true;\n\t}\n\n\tif (error instanceof MCPSessionError && error.code === 'SESSION_ERROR') {\n\t\t// Session errors are generally not retryable\n\t\treturn false;\n\t}\n\n\t// Check for common retryable patterns\n\tconst message = error.message?.toLowerCase() || '';\n\treturn (\n\t\tmessage.includes('timeout') ||\n\t\tmessage.includes('network') ||\n\t\tmessage.includes('temporary')\n\t);\n}\n"], ["/claude-task-master/mcp-server/src/custom-sdk/message-converter.js", "/**\n * src/ai-providers/custom-sdk/mcp/message-converter.js\n *\n * Message conversion utilities for converting between AI SDK prompt format\n * and MCP sampling format.\n */\n\n/**\n * Convert AI SDK prompt format to MCP sampling format\n * @param {Array} prompt - AI SDK prompt array\n * @returns {object} MCP format with messages and systemPrompt\n */\nexport function convertToMCPFormat(prompt) {\n\tconst messages = [];\n\tlet systemPrompt = '';\n\n\tfor (const message of prompt) {\n\t\tif (message.role === 'system') {\n\t\t\t// Extract system prompt\n\t\t\tsystemPrompt = extractTextContent(message.content);\n\t\t} else if (message.role === 
'user' || message.role === 'assistant') {\n\t\t\t// Convert user/assistant messages\n\t\t\tmessages.push({\n\t\t\t\trole: message.role,\n\t\t\t\tcontent: {\n\t\t\t\t\ttype: 'text',\n\t\t\t\t\ttext: extractTextContent(message.content)\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t}\n\n\treturn {\n\t\tmessages,\n\t\tsystemPrompt\n\t};\n}\n\n/**\n * Convert MCP response format to AI SDK format\n * @param {object} response - MCP sampling response\n * @returns {object} AI SDK compatible result\n */\nexport function convertFromMCPFormat(response) {\n\t// Handle different possible response formats\n\tlet text = '';\n\tlet usage = null;\n\tlet finishReason = 'stop';\n\tlet warnings = [];\n\n\tif (typeof response === 'string') {\n\t\ttext = response;\n\t} else if (response.content) {\n\t\ttext = extractTextContent(response.content);\n\t\tusage = response.usage;\n\t\tfinishReason = response.finishReason || 'stop';\n\t} else if (response.text) {\n\t\ttext = response.text;\n\t\tusage = response.usage;\n\t\tfinishReason = response.finishReason || 'stop';\n\t} else {\n\t\t// Fallback: try to extract text from response\n\t\ttext = JSON.stringify(response);\n\t\twarnings.push('Unexpected MCP response format, used JSON fallback');\n\t}\n\n\treturn {\n\t\ttext,\n\t\tusage,\n\t\tfinishReason,\n\t\twarnings\n\t};\n}\n\n/**\n * Extract text content from various content formats\n * @param {string|Array|object} content - Content in various formats\n * @returns {string} Extracted text\n */\nfunction extractTextContent(content) {\n\tif (typeof content === 'string') {\n\t\treturn content;\n\t}\n\n\tif (Array.isArray(content)) {\n\t\t// Handle array of content parts\n\t\treturn content\n\t\t\t.map((part) => {\n\t\t\t\tif (typeof part === 'string') {\n\t\t\t\t\treturn part;\n\t\t\t\t}\n\t\t\t\tif (part.type === 'text' && part.text) {\n\t\t\t\t\treturn part.text;\n\t\t\t\t}\n\t\t\t\tif (part.text) {\n\t\t\t\t\treturn part.text;\n\t\t\t\t}\n\t\t\t\t// Skip non-text content (images, etc.)\n\t\t\t\treturn 
'';\n\t\t\t})\n\t\t\t.filter((text) => text.length > 0)\n\t\t\t.join(' ');\n\t}\n\n\tif (content && typeof content === 'object') {\n\t\tif (content.type === 'text' && content.text) {\n\t\t\treturn content.text;\n\t\t}\n\t\tif (content.text) {\n\t\t\treturn content.text;\n\t\t}\n\t}\n\n\t// Fallback\n\treturn String(content || '');\n}\n"], ["/claude-task-master/mcp-server/src/custom-sdk/json-extractor.js", "/**\n * @fileoverview Extract JSON from MCP response, handling markdown blocks and other formatting\n */\n\n/**\n * Extract JSON from MCP AI response\n * @param {string} text - The text to extract JSON from\n * @returns {string} - The extracted JSON string\n */\nexport function extractJson(text) {\n\t// Remove markdown code blocks if present\n\tlet jsonText = text.trim();\n\n\t// Remove ```json blocks\n\tjsonText = jsonText.replace(/^```json\\s*/gm, '');\n\tjsonText = jsonText.replace(/^```\\s*/gm, '');\n\tjsonText = jsonText.replace(/```\\s*$/gm, '');\n\n\t// Remove common TypeScript/JavaScript patterns\n\tjsonText = jsonText.replace(/^const\\s+\\w+\\s*=\\s*/, ''); // Remove \"const varName = \"\n\tjsonText = jsonText.replace(/^let\\s+\\w+\\s*=\\s*/, ''); // Remove \"let varName = \"\n\tjsonText = jsonText.replace(/^var\\s+\\w+\\s*=\\s*/, ''); // Remove \"var varName = \"\n\tjsonText = jsonText.replace(/;?\\s*$/, ''); // Remove trailing semicolons\n\n\t// Remove explanatory text before JSON (common with AI responses)\n\tjsonText = jsonText.replace(/^.*?(?=\\{|\\[)/s, '');\n\n\t// Remove explanatory text after JSON\n\tconst lines = jsonText.split('\\n');\n\tlet jsonEndIndex = -1;\n\tlet braceCount = 0;\n\tlet inString = false;\n\tlet escapeNext = false;\n\n\t// Find the end of the JSON by tracking braces\n\tfor (let i = 0; i < jsonText.length; i++) {\n\t\tconst char = jsonText[i];\n\n\t\tif (escapeNext) {\n\t\t\tescapeNext = false;\n\t\t\tcontinue;\n\t\t}\n\n\t\tif (char === '\\\\') {\n\t\t\tescapeNext = true;\n\t\t\tcontinue;\n\t\t}\n\n\t\tif (char === '\"' && 
!escapeNext) {\n\t\t\tinString = !inString;\n\t\t\tcontinue;\n\t\t}\n\n\t\tif (!inString) {\n\t\t\tif (char === '{' || char === '[') {\n\t\t\t\tbraceCount++;\n\t\t\t} else if (char === '}' || char === ']') {\n\t\t\t\tbraceCount--;\n\t\t\t\tif (braceCount === 0) {\n\t\t\t\t\tjsonEndIndex = i;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif (jsonEndIndex > -1) {\n\t\tjsonText = jsonText.substring(0, jsonEndIndex + 1);\n\t}\n\n\t// Try to extract JSON object or array if previous method didn't work\n\tif (jsonEndIndex === -1) {\n\t\tconst objectMatch = jsonText.match(/{[\\s\\S]*}/);\n\t\tconst arrayMatch = jsonText.match(/\\[[\\s\\S]*\\]/);\n\n\t\tif (objectMatch) {\n\t\t\tjsonText = objectMatch[0];\n\t\t} else if (arrayMatch) {\n\t\t\tjsonText = arrayMatch[0];\n\t\t}\n\t}\n\n\t// First try to parse as valid JSON\n\ttry {\n\t\tJSON.parse(jsonText);\n\t\treturn jsonText;\n\t} catch {\n\t\t// If it's not valid JSON, it might be a JavaScript object literal\n\t\t// Try to convert it to valid JSON\n\t\ttry {\n\t\t\t// This is a simple conversion that handles basic cases\n\t\t\t// Replace unquoted keys with quoted keys\n\t\t\tconst converted = jsonText\n\t\t\t\t.replace(/([{,]\\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\\s*:/g, '$1\"$2\":')\n\t\t\t\t// Replace single quotes with double quotes\n\t\t\t\t.replace(/'/g, '\"')\n\t\t\t\t// Handle trailing commas\n\t\t\t\t.replace(/,\\s*([}\\]])/g, '$1');\n\n\t\t\t// Validate the converted JSON\n\t\t\tJSON.parse(converted);\n\t\t\treturn converted;\n\t\t} catch {\n\t\t\t// If all else fails, return the original text\n\t\t\t// The calling code will handle the error appropriately\n\t\t\treturn text;\n\t\t}\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/tools/response-language.js", "import { z } from 'zod';\nimport {\n\tcreateErrorResponse,\n\thandleApiResult,\n\twithNormalizedProjectRoot\n} from './utils.js';\nimport { responseLanguageDirect } from '../core/direct-functions/response-language.js';\n\nexport function 
registerResponseLanguageTool(server) {\n\tserver.addTool({\n\t\tname: 'response-language',\n\t\tdescription: 'Get or set the response language for the project',\n\t\tparameters: z.object({\n\t\t\tprojectRoot: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'The root directory for the project. ALWAYS SET THIS TO THE PROJECT ROOT DIRECTORY. IF NOT SET, THE TOOL WILL NOT WORK.'\n\t\t\t\t),\n\t\t\tlanguage: z\n\t\t\t\t.string()\n\t\t\t\t.describe(\n\t\t\t\t\t'The new response language to set. like \"中文\" \"English\" or \"español\".'\n\t\t\t\t)\n\t\t}),\n\t\texecute: withNormalizedProjectRoot(async (args, { log, session }) => {\n\t\t\ttry {\n\t\t\t\tlog.info(\n\t\t\t\t\t`Executing response-language tool with args: ${JSON.stringify(args)}`\n\t\t\t\t);\n\n\t\t\t\tconst result = await responseLanguageDirect(\n\t\t\t\t\t{\n\t\t\t\t\t\t...args,\n\t\t\t\t\t\tprojectRoot: args.projectRoot\n\t\t\t\t\t},\n\t\t\t\t\tlog,\n\t\t\t\t\t{ session }\n\t\t\t\t);\n\t\t\t\treturn handleApiResult(result, log, 'Error setting response language');\n\t\t\t} catch (error) {\n\t\t\t\tlog.error(`Error in response-language tool: ${error.message}`);\n\t\t\t\treturn createErrorResponse(error.message);\n\t\t\t}\n\t\t})\n\t});\n}\n"], ["/claude-task-master/test-version-check-full.js", "import {\n\tcheckForUpdate,\n\tdisplayUpgradeNotification,\n\tcompareVersions\n} from './scripts/modules/commands.js';\nimport fs from 'fs';\nimport path from 'path';\n\n// Force our current version for testing\nprocess.env.FORCE_VERSION = '0.9.30';\n\n// Create a mock package.json in memory for testing\nconst mockPackageJson = {\n\tname: 'task-master-ai',\n\tversion: '0.9.30'\n};\n\n// Modified version of checkForUpdate that doesn't use HTTP for testing\nasync function testCheckForUpdate(simulatedLatestVersion) {\n\t// Get current version - use our forced version\n\tconst currentVersion = process.env.FORCE_VERSION || '0.9.30';\n\n\tconsole.log(`Using simulated current version: 
${currentVersion}`);\n\tconsole.log(`Using simulated latest version: ${simulatedLatestVersion}`);\n\n\t// Compare versions\n\tconst needsUpdate =\n\t\tcompareVersions(currentVersion, simulatedLatestVersion) < 0;\n\n\treturn {\n\t\tcurrentVersion,\n\t\tlatestVersion: simulatedLatestVersion,\n\t\tneedsUpdate\n\t};\n}\n\n// Test with current version older than latest (should show update notice)\nasync function runTest() {\n\tconsole.log('=== Testing version check scenarios ===\\n');\n\n\t// Scenario 1: Update available\n\tconsole.log(\n\t\t'\\n--- Scenario 1: Update available (Current: 0.9.30, Latest: 1.0.0) ---'\n\t);\n\tconst updateInfo1 = await testCheckForUpdate('1.0.0');\n\tconsole.log('Update check results:');\n\tconsole.log(`- Current version: ${updateInfo1.currentVersion}`);\n\tconsole.log(`- Latest version: ${updateInfo1.latestVersion}`);\n\tconsole.log(`- Update needed: ${updateInfo1.needsUpdate}`);\n\n\tif (updateInfo1.needsUpdate) {\n\t\tconsole.log('\\nDisplaying upgrade notification:');\n\t\tdisplayUpgradeNotification(\n\t\t\tupdateInfo1.currentVersion,\n\t\t\tupdateInfo1.latestVersion\n\t\t);\n\t}\n\n\t// Scenario 2: No update needed (versions equal)\n\tconsole.log(\n\t\t'\\n--- Scenario 2: No update needed (Current: 0.9.30, Latest: 0.9.30) ---'\n\t);\n\tconst updateInfo2 = await testCheckForUpdate('0.9.30');\n\tconsole.log('Update check results:');\n\tconsole.log(`- Current version: ${updateInfo2.currentVersion}`);\n\tconsole.log(`- Latest version: ${updateInfo2.latestVersion}`);\n\tconsole.log(`- Update needed: ${updateInfo2.needsUpdate}`);\n\n\t// Scenario 3: Development version (current newer than latest)\n\tconsole.log(\n\t\t'\\n--- Scenario 3: Development version (Current: 0.9.30, Latest: 0.9.0) ---'\n\t);\n\tconst updateInfo3 = await testCheckForUpdate('0.9.0');\n\tconsole.log('Update check results:');\n\tconsole.log(`- Current version: ${updateInfo3.currentVersion}`);\n\tconsole.log(`- Latest version: 
${updateInfo3.latestVersion}`);\n\tconsole.log(`- Update needed: ${updateInfo3.needsUpdate}`);\n\n\tconsole.log('\\n=== Test complete ===');\n}\n\n// Run all tests\nrunTest();\n"], ["/claude-task-master/mcp-server/src/providers/mcp-provider.js", "/**\n * mcp-server/src/providers/mcp-provider.js\n *\n * Implementation for MCP custom AI SDK provider that integrates with\n * the existing MCP server infrastructure and provider registry.\n * Follows the Claude Code provider pattern for session-based providers.\n */\n\nimport { createMCP } from '../custom-sdk/index.js';\nimport { BaseAIProvider } from '../../../src/ai-providers/base-provider.js';\n\nexport class MCPProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'mcp';\n\t\tthis.session = null; // MCP server session object\n\t}\n\n\tgetRequiredApiKeyName() {\n\t\treturn 'MCP_API_KEY';\n\t}\n\n\tisRequiredApiKey() {\n\t\treturn false;\n\t}\n\n\t/**\n\t * Override validateAuth to validate MCP session instead of API key\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateAuth(params) {\n\t\t// Validate MCP session instead of API key\n\t\tif (!this.session) {\n\t\t\tthrow new Error('MCP Provider requires active MCP session');\n\t\t}\n\n\t\tif (!this.session.clientCapabilities?.sampling) {\n\t\t\tthrow new Error('MCP session must have client sampling capabilities');\n\t\t}\n\t}\n\n\t/**\n\t * Creates and returns an MCP AI SDK client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @returns {Function} MCP AI SDK client function\n\t * @throws {Error} If initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\t// Pass MCP session to AI SDK implementation\n\t\t\treturn createMCP({\n\t\t\t\tsession: this.session,\n\t\t\t\tdefaultSettings: {\n\t\t\t\t\ttemperature: params.temperature,\n\t\t\t\t\tmaxTokens: params.maxTokens\n\t\t\t\t}\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', 
error);\n\t\t}\n\t}\n\n\t/**\n\t * Method called by MCP server on connect events\n\t * @param {object} session - MCP session object\n\t */\n\tsetSession(session) {\n\t\tthis.session = session;\n\n\t\tif (!session) {\n\t\t\tthis.logger?.warn('Set null session on MCP Provider');\n\t\t} else {\n\t\t\tthis.logger?.debug('Updated MCP Provider session');\n\t\t}\n\t}\n\n\t/**\n\t * Get current session status\n\t * @returns {boolean} True if session is available and valid\n\t */\n\thasValidSession() {\n\t\treturn !!(this.session && this.session.clientCapabilities?.sampling);\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/anthropic.js", "/**\n * src/ai-providers/anthropic.js\n *\n * Implementation for interacting with Anthropic models (e.g., Claude)\n * using the Vercel AI SDK.\n */\n\nimport { createAnthropic } from '@ai-sdk/anthropic';\nimport { BaseAIProvider } from './base-provider.js';\n\n// TODO: Implement standardized functions for generateText, streamText, generateObject\n\n// --- Client Instantiation ---\n// Note: API key resolution should ideally happen closer to the call site\n// using the config manager/resolver which checks process.env and session.env.\n// This is a placeholder for basic functionality.\n// Remove the global variable and caching logic\n// let anthropicClient;\n\nexport class AnthropicAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Anthropic';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the Anthropic API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'ANTHROPIC_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns an Anthropic client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - Anthropic API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} Anthropic client function\n\t * 
@throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('Anthropic API key is required.');\n\t\t\t}\n\n\t\t\treturn createAnthropic({\n\t\t\t\tapiKey,\n\t\t\t\t...(baseURL && { baseURL }),\n\t\t\t\theaders: {\n\t\t\t\t\t'anthropic-beta': 'output-128k-2025-02-19'\n\t\t\t\t}\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n\n// TODO: Implement streamAnthropicObject if needed and supported well by the SDK for Anthropic.\n// The basic structure would be similar to generateAnthropicObject but using streamObject.\n"], ["/claude-task-master/scripts/modules/update-config-tokens.js", "/**\n * update-config-tokens.js\n * Updates config.json with correct maxTokens values from supported-models.json\n */\n\nimport fs from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\nimport { dirname } from 'path';\n\nconst __filename = fileURLToPath(import.meta.url);\nconst __dirname = dirname(__filename);\n\n/**\n * Updates the config file with correct maxTokens values from supported-models.json\n * @param {string} configPath - Path to the config.json file to update\n * @returns {boolean} True if successful, false otherwise\n */\nexport function updateConfigMaxTokens(configPath) {\n\ttry {\n\t\t// Load supported models\n\t\tconst supportedModelsPath = path.join(__dirname, 'supported-models.json');\n\t\tconst supportedModels = JSON.parse(\n\t\t\tfs.readFileSync(supportedModelsPath, 'utf-8')\n\t\t);\n\n\t\t// Load config\n\t\tconst config = JSON.parse(fs.readFileSync(configPath, 'utf-8'));\n\n\t\t// Update each role's maxTokens if the model exists in supported-models.json\n\t\tconst roles = ['main', 'research', 'fallback'];\n\n\t\tfor (const role of roles) {\n\t\t\tif (config.models && config.models[role]) {\n\t\t\t\tconst provider = 
config.models[role].provider;\n\t\t\t\tconst modelId = config.models[role].modelId;\n\n\t\t\t\t// Find the model in supported models\n\t\t\t\tif (supportedModels[provider]) {\n\t\t\t\t\tconst modelData = supportedModels[provider].find(\n\t\t\t\t\t\t(m) => m.id === modelId\n\t\t\t\t\t);\n\t\t\t\t\tif (modelData && modelData.max_tokens) {\n\t\t\t\t\t\tconfig.models[role].maxTokens = modelData.max_tokens;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Write back the updated config\n\t\tfs.writeFileSync(configPath, JSON.stringify(config, null, 2));\n\t\treturn true;\n\t} catch (error) {\n\t\tconsole.error('Error updating config maxTokens:', error.message);\n\t\treturn false;\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/custom-sdk/index.js", "/**\n * src/ai-providers/custom-sdk/mcp/index.js\n *\n * AI SDK factory function for MCP provider.\n * Creates MCP language model instances with session-based AI operations.\n */\n\nimport { MCPLanguageModel } from './language-model.js';\n\n/**\n * Create MCP provider factory function following AI SDK patterns\n * @param {object} options - Provider options\n * @param {object} options.session - MCP session object\n * @param {object} options.defaultSettings - Default settings for the provider\n * @returns {Function} Provider factory function\n */\nexport function createMCP(options = {}) {\n\tif (!options.session) {\n\t\tthrow new Error('MCP provider requires session object');\n\t}\n\n\t// Return the provider factory function that AI SDK expects\n\tconst provider = function (modelId, settings = {}) {\n\t\tif (new.target) {\n\t\t\tthrow new Error(\n\t\t\t\t'The MCP model function cannot be called with the new keyword.'\n\t\t\t);\n\t\t}\n\n\t\treturn new MCPLanguageModel({\n\t\t\tsession: options.session,\n\t\t\tmodelId: modelId || 'claude-3-5-sonnet-20241022',\n\t\t\tsettings: {\n\t\t\t\ttemperature: settings.temperature,\n\t\t\t\tmaxTokens: 
settings.maxTokens,\n\t\t\t\t...options.defaultSettings,\n\t\t\t\t...settings\n\t\t\t}\n\t\t});\n\t};\n\n\t// Add required methods for AI SDK compatibility\n\tprovider.languageModel = (modelId, settings) => provider(modelId, settings);\n\tprovider.chat = (modelId, settings) => provider(modelId, settings);\n\n\treturn provider;\n}\n"], ["/claude-task-master/src/ai-providers/custom-sdk/claude-code/index.js", "/**\n * @fileoverview Claude Code provider factory and exports\n */\n\nimport { NoSuchModelError } from '@ai-sdk/provider';\nimport { ClaudeCodeLanguageModel } from './language-model.js';\n\n/**\n * @typedef {import('./types.js').ClaudeCodeSettings} ClaudeCodeSettings\n * @typedef {import('./types.js').ClaudeCodeModelId} ClaudeCodeModelId\n * @typedef {import('./types.js').ClaudeCodeProvider} ClaudeCodeProvider\n * @typedef {import('./types.js').ClaudeCodeProviderSettings} ClaudeCodeProviderSettings\n */\n\n/**\n * Create a Claude Code provider using the official SDK\n * @param {ClaudeCodeProviderSettings} [options={}] - Provider configuration options\n * @returns {ClaudeCodeProvider} Claude Code provider instance\n */\nexport function createClaudeCode(options = {}) {\n\t/**\n\t * Create a language model instance\n\t * @param {ClaudeCodeModelId} modelId - Model ID\n\t * @param {ClaudeCodeSettings} [settings={}] - Model settings\n\t * @returns {ClaudeCodeLanguageModel}\n\t */\n\tconst createModel = (modelId, settings = {}) => {\n\t\treturn new ClaudeCodeLanguageModel({\n\t\t\tid: modelId,\n\t\t\tsettings: {\n\t\t\t\t...options.defaultSettings,\n\t\t\t\t...settings\n\t\t\t}\n\t\t});\n\t};\n\n\t/**\n\t * Provider function\n\t * @param {ClaudeCodeModelId} modelId - Model ID\n\t * @param {ClaudeCodeSettings} [settings] - Model settings\n\t * @returns {ClaudeCodeLanguageModel}\n\t */\n\tconst provider = function (modelId, settings) {\n\t\tif (new.target) {\n\t\t\tthrow new Error(\n\t\t\t\t'The Claude Code model function cannot be called with the new 
keyword.'\n\t\t\t);\n\t\t}\n\n\t\treturn createModel(modelId, settings);\n\t};\n\n\tprovider.languageModel = createModel;\n\tprovider.chat = createModel; // Alias for languageModel\n\n\t// Add textEmbeddingModel method that throws NoSuchModelError\n\tprovider.textEmbeddingModel = (modelId) => {\n\t\tthrow new NoSuchModelError({\n\t\t\tmodelId,\n\t\t\tmodelType: 'textEmbeddingModel'\n\t\t});\n\t};\n\n\treturn /** @type {ClaudeCodeProvider} */ (provider);\n}\n\n/**\n * Default Claude Code provider instance\n */\nexport const claudeCode = createClaudeCode();\n\n// Provider exports\nexport { ClaudeCodeLanguageModel } from './language-model.js';\n\n// Error handling exports\nexport {\n\tisAuthenticationError,\n\tisTimeoutError,\n\tgetErrorMetadata,\n\tcreateAPICallError,\n\tcreateAuthenticationError,\n\tcreateTimeoutError\n} from './errors.js';\n"], ["/claude-task-master/src/utils/logger-utils.js", "/**\n * Logger utility functions for Task Master\n * Provides standardized logging patterns for both CLI and utility contexts\n */\n\nimport { log as utilLog } from '../../scripts/modules/utils.js';\n\n/**\n * Creates a standard logger object that wraps the utility log function\n * This provides a consistent logger interface across different parts of the application\n * @returns {Object} A logger object with standard logging methods (info, warn, error, debug, success)\n */\nexport function createStandardLogger() {\n\treturn {\n\t\tinfo: (msg, ...args) => utilLog('info', msg, ...args),\n\t\twarn: (msg, ...args) => utilLog('warn', msg, ...args),\n\t\terror: (msg, ...args) => utilLog('error', msg, ...args),\n\t\tdebug: (msg, ...args) => utilLog('debug', msg, ...args),\n\t\tsuccess: (msg, ...args) => utilLog('success', msg, ...args)\n\t};\n}\n\n/**\n * Creates a logger using either the provided logger or a default standard logger\n * This is the recommended pattern for functions that accept an optional logger parameter\n * @param {Object|null} providedLogger - Optional logger 
object passed from caller\n * @returns {Object} A logger object with standard logging methods\n */\nexport function getLoggerOrDefault(providedLogger = null) {\n\treturn providedLogger || createStandardLogger();\n}\n"], ["/claude-task-master/mcp-test.js", "#!/usr/bin/env node\n\nimport { Config } from 'fastmcp';\nimport path from 'path';\nimport fs from 'fs';\n\n// Log the current directory\nconsole.error(`Current working directory: ${process.cwd()}`);\n\ntry {\n\tconsole.error('Attempting to load FastMCP Config...');\n\n\t// Check if .cursor/mcp.json exists\n\tconst mcpPath = path.join(process.cwd(), '.cursor', 'mcp.json');\n\tconsole.error(`Checking if mcp.json exists at: ${mcpPath}`);\n\n\tif (fs.existsSync(mcpPath)) {\n\t\tconsole.error('mcp.json file found');\n\t\tconsole.error(\n\t\t\t`File content: ${JSON.stringify(JSON.parse(fs.readFileSync(mcpPath, 'utf8')), null, 2)}`\n\t\t);\n\t} else {\n\t\tconsole.error('mcp.json file not found');\n\t}\n\n\t// Try to create Config\n\tconst config = new Config();\n\tconsole.error('Config created successfully');\n\n\t// Check if env property exists\n\tif (config.env) {\n\t\tconsole.error(\n\t\t\t`Config.env exists with keys: ${Object.keys(config.env).join(', ')}`\n\t\t);\n\n\t\t// Print each env var value (careful with sensitive values)\n\t\tfor (const [key, value] of Object.entries(config.env)) {\n\t\t\tif (key.includes('KEY')) {\n\t\t\t\tconsole.error(`${key}: [value hidden]`);\n\t\t\t} else {\n\t\t\t\tconsole.error(`${key}: ${value}`);\n\t\t\t}\n\t\t}\n\t} else {\n\t\tconsole.error('Config.env does not exist');\n\t}\n} catch (error) {\n\tconsole.error(`Error loading Config: ${error.message}`);\n\tconsole.error(`Stack trace: ${error.stack}`);\n}\n\n// Log process.env to see if values from mcp.json were loaded automatically\nconsole.error('\\nChecking if process.env already has values from mcp.json:');\nconst envVars = 
[\n\t'ANTHROPIC_API_KEY',\n\t'PERPLEXITY_API_KEY',\n\t'MODEL',\n\t'PERPLEXITY_MODEL',\n\t'MAX_TOKENS',\n\t'TEMPERATURE',\n\t'DEFAULT_SUBTASKS',\n\t'DEFAULT_PRIORITY'\n];\n\nfor (const varName of envVars) {\n\tif (process.env[varName]) {\n\t\tif (varName.includes('KEY')) {\n\t\t\tconsole.error(`${varName}: [value hidden]`);\n\t\t} else {\n\t\t\tconsole.error(`${varName}: ${process.env[varName]}`);\n\t\t}\n\t} else {\n\t\tconsole.error(`${varName}: not set`);\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/xai.js", "/**\n * xai.js\n * AI provider implementation for xAI models using Vercel AI SDK.\n */\n\nimport { createXai } from '@ai-sdk/xai';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class XAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'xAI';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the xAI API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'XAI_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns an xAI client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - xAI API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} xAI client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('xAI API key is required.');\n\t\t\t}\n\n\t\t\treturn createXai({\n\t\t\t\tapiKey,\n\t\t\t\tbaseURL: baseURL || 'https://api.x.ai/v1'\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/provider-registry/index.js", "/**\n * Provider Registry - Singleton for managing AI providers\n *\n * This module implements a singleton registry that allows dynamic 
registration\n * of AI providers at runtime, while maintaining compatibility with the existing\n * static PROVIDERS object in ai-services-unified.js.\n */\n\n// Singleton instance\nlet instance = null;\n\n/**\n * Provider Registry class - Manages dynamic provider registration\n */\nclass ProviderRegistry {\n\tconstructor() {\n\t\t// Private provider map\n\t\tthis._providers = new Map();\n\n\t\t// Flag to track initialization\n\t\tthis._initialized = false;\n\t}\n\n\t/**\n\t * Get the singleton instance\n\t * @returns {ProviderRegistry} The singleton instance\n\t */\n\tstatic getInstance() {\n\t\tif (!instance) {\n\t\t\tinstance = new ProviderRegistry();\n\t\t}\n\t\treturn instance;\n\t}\n\n\t/**\n\t * Initialize the registry\n\t * @returns {ProviderRegistry} The singleton instance\n\t */\n\tinitialize() {\n\t\tif (this._initialized) {\n\t\t\treturn this;\n\t\t}\n\n\t\tthis._initialized = true;\n\t\treturn this;\n\t}\n\n\t/**\n\t * Register a provider with the registry\n\t * @param {string} providerName - The name of the provider\n\t * @param {object} provider - The provider instance\n\t * @param {object} options - Additional options for registration\n\t * @returns {ProviderRegistry} The singleton instance for chaining\n\t */\n\tregisterProvider(providerName, provider, options = {}) {\n\t\tif (!providerName || typeof providerName !== 'string') {\n\t\t\tthrow new Error('Provider name must be a non-empty string');\n\t\t}\n\n\t\tif (!provider) {\n\t\t\tthrow new Error('Provider instance is required');\n\t\t}\n\n\t\t// Validate that provider implements the required interface\n\t\tif (\n\t\t\ttypeof provider.generateText !== 'function' ||\n\t\t\ttypeof provider.streamText !== 'function' ||\n\t\t\ttypeof provider.generateObject !== 'function'\n\t\t) {\n\t\t\tthrow new Error('Provider must implement BaseAIProvider interface');\n\t\t}\n\n\t\t// Add provider to the registry\n\t\tthis._providers.set(providerName, {\n\t\t\tinstance: 
provider,\n\t\t\toptions,\n\t\t\tregisteredAt: new Date()\n\t\t});\n\n\t\treturn this;\n\t}\n\n\t/**\n\t * Check if a provider exists in the registry\n\t * @param {string} providerName - The name of the provider\n\t * @returns {boolean} True if the provider exists\n\t */\n\thasProvider(providerName) {\n\t\treturn this._providers.has(providerName);\n\t}\n\n\t/**\n\t * Get a provider from the registry\n\t * @param {string} providerName - The name of the provider\n\t * @returns {object|null} The provider instance or null if not found\n\t */\n\tgetProvider(providerName) {\n\t\tconst providerEntry = this._providers.get(providerName);\n\t\treturn providerEntry ? providerEntry.instance : null;\n\t}\n\n\t/**\n\t * Get all registered providers\n\t * @returns {Map} Map of all registered providers\n\t */\n\tgetAllProviders() {\n\t\treturn new Map(this._providers);\n\t}\n\n\t/**\n\t * Remove a provider from the registry\n\t * @param {string} providerName - The name of the provider\n\t * @returns {boolean} True if the provider was removed\n\t */\n\tunregisterProvider(providerName) {\n\t\tif (this._providers.has(providerName)) {\n\t\t\tthis._providers.delete(providerName);\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\n\t/**\n\t * Reset the registry (primarily for testing)\n\t */\n\treset() {\n\t\tthis._providers.clear();\n\t\tthis._initialized = false;\n\t}\n}\n\nProviderRegistry.getInstance().initialize(); // Ensure singleton is initialized on import\n// Export singleton getter\nexport default ProviderRegistry;\n"], ["/claude-task-master/src/ai-providers/claude-code.js", "/**\n * src/ai-providers/claude-code.js\n *\n * Implementation for interacting with Claude models via Claude Code CLI\n * using a custom AI SDK implementation.\n */\n\nimport { createClaudeCode } from './custom-sdk/claude-code/index.js';\nimport { BaseAIProvider } from './base-provider.js';\nimport { getClaudeCodeSettingsForCommand } from '../../scripts/modules/config-manager.js';\n\nexport class 
ClaudeCodeProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Claude Code';\n\t}\n\n\tgetRequiredApiKeyName() {\n\t\treturn 'CLAUDE_CODE_API_KEY';\n\t}\n\n\tisRequiredApiKey() {\n\t\treturn false;\n\t}\n\n\t/**\n\t * Override validateAuth to skip API key validation for Claude Code\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateAuth(params) {\n\t\t// Claude Code doesn't require an API key\n\t\t// No validation needed\n\t}\n\n\t/**\n\t * Creates and returns a Claude Code client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} [params.commandName] - Name of the command invoking the service\n\t * @param {string} [params.baseURL] - Optional custom API endpoint (not used by Claude Code)\n\t * @returns {Function} Claude Code client function\n\t * @throws {Error} If initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\t// Claude Code doesn't use API keys or base URLs\n\t\t\t// Just return the provider factory\n\t\t\treturn createClaudeCode({\n\t\t\t\tdefaultSettings: getClaudeCodeSettingsForCommand(params?.commandName)\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/mcp-server/src/core/utils/env-utils.js", "/**\n * Temporarily sets environment variables from session.env, executes an action,\n * and restores the original environment variables.\n * @param {object | undefined} sessionEnv - The environment object from the session.\n * @param {Function} actionFn - An async function to execute with the temporary environment.\n * @returns {Promise<any>} The result of the actionFn.\n */\nexport async function withSessionEnv(sessionEnv, actionFn) {\n\tif (\n\t\t!sessionEnv ||\n\t\ttypeof sessionEnv !== 'object' ||\n\t\tObject.keys(sessionEnv).length === 0\n\t) {\n\t\t// If no sessionEnv is provided, just run the action directly\n\t\treturn await actionFn();\n\t}\n\n\tconst 
originalEnv = {};\n\tconst keysToRestore = [];\n\n\t// Set environment variables from sessionEnv\n\tfor (const key in sessionEnv) {\n\t\tif (Object.prototype.hasOwnProperty.call(sessionEnv, key)) {\n\t\t\t// Store original value if it exists, otherwise mark for deletion\n\t\t\tif (process.env[key] !== undefined) {\n\t\t\t\toriginalEnv[key] = process.env[key];\n\t\t\t}\n\t\t\tkeysToRestore.push(key);\n\t\t\tprocess.env[key] = sessionEnv[key];\n\t\t}\n\t}\n\n\ttry {\n\t\t// Execute the provided action function\n\t\treturn await actionFn();\n\t} finally {\n\t\t// Restore original environment variables\n\t\tfor (const key of keysToRestore) {\n\t\t\tif (Object.prototype.hasOwnProperty.call(originalEnv, key)) {\n\t\t\t\tprocess.env[key] = originalEnv[key];\n\t\t\t} else {\n\t\t\t\t// If the key didn't exist originally, delete it\n\t\t\t\tdelete process.env[key];\n\t\t\t}\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/perplexity.js", "/**\n * perplexity.js\n * AI provider implementation for Perplexity models using Vercel AI SDK.\n */\n\nimport { createPerplexity } from '@ai-sdk/perplexity';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class PerplexityAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Perplexity';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the Perplexity API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'PERPLEXITY_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns a Perplexity client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - Perplexity API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} Perplexity client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } 
= params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('Perplexity API key is required.');\n\t\t\t}\n\n\t\t\treturn createPerplexity({\n\t\t\t\tapiKey,\n\t\t\t\tbaseURL: baseURL || 'https://api.perplexity.ai'\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/scripts/modules/task-manager.js", "/**\n * task-manager.js\n * Task management functions for the Task Master CLI\n */\n\nimport { findTaskById } from './utils.js';\nimport parsePRD from './task-manager/parse-prd.js';\nimport updateTasks from './task-manager/update-tasks.js';\nimport updateTaskById from './task-manager/update-task-by-id.js';\nimport generateTaskFiles from './task-manager/generate-task-files.js';\nimport setTaskStatus from './task-manager/set-task-status.js';\nimport updateSingleTaskStatus from './task-manager/update-single-task-status.js';\nimport listTasks from './task-manager/list-tasks.js';\nimport expandTask from './task-manager/expand-task.js';\nimport expandAllTasks from './task-manager/expand-all-tasks.js';\nimport clearSubtasks from './task-manager/clear-subtasks.js';\nimport addTask from './task-manager/add-task.js';\nimport analyzeTaskComplexity from './task-manager/analyze-task-complexity.js';\nimport findNextTask from './task-manager/find-next-task.js';\nimport addSubtask from './task-manager/add-subtask.js';\nimport removeSubtask from './task-manager/remove-subtask.js';\nimport updateSubtaskById from './task-manager/update-subtask-by-id.js';\nimport removeTask from './task-manager/remove-task.js';\nimport taskExists from './task-manager/task-exists.js';\nimport isTaskDependentOn from './task-manager/is-task-dependent.js';\nimport setResponseLanguage from './task-manager/response-language.js';\nimport moveTask from './task-manager/move-task.js';\nimport { migrateProject } from './task-manager/migrate.js';\nimport { performResearch } from './task-manager/research.js';\nimport { readComplexityReport 
} from './utils.js';\n\n// Export task manager functions\nexport {\n\tparsePRD,\n\tupdateTasks,\n\tupdateTaskById,\n\tupdateSubtaskById,\n\tgenerateTaskFiles,\n\tsetTaskStatus,\n\tupdateSingleTaskStatus,\n\tlistTasks,\n\texpandTask,\n\texpandAllTasks,\n\tclearSubtasks,\n\taddTask,\n\taddSubtask,\n\tremoveSubtask,\n\tfindNextTask,\n\tanalyzeTaskComplexity,\n\tremoveTask,\n\tfindTaskById,\n\ttaskExists,\n\tisTaskDependentOn,\n\tsetResponseLanguage,\n\tmoveTask,\n\treadComplexityReport,\n\tmigrateProject,\n\tperformResearch\n};\n"], ["/claude-task-master/src/ai-providers/custom-sdk/claude-code/types.js", "/**\n * @fileoverview Type definitions for Claude Code AI SDK provider\n * These JSDoc types mirror the TypeScript interfaces from the original provider\n */\n\n/**\n * Claude Code provider settings\n * @typedef {Object} ClaudeCodeSettings\n * @property {string} [pathToClaudeCodeExecutable='claude'] - Custom path to Claude Code CLI executable\n * @property {string} [customSystemPrompt] - Custom system prompt to use\n * @property {string} [appendSystemPrompt] - Append additional content to the system prompt\n * @property {number} [maxTurns] - Maximum number of turns for the conversation\n * @property {number} [maxThinkingTokens] - Maximum thinking tokens for the model\n * @property {string} [cwd] - Working directory for CLI operations\n * @property {'bun'|'deno'|'node'} [executable='node'] - JavaScript runtime to use\n * @property {string[]} [executableArgs] - Additional arguments for the JavaScript runtime\n * @property {'default'|'acceptEdits'|'bypassPermissions'|'plan'} [permissionMode='default'] - Permission mode for tool usage\n * @property {string} [permissionPromptToolName] - Custom tool name for permission prompts\n * @property {boolean} [continue] - Continue the most recent conversation\n * @property {string} [resume] - Resume a specific session by ID\n * @property {string[]} [allowedTools] - Tools to explicitly allow during execution (e.g., ['Read', 'LS', 
'Bash(git log:*)'])\n * @property {string[]} [disallowedTools] - Tools to disallow during execution (e.g., ['Write', 'Edit', 'Bash(rm:*)'])\n * @property {Object.<string, MCPServerConfig>} [mcpServers] - MCP server configuration\n * @property {boolean} [verbose] - Enable verbose logging for debugging\n */\n\n/**\n * MCP Server configuration\n * @typedef {Object} MCPServerConfig\n * @property {'stdio'|'sse'} [type='stdio'] - Server type\n * @property {string} command - Command to execute (for stdio type)\n * @property {string[]} [args] - Arguments for the command\n * @property {Object.<string, string>} [env] - Environment variables\n * @property {string} url - URL for SSE type servers\n * @property {Object.<string, string>} [headers] - Headers for SSE type servers\n */\n\n/**\n * Model ID type - either 'opus', 'sonnet', or any string\n * @typedef {'opus'|'sonnet'|string} ClaudeCodeModelId\n */\n\n/**\n * Language model options\n * @typedef {Object} ClaudeCodeLanguageModelOptions\n * @property {ClaudeCodeModelId} id - The model ID\n * @property {ClaudeCodeSettings} [settings] - Optional settings\n */\n\n/**\n * Error metadata for Claude Code errors\n * @typedef {Object} ClaudeCodeErrorMetadata\n * @property {string} [code] - Error code\n * @property {number} [exitCode] - Process exit code\n * @property {string} [stderr] - Standard error output\n * @property {string} [promptExcerpt] - Excerpt of the prompt that caused the error\n */\n\n/**\n * Claude Code provider interface\n * @typedef {Object} ClaudeCodeProvider\n * @property {function(ClaudeCodeModelId, ClaudeCodeSettings=): Object} languageModel - Create a language model\n * @property {function(ClaudeCodeModelId, ClaudeCodeSettings=): Object} chat - Alias for languageModel\n * @property {function(string): never} textEmbeddingModel - Throws NoSuchModelError (not supported)\n */\n\n/**\n * Claude Code provider settings\n * @typedef {Object} ClaudeCodeProviderSettings\n * @property {ClaudeCodeSettings} 
[defaultSettings] - Default settings to use for all models\n */\n\nexport {}; // This ensures the file is treated as a module\n"], ["/claude-task-master/src/utils/getVersion.js", "import fs from 'fs';\nimport path from 'path';\nimport { fileURLToPath } from 'url';\nimport { log } from '../../scripts/modules/utils.js';\n\n/**\n * Reads the version from the nearest package.json relative to this file.\n * Returns 'unknown' if not found or on error.\n * @returns {string} The version string or 'unknown'.\n */\nexport function getTaskMasterVersion() {\n\tlet version = 'unknown';\n\ttry {\n\t\t// Get the directory of the current module (getPackageVersion.js)\n\t\tconst currentModuleFilename = fileURLToPath(import.meta.url);\n\t\tconst currentModuleDirname = path.dirname(currentModuleFilename);\n\t\t// Construct the path to package.json relative to this file (../../package.json)\n\t\tconst packageJsonPath = path.join(\n\t\t\tcurrentModuleDirname,\n\t\t\t'..',\n\t\t\t'..',\n\t\t\t'package.json'\n\t\t);\n\n\t\tif (fs.existsSync(packageJsonPath)) {\n\t\t\tconst packageJsonContent = fs.readFileSync(packageJsonPath, 'utf8');\n\t\t\tconst packageJson = JSON.parse(packageJsonContent);\n\t\t\tversion = packageJson.version;\n\t\t}\n\t} catch (error) {\n\t\t// Silently fall back to default version\n\t\tlog('warn', 'Could not read own package.json for version info.', error);\n\t}\n\treturn version;\n}\n"], ["/claude-task-master/src/ai-providers/ollama.js", "/**\n * ollama.js\n * AI provider implementation for Ollama models using the ollama-ai-provider package.\n */\n\nimport { createOllama } from 'ollama-ai-provider';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class OllamaAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Ollama';\n\t}\n\n\t/**\n\t * Override auth validation - Ollama doesn't require API keys\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateAuth(_params) {\n\t\t// Ollama runs locally and 
doesn't require API keys\n\t\t// No authentication validation needed\n\t}\n\n\t/**\n\t * Creates and returns an Ollama client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} [params.baseURL] - Optional Ollama base URL (defaults to http://localhost:11434)\n\t * @returns {Function} Ollama client function\n\t * @throws {Error} If initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { baseURL } = params;\n\n\t\t\treturn createOllama({\n\t\t\t\t...(baseURL && { baseURL })\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n\n\tisRequiredApiKey() {\n\t\treturn false;\n\t}\n\n\t/**\n\t * Returns the required API key environment variable name for Ollama.\n\t * @returns {string} The environment variable name\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'OLLAMA_API_KEY';\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/azure.js", "/**\n * azure.js\n * AI provider implementation for Azure OpenAI models using Vercel AI SDK.\n */\n\nimport { createAzure } from '@ai-sdk/azure';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class AzureProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Azure OpenAI';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the Azure OpenAI API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'AZURE_OPENAI_API_KEY';\n\t}\n\n\t/**\n\t * Validates Azure-specific authentication parameters\n\t * @param {object} params - Parameters to validate\n\t * @throws {Error} If required parameters are missing\n\t */\n\tvalidateAuth(params) {\n\t\tif (!params.apiKey) {\n\t\t\tthrow new Error('Azure API key is required');\n\t\t}\n\n\t\tif (!params.baseURL) {\n\t\t\tthrow new Error(\n\t\t\t\t'Azure endpoint URL is required. 
Set it in .taskmasterconfig global.azureBaseURL or models.[role].baseURL'\n\t\t\t);\n\t\t}\n\t}\n\n\t/**\n\t * Creates and returns an Azure OpenAI client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - Azure OpenAI API key\n\t * @param {string} params.baseURL - Azure OpenAI endpoint URL (from .taskmasterconfig global.azureBaseURL or models.[role].baseURL)\n\t * @returns {Function} Azure OpenAI client function\n\t * @throws {Error} If required parameters are missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\treturn createAzure({\n\t\t\t\tapiKey,\n\t\t\t\tbaseURL\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/google.js", "/**\n * google.js\n * AI provider implementation for Google AI models using Vercel AI SDK.\n */\n\nimport { createGoogleGenerativeAI } from '@ai-sdk/google';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class GoogleAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Google';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the Google API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'GOOGLE_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns a Google AI client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - Google API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} Google AI client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('Google API key is 
required.');\n\t\t\t}\n\n\t\t\treturn createGoogleGenerativeAI({\n\t\t\t\tapiKey,\n\t\t\t\t...(baseURL && { baseURL })\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/openai.js", "/**\n * openai.js\n * AI provider implementation for OpenAI models using Vercel AI SDK.\n */\n\nimport { createOpenAI } from '@ai-sdk/openai';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class OpenAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'OpenAI';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the OpenAI API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'OPENAI_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns an OpenAI client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - OpenAI API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} OpenAI client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('OpenAI API key is required.');\n\t\t\t}\n\n\t\t\treturn createOpenAI({\n\t\t\t\tapiKey,\n\t\t\t\t...(baseURL && { baseURL })\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/groq.js", "/**\n * src/ai-providers/groq.js\n *\n * Implementation for interacting with Groq models\n * using the Vercel AI SDK.\n */\n\nimport { createGroq } from '@ai-sdk/groq';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class GroqProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Groq';\n\t}\n\n\t/**\n\t * Returns the 
environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the Groq API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'GROQ_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns a Groq client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - Groq API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} Groq client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('Groq API key is required.');\n\t\t\t}\n\n\t\t\treturn createGroq({\n\t\t\t\tapiKey,\n\t\t\t\t...(baseURL && { baseURL })\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/openrouter.js", "/**\n * openrouter.js\n * AI provider implementation for OpenRouter models using Vercel AI SDK.\n */\n\nimport { createOpenRouter } from '@openrouter/ai-sdk-provider';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class OpenRouterAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'OpenRouter';\n\t}\n\n\t/**\n\t * Returns the environment variable name required for this provider's API key.\n\t * @returns {string} The environment variable name for the OpenRouter API key\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'OPENROUTER_API_KEY';\n\t}\n\n\t/**\n\t * Creates and returns an OpenRouter client instance.\n\t * @param {object} params - Parameters for client initialization\n\t * @param {string} params.apiKey - OpenRouter API key\n\t * @param {string} [params.baseURL] - Optional custom API endpoint\n\t * @returns {Function} OpenRouter client function\n\t * @throws {Error} If API key is missing or initialization fails\n\t 
*/\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst { apiKey, baseURL } = params;\n\n\t\t\tif (!apiKey) {\n\t\t\t\tthrow new Error('OpenRouter API key is required.');\n\t\t\t}\n\n\t\t\treturn createOpenRouter({\n\t\t\t\tapiKey,\n\t\t\t\t...(baseURL && { baseURL })\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/constants/task-status.js", "/**\n * @typedef {'pending' | 'done' | 'in-progress' | 'review' | 'deferred' | 'cancelled'} TaskStatus\n */\n\n/**\n * Task status options list\n * @type {TaskStatus[]}\n * @description Defines possible task statuses:\n * - pending: Task waiting to start\n * - done: Task completed\n * - in-progress: Task in progress\n * - review: Task completed and waiting for review\n * - deferred: Task postponed or paused\n * - cancelled: Task cancelled and will not be completed\n */\nexport const TASK_STATUS_OPTIONS = [\n\t'pending',\n\t'done',\n\t'in-progress',\n\t'review',\n\t'deferred',\n\t'cancelled'\n];\n\n/**\n * Check if a given status is a valid task status\n * @param {string} status - The status to check\n * @returns {boolean} True if the status is valid, false otherwise\n */\nexport function isValidTaskStatus(status) {\n\treturn TASK_STATUS_OPTIONS.includes(status);\n}\n"], ["/claude-task-master/mcp-server/src/core/__tests__/context-manager.test.js", "import { jest } from '@jest/globals';\nimport { ContextManager } from '../context-manager.js';\n\ndescribe('ContextManager', () => {\n\tlet contextManager;\n\n\tbeforeEach(() => {\n\t\tcontextManager = new ContextManager({\n\t\t\tmaxCacheSize: 10,\n\t\t\tttl: 1000, // 1 second for testing\n\t\t\tmaxContextSize: 1000\n\t\t});\n\t});\n\n\tdescribe('getContext', () => {\n\t\tit('should create a new context when not in cache', async () => {\n\t\t\tconst context = await contextManager.getContext('test-id', {\n\t\t\t\ttest: 
true\n\t\t\t});\n\t\t\texpect(context.id).toBe('test-id');\n\t\t\texpect(context.metadata.test).toBe(true);\n\t\t\texpect(contextManager.stats.misses).toBe(1);\n\t\t\texpect(contextManager.stats.hits).toBe(0);\n\t\t});\n\n\t\tit('should return cached context when available', async () => {\n\t\t\t// First call creates the context\n\t\t\tawait contextManager.getContext('test-id', { test: true });\n\n\t\t\t// Second call should hit cache\n\t\t\tconst context = await contextManager.getContext('test-id', {\n\t\t\t\ttest: true\n\t\t\t});\n\t\t\texpect(context.id).toBe('test-id');\n\t\t\texpect(context.metadata.test).toBe(true);\n\t\t\texpect(contextManager.stats.hits).toBe(1);\n\t\t\texpect(contextManager.stats.misses).toBe(1);\n\t\t});\n\n\t\tit('should respect TTL settings', async () => {\n\t\t\t// Create context\n\t\t\tawait contextManager.getContext('test-id', { test: true });\n\n\t\t\t// Wait for TTL to expire\n\t\t\tawait new Promise((resolve) => setTimeout(resolve, 1100));\n\n\t\t\t// Should create new context\n\t\t\tawait contextManager.getContext('test-id', { test: true });\n\t\t\texpect(contextManager.stats.misses).toBe(2);\n\t\t\texpect(contextManager.stats.hits).toBe(0);\n\t\t});\n\t});\n\n\tdescribe('updateContext', () => {\n\t\tit('should update existing context metadata', async () => {\n\t\t\tawait contextManager.getContext('test-id', { initial: true });\n\t\t\tconst updated = await contextManager.updateContext('test-id', {\n\t\t\t\tupdated: true\n\t\t\t});\n\n\t\t\texpect(updated.metadata.initial).toBe(true);\n\t\t\texpect(updated.metadata.updated).toBe(true);\n\t\t});\n\t});\n\n\tdescribe('invalidateContext', () => {\n\t\tit('should remove context from cache', async () => {\n\t\t\tawait contextManager.getContext('test-id', { test: true });\n\t\t\tcontextManager.invalidateContext('test-id', { test: true });\n\n\t\t\t// Should be a cache miss\n\t\t\tawait contextManager.getContext('test-id', { test: true 
});\n\t\t\texpect(contextManager.stats.invalidations).toBe(1);\n\t\t\texpect(contextManager.stats.misses).toBe(2);\n\t\t});\n\t});\n\n\tdescribe('getStats', () => {\n\t\tit('should return current cache statistics', async () => {\n\t\t\tawait contextManager.getContext('test-id', { test: true });\n\t\t\tconst stats = contextManager.getStats();\n\n\t\t\texpect(stats.hits).toBe(0);\n\t\t\texpect(stats.misses).toBe(1);\n\t\t\texpect(stats.invalidations).toBe(0);\n\t\t\texpect(stats.size).toBe(1);\n\t\t\texpect(stats.maxSize).toBe(10);\n\t\t\texpect(stats.ttl).toBe(1000);\n\t\t});\n\t});\n});\n"], ["/claude-task-master/src/ai-providers/custom-sdk/claude-code/json-extractor.js", "/**\n * @fileoverview Extract JSON from Claude's response, handling markdown blocks and other formatting\n */\n\n/**\n * Extract JSON from Claude's response\n * @param {string} text - The text to extract JSON from\n * @returns {string} - The extracted JSON string\n */\nexport function extractJson(text) {\n\t// Remove markdown code blocks if present\n\tlet jsonText = text.trim();\n\n\t// Remove ```json blocks\n\tjsonText = jsonText.replace(/^```json\\s*/gm, '');\n\tjsonText = jsonText.replace(/^```\\s*/gm, '');\n\tjsonText = jsonText.replace(/```\\s*$/gm, '');\n\n\t// Remove common TypeScript/JavaScript patterns\n\tjsonText = jsonText.replace(/^const\\s+\\w+\\s*=\\s*/, ''); // Remove \"const varName = \"\n\tjsonText = jsonText.replace(/^let\\s+\\w+\\s*=\\s*/, ''); // Remove \"let varName = \"\n\tjsonText = jsonText.replace(/^var\\s+\\w+\\s*=\\s*/, ''); // Remove \"var varName = \"\n\tjsonText = jsonText.replace(/;?\\s*$/, ''); // Remove trailing semicolons\n\n\t// Try to extract JSON object or array\n\tconst objectMatch = jsonText.match(/{[\\s\\S]*}/);\n\tconst arrayMatch = jsonText.match(/\\[[\\s\\S]*\\]/);\n\n\tif (objectMatch) {\n\t\tjsonText = objectMatch[0];\n\t} else if (arrayMatch) {\n\t\tjsonText = arrayMatch[0];\n\t}\n\n\t// First try to parse as valid JSON\n\ttry 
{\n\t\tJSON.parse(jsonText);\n\t\treturn jsonText;\n\t} catch {\n\t\t// If it's not valid JSON, it might be a JavaScript object literal\n\t\t// Try to convert it to valid JSON\n\t\ttry {\n\t\t\t// This is a simple conversion that handles basic cases\n\t\t\t// Replace unquoted keys with quoted keys\n\t\t\tconst converted = jsonText\n\t\t\t\t.replace(/([{,]\\s*)([a-zA-Z_$][a-zA-Z0-9_$]*)\\s*:/g, '$1\"$2\":')\n\t\t\t\t// Replace single quotes with double quotes\n\t\t\t\t.replace(/'/g, '\"');\n\n\t\t\t// Validate the converted JSON\n\t\t\tJSON.parse(converted);\n\t\t\treturn converted;\n\t\t} catch {\n\t\t\t// If all else fails, return the original text\n\t\t\t// The AI SDK will handle the error appropriately\n\t\t\treturn text;\n\t\t}\n\t}\n}\n"], ["/claude-task-master/src/ai-providers/bedrock.js", "import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';\nimport { fromNodeProviderChain } from '@aws-sdk/credential-providers';\nimport { BaseAIProvider } from './base-provider.js';\n\nexport class BedrockAIProvider extends BaseAIProvider {\n\tconstructor() {\n\t\tsuper();\n\t\tthis.name = 'Bedrock';\n\t}\n\n\tisRequiredApiKey() {\n\t\treturn false;\n\t}\n\n\t/**\n\t * Returns the required API key environment variable name for Bedrock.\n\t * Bedrock uses AWS credentials, so we return the AWS access key identifier.\n\t * @returns {string} The environment variable name\n\t */\n\tgetRequiredApiKeyName() {\n\t\treturn 'AWS_ACCESS_KEY_ID';\n\t}\n\n\t/**\n\t * Override auth validation - Bedrock uses AWS credentials instead of API keys\n\t * @param {object} params - Parameters to validate\n\t */\n\tvalidateAuth(params) {}\n\n\t/**\n\t * Creates and returns a Bedrock client instance.\n\t * See https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html\n\t * for AWS SDK environment variables and configuration options.\n\t */\n\tgetClient(params) {\n\t\ttry {\n\t\t\tconst credentialProvider = fromNodeProviderChain();\n\n\t\t\treturn 
createAmazonBedrock({\n\t\t\t\tcredentialProvider\n\t\t\t});\n\t\t} catch (error) {\n\t\t\tthis.handleError('client initialization', error);\n\t\t}\n\t}\n}\n"], ["/claude-task-master/test-config-manager.js", "// test-config-manager.js\nconsole.log('=== ENVIRONMENT TEST ===');\nconsole.log('Working directory:', process.cwd());\nconsole.log('NODE_PATH:', process.env.NODE_PATH);\n\n// Test basic imports\ntry {\n\tconsole.log('Importing config-manager');\n\t// Use dynamic import for ESM\n\tconst configManagerModule = await import(\n\t\t'./scripts/modules/config-manager.js'\n\t);\n\tconst configManager = configManagerModule.default || configManagerModule;\n\tconsole.log('Config manager loaded successfully');\n\n\tconsole.log('Loading supported models');\n\t// Add after line 14 (after \"Config manager loaded successfully\")\n\tconsole.log('Config manager exports:', Object.keys(configManager));\n} catch (error) {\n\tconsole.error('Import error:', error.message);\n\tconsole.error(error.stack);\n}\n\n// Test file access\ntry {\n\tconsole.log('Checking for .taskmasterconfig');\n\t// Use dynamic import for ESM\n\tconst { readFileSync, existsSync } = await import('fs');\n\tconst { resolve } = await import('path');\n\n\tconst configExists = existsSync('./.taskmasterconfig');\n\tconsole.log('.taskmasterconfig exists:', configExists);\n\n\tif (configExists) {\n\t\tconst config = JSON.parse(readFileSync('./.taskmasterconfig', 'utf-8'));\n\t\tconsole.log('Config keys:', Object.keys(config));\n\t}\n\n\tconsole.log('Checking for supported-models.json');\n\tconst modelsPath = resolve('./scripts/modules/supported-models.json');\n\tconsole.log('Models path:', modelsPath);\n\tconst modelsExists = existsSync(modelsPath);\n\tconsole.log('supported-models.json exists:', modelsExists);\n} catch (error) {\n\tconsole.error('File access error:', error.message);\n}\n\nconsole.log('=== TEST COMPLETE ===');\n"], ["/claude-task-master/src/constants/profiles.js", "/**\n * @typedef {'amp' | 'claude' 
| 'cline' | 'codex' | 'cursor' | 'gemini' | 'kiro' | 'opencode' | 'roo' | 'trae' | 'windsurf' | 'vscode' | 'zed'} RulesProfile\n */\n\n/**\n * Available rule profiles for project initialization and rules command\n *\n * ⚠️ SINGLE SOURCE OF TRUTH: This is the authoritative list of all supported rule profiles.\n * This constant is used directly throughout the codebase (previously aliased as PROFILE_NAMES).\n *\n * @type {RulesProfile[]}\n * @description Defines possible rule profile sets:\n * - amp: Amp Code integration\n * - claude: Claude Code integration\n * - cline: Cline IDE rules\n * - codex: Codex integration\n * - cursor: Cursor IDE rules\n * - gemini: Gemini integration\n * - kiro: Kiro IDE rules\n * - opencode: OpenCode integration\n * - roo: Roo Code IDE rules\n * - trae: Trae IDE rules\n * - vscode: VS Code with GitHub Copilot integration\n * - windsurf: Windsurf IDE rules\n * - zed: Zed IDE rules\n *\n * To add a new rule profile:\n * 1. Add the profile name to this array\n * 2. Create a profile file in src/profiles/{profile}.js\n * 3. 
Export it as {profile}Profile in src/profiles/index.js\n */\nexport const RULE_PROFILES = [\n\t'amp',\n\t'claude',\n\t'cline',\n\t'codex',\n\t'cursor',\n\t'gemini',\n\t'kiro',\n\t'opencode',\n\t'roo',\n\t'trae',\n\t'vscode',\n\t'windsurf',\n\t'zed'\n];\n\n/**\n * Centralized enum for all supported Roo agent modes\n * @type {string[]}\n * @description Available Roo Code IDE modes for rule generation\n */\nexport const ROO_MODES = [\n\t'architect',\n\t'ask',\n\t'orchestrator',\n\t'code',\n\t'debug',\n\t'test'\n];\n\n/**\n * Check if a given rule profile is valid\n * @param {string} rulesProfile - The rule profile to check\n * @returns {boolean} True if the rule profile is valid, false otherwise\n */\nexport function isValidRulesProfile(rulesProfile) {\n\treturn RULE_PROFILES.includes(rulesProfile);\n}\n"], ["/claude-task-master/test-version-check.js", "import {\n\tdisplayUpgradeNotification,\n\tcompareVersions\n} from './scripts/modules/commands.js';\n\n// Simulate different version scenarios\nconsole.log('=== Simulating version check ===\\n');\n\n// 1. Current version is older than latest (should show update notice)\nconsole.log('Scenario 1: Current version older than latest');\ndisplayUpgradeNotification('0.9.30', '1.0.0');\n\n// 2. Current version same as latest (no update needed)\nconsole.log(\n\t'\\nScenario 2: Current version same as latest (this would not normally show a notice)'\n);\nconsole.log('Current: 1.0.0, Latest: 1.0.0');\nconsole.log('compareVersions result:', compareVersions('1.0.0', '1.0.0'));\nconsole.log(\n\t'Update needed:',\n\tcompareVersions('1.0.0', '1.0.0') < 0 ? 'Yes' : 'No'\n);\n\n// 3. 
Current version newer than latest (e.g., development version, would not show notice)\nconsole.log(\n\t'\\nScenario 3: Current version newer than latest (this would not normally show a notice)'\n);\nconsole.log('Current: 1.1.0, Latest: 1.0.0');\nconsole.log('compareVersions result:', compareVersions('1.1.0', '1.0.0'));\nconsole.log(\n\t'Update needed:',\n\tcompareVersions('1.1.0', '1.0.0') < 0 ? 'Yes' : 'No'\n);\n\nconsole.log('\\n=== Test complete ===');\n"], ["/claude-task-master/src/constants/task-priority.js", "/**\n * @typedef {'high' | 'medium' | 'low'} TaskPriority\n */\n\n/**\n * Task priority options\n * @type {TaskPriority[]}\n * @description Defines possible task priorities:\n * - high: Critical tasks that need immediate attention\n * - medium: Standard priority tasks (default)\n * - low: Tasks that can be deferred or are nice-to-have\n */\nexport const TASK_PRIORITY_OPTIONS = ['high', 'medium', 'low'];\n\n/**\n * Default task priority\n * @type {TaskPriority}\n */\nexport const DEFAULT_TASK_PRIORITY = 'medium';\n\n/**\n * Check if a given priority is valid\n * @param {string} priority - The priority to check\n * @returns {boolean} True if the priority is valid, false otherwise\n */\nexport function isValidTaskPriority(priority) {\n\treturn TASK_PRIORITY_OPTIONS.includes(priority?.toLowerCase());\n}\n\n/**\n * Normalize a priority value to lowercase\n * @param {string} priority - The priority to normalize\n * @returns {TaskPriority|null} The normalized priority or null if invalid\n */\nexport function normalizeTaskPriority(priority) {\n\tif (!priority) return null;\n\tconst normalized = priority.toLowerCase();\n\treturn isValidTaskPriority(normalized) ? 
normalized : null;\n}\n"], ["/claude-task-master/jest.config.js", "export default {\n\t// Use Node.js environment for testing\n\ttestEnvironment: 'node',\n\n\t// Automatically clear mock calls between every test\n\tclearMocks: true,\n\n\t// Indicates whether the coverage information should be collected while executing the test\n\tcollectCoverage: false,\n\n\t// The directory where Jest should output its coverage files\n\tcoverageDirectory: 'coverage',\n\n\t// A list of paths to directories that Jest should use to search for files in\n\troots: ['<rootDir>/tests'],\n\n\t// The glob patterns Jest uses to detect test files\n\ttestMatch: ['**/__tests__/**/*.js', '**/?(*.)+(spec|test).js'],\n\n\t// Transform files\n\ttransform: {},\n\n\t// Disable transformations for node_modules\n\ttransformIgnorePatterns: ['/node_modules/'],\n\n\t// Set moduleNameMapper for absolute paths\n\tmoduleNameMapper: {\n\t\t'^@/(.*)$': '<rootDir>/$1'\n\t},\n\n\t// Setup module aliases\n\tmoduleDirectories: ['node_modules', '<rootDir>'],\n\n\t// Configure test coverage thresholds\n\tcoverageThreshold: {\n\t\tglobal: {\n\t\t\tbranches: 80,\n\t\t\tfunctions: 80,\n\t\t\tlines: 80,\n\t\t\tstatements: 80\n\t\t}\n\t},\n\n\t// Generate coverage report in these formats\n\tcoverageReporters: ['text', 'lcov'],\n\n\t// Verbose output\n\tverbose: true,\n\n\t// Setup file\n\tsetupFilesAfterEnv: ['<rootDir>/tests/setup.js']\n};\n"], ["/claude-task-master/src/constants/paths.js", "/**\n * Path constants for Task Master application\n */\n\n// .taskmaster directory structure paths\nexport const TASKMASTER_DIR = '.taskmaster';\nexport const TASKMASTER_TASKS_DIR = '.taskmaster/tasks';\nexport const TASKMASTER_DOCS_DIR = '.taskmaster/docs';\nexport const TASKMASTER_REPORTS_DIR = '.taskmaster/reports';\nexport const TASKMASTER_TEMPLATES_DIR = '.taskmaster/templates';\n\n// Task Master configuration files\nexport const TASKMASTER_CONFIG_FILE = '.taskmaster/config.json';\nexport const TASKMASTER_STATE_FILE = 
'.taskmaster/state.json';\nexport const LEGACY_CONFIG_FILE = '.taskmasterconfig';\n\n// Task Master report files\nexport const COMPLEXITY_REPORT_FILE =\n\t'.taskmaster/reports/task-complexity-report.json';\nexport const LEGACY_COMPLEXITY_REPORT_FILE =\n\t'scripts/task-complexity-report.json';\n\n// Task Master PRD file paths\nexport const PRD_FILE = '.taskmaster/docs/prd.txt';\nexport const LEGACY_PRD_FILE = 'scripts/prd.txt';\n\n// Task Master template files\nexport const EXAMPLE_PRD_FILE = '.taskmaster/templates/example_prd.txt';\nexport const LEGACY_EXAMPLE_PRD_FILE = 'scripts/example_prd.txt';\n\n// Task Master task file paths\nexport const TASKMASTER_TASKS_FILE = '.taskmaster/tasks/tasks.json';\nexport const LEGACY_TASKS_FILE = 'tasks/tasks.json';\n\n// General project files (not Task Master specific but commonly used)\nexport const ENV_EXAMPLE_FILE = '.env.example';\nexport const GITIGNORE_FILE = '.gitignore';\n\n// Task file naming pattern\nexport const TASK_FILE_PREFIX = 'task_';\nexport const TASK_FILE_EXTENSION = '.txt';\n\n/**\n * Project markers used to identify a task-master project root\n * These files/directories indicate that a directory is a Task Master project\n */\nexport const PROJECT_MARKERS = [\n\t'.taskmaster', // New taskmaster directory\n\tLEGACY_CONFIG_FILE, // .taskmasterconfig\n\t'tasks.json', // Generic tasks file\n\tLEGACY_TASKS_FILE, // tasks/tasks.json (legacy location)\n\tTASKMASTER_TASKS_FILE, // .taskmaster/tasks/tasks.json (new location)\n\t'.git', // Git repository\n\t'.svn' // SVN repository\n];\n"], ["/claude-task-master/mcp-server/server.js", "#!/usr/bin/env node\n\nimport TaskMasterMCPServer from './src/index.js';\nimport dotenv from 'dotenv';\nimport logger from './src/logger.js';\n\n// Load environment variables\ndotenv.config();\n\n/**\n * Start the MCP server\n */\nasync function startServer() {\n\tconst server = new TaskMasterMCPServer();\n\n\t// Handle graceful shutdown\n\tprocess.on('SIGINT', async () => {\n\t\tawait 
server.stop();\n\t\tprocess.exit(0);\n\t});\n\n\tprocess.on('SIGTERM', async () => {\n\t\tawait server.stop();\n\t\tprocess.exit(0);\n\t});\n\n\ttry {\n\t\tawait server.start();\n\t} catch (error) {\n\t\tlogger.error(`Failed to start MCP server: ${error.message}`);\n\t\tprocess.exit(1);\n\t}\n}\n\n// Start the server\nstartServer();\n"], ["/claude-task-master/src/profiles/codex.js", "// Codex profile for rule-transformer\nimport { createProfile } from './base-profile.js';\n\n// Create and export codex profile using the base factory\nexport const codexProfile = createProfile({\n\tname: 'codex',\n\tdisplayName: 'Codex',\n\turl: 'codex.ai',\n\tdocsUrl: 'platform.openai.com/docs/codex',\n\tprofileDir: '.', // Root directory\n\trulesDir: '.', // No specific rules directory needed\n\tmcpConfig: false,\n\tmcpConfigName: null,\n\tincludeDefaultRules: false,\n\tfileMap: {\n\t\t'AGENTS.md': 'AGENTS.md'\n\t}\n});\n"], ["/claude-task-master/src/profiles/gemini.js", "// Gemini profile for rule-transformer\nimport { createProfile } from './base-profile.js';\n\n// Create and export gemini profile using the base factory\nexport const geminiProfile = createProfile({\n\tname: 'gemini',\n\tdisplayName: 'Gemini',\n\turl: 'codeassist.google',\n\tdocsUrl: 'github.com/google-gemini/gemini-cli',\n\tprofileDir: '.gemini', // Keep .gemini for settings.json\n\trulesDir: '.', // Root directory for GEMINI.md\n\tmcpConfigName: 'settings.json', // Override default 'mcp.json'\n\tincludeDefaultRules: false,\n\tfileMap: {\n\t\t'AGENTS.md': 'GEMINI.md'\n\t}\n});\n"], ["/claude-task-master/src/constants/rules-actions.js", "/**\n * @typedef {'add' | 'remove'} RulesAction\n */\n\n/**\n * Individual rules action constants\n */\nexport const RULES_ACTIONS = {\n\tADD: 'add',\n\tREMOVE: 'remove'\n};\n\n/**\n * Special rules command (not a CRUD operation)\n */\nexport const RULES_SETUP_ACTION = 'setup';\n\n/**\n * Check if a given action is a valid rules action\n * @param {string} action - The action to 
check\n * @returns {boolean} True if the action is valid, false otherwise\n */\nexport function isValidRulesAction(action) {\n\treturn Object.values(RULES_ACTIONS).includes(action);\n}\n"], ["/claude-task-master/src/constants/providers.js", "/**\n * Provider validation constants\n * Defines which providers should be validated against the supported-models.json file\n */\n\n// Providers that have predefined model lists and should be validated\nexport const VALIDATED_PROVIDERS = [\n\t'anthropic',\n\t'openai',\n\t'google',\n\t'perplexity',\n\t'xai',\n\t'groq',\n\t'mistral'\n];\n\n// Custom providers object for easy named access\nexport const CUSTOM_PROVIDERS = {\n\tAZURE: 'azure',\n\tVERTEX: 'vertex',\n\tBEDROCK: 'bedrock',\n\tOPENROUTER: 'openrouter',\n\tOLLAMA: 'ollama',\n\tCLAUDE_CODE: 'claude-code',\n\tMCP: 'mcp',\n\tGEMINI_CLI: 'gemini-cli'\n};\n\n// Custom providers array (for backward compatibility and iteration)\nexport const CUSTOM_PROVIDERS_ARRAY = Object.values(CUSTOM_PROVIDERS);\n\n// All known providers (for reference)\nexport const ALL_PROVIDERS = [\n\t...VALIDATED_PROVIDERS,\n\t...CUSTOM_PROVIDERS_ARRAY\n];\n"], ["/claude-task-master/scripts/dev.js", "#!/usr/bin/env node\n\n/**\n * dev.js\n * Task Master CLI - AI-driven development task management\n *\n * This is the refactored entry point that uses the modular architecture.\n * It imports functionality from the modules directory and provides a CLI.\n */\n\nimport dotenv from 'dotenv';\ndotenv.config();\n\n// Add at the very beginning of the file\nif (process.env.DEBUG === '1') {\n\tconsole.error('DEBUG - dev.js received args:', process.argv.slice(2));\n}\n\nimport { runCLI } from './modules/commands.js';\n\n// Run the CLI with the process arguments\nrunCLI(process.argv);\n"], ["/claude-task-master/src/profiles/cline.js", "// Cline conversion profile for rule-transformer\nimport { createProfile, COMMON_TOOL_MAPPINGS } from './base-profile.js';\n\n// Create and export cline profile using the base 
factory\nexport const clineProfile = createProfile({\n\tname: 'cline',\n\tdisplayName: 'Cline',\n\turl: 'cline.bot',\n\tdocsUrl: 'docs.cline.bot',\n\tprofileDir: '.clinerules',\n\trulesDir: '.clinerules',\n\tmcpConfig: false\n});\n"], ["/claude-task-master/src/profiles/trae.js", "// Trae conversion profile for rule-transformer\nimport { createProfile, COMMON_TOOL_MAPPINGS } from './base-profile.js';\n\n// Create and export trae profile using the base factory\nexport const traeProfile = createProfile({\n\tname: 'trae',\n\tdisplayName: 'Trae',\n\turl: 'trae.ai',\n\tdocsUrl: 'docs.trae.ai',\n\tmcpConfig: false\n});\n"], ["/claude-task-master/src/profiles/cursor.js", "// Cursor conversion profile for rule-transformer\nimport { createProfile, COMMON_TOOL_MAPPINGS } from './base-profile.js';\n\n// Create and export cursor profile using the base factory\nexport const cursorProfile = createProfile({\n\tname: 'cursor',\n\tdisplayName: 'Cursor',\n\turl: 'cursor.so',\n\tdocsUrl: 'docs.cursor.com',\n\ttargetExtension: '.mdc', // Cursor keeps .mdc extension\n\tsupportsRulesSubdirectories: true\n});\n"], ["/claude-task-master/src/constants/commands.js", "/**\n * Command related constants\n * Defines which commands trigger AI processing\n */\n\n// Command names that trigger AI processing\nexport const AI_COMMAND_NAMES = [\n\t'add-task',\n\t'analyze-complexity',\n\t'expand-task',\n\t'parse-prd',\n\t'research',\n\t'research-save',\n\t'update-subtask',\n\t'update-task',\n\t'update-tasks'\n];\n"], ["/claude-task-master/src/profiles/windsurf.js", "// Windsurf conversion profile for rule-transformer\nimport { createProfile, COMMON_TOOL_MAPPINGS } from './base-profile.js';\n\n// Create and export windsurf profile using the base factory\nexport const windsurfProfile = createProfile({\n\tname: 'windsurf',\n\tdisplayName: 'Windsurf',\n\turl: 'windsurf.com',\n\tdocsUrl: 'docs.windsurf.com'\n});\n"], ["/claude-task-master/scripts/modules/index.js", "/**\n * index.js\n * Main export point for 
all Task Master CLI modules\n */\n\n// Export all modules\nexport * from './ui.js';\nexport * from './utils.js';\nexport * from './commands.js';\nexport * from './task-manager.js';\nexport * from './prompt-manager.js';\n"], ["/claude-task-master/src/ai-providers/index.js", "/**\n * src/ai-providers/index.js\n * Central export point for all AI provider classes\n */\n\nexport { AnthropicAIProvider } from './anthropic.js';\nexport { PerplexityAIProvider } from './perplexity.js';\nexport { GoogleAIProvider } from './google.js';\nexport { OpenAIProvider } from './openai.js';\nexport { XAIProvider } from './xai.js';\nexport { GroqProvider } from './groq.js';\nexport { OpenRouterAIProvider } from './openrouter.js';\nexport { OllamaAIProvider } from './ollama.js';\nexport { BedrockAIProvider } from './bedrock.js';\nexport { AzureProvider } from './azure.js';\nexport { VertexAIProvider } from './google-vertex.js';\nexport { ClaudeCodeProvider } from './claude-code.js';\nexport { GeminiCliProvider } from './gemini-cli.js';\n"], ["/claude-task-master/src/profiles/index.js", "// Profile exports for centralized importing\nexport { ampProfile } from './amp.js';\nexport { claudeProfile } from './claude.js';\nexport { clineProfile } from './cline.js';\nexport { codexProfile } from './codex.js';\nexport { cursorProfile } from './cursor.js';\nexport { geminiProfile } from './gemini.js';\nexport { kiroProfile } from './kiro.js';\nexport { opencodeProfile } from './opencode.js';\nexport { rooProfile } from './roo.js';\nexport { traeProfile } from './trae.js';\nexport { vscodeProfile } from './vscode.js';\nexport { windsurfProfile } from './windsurf.js';\nexport { zedProfile } from './zed.js';\n"], ["/claude-task-master/test-tag-functions.js", ""]], "task_instance_info": {"created_time": "2025-08-21 00:05:55", "created_task_model": "DeepSeek-R1", "code_description": "This code segment functions to: Process tag metadata by aggregating task status statistics and subtask distribution 
metrics while handling system resource configurations and error states during data retrieval operations."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```javascript\n[TASK_BEGIN]\nasync ({ args, log, context }) => {\n\ttry {\n\t\tenableSilentMode();\n\t\t\n\t\tconst allTags = tags;\n\n\t\tif (!allTags || allTags.length === 0) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'NO_TAGS_FOUND',\n\t\t\t\t\tmessage: 'No tags available in the system.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\n\t\tconst resultData = args.showMetadata\n\t\t\t? allTags\n\t\t\t: allTags.map(tag => tag.name);\n\n\t\treturn { success: true, data: resultData };\n\t} catch (error) {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'LIST_TAGS_FAILED',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t} finally {\n\t\tdisableSilentMode();\n\t}\n}\n[TASK_END]\n```", "inference_time": "2025-08-21 00-06-21"}, "editdistance_info": {"edit_distance": 16.753, "calculate_time": "2025-08-21 00:06:21", "true_code_clean": "export async function listTagsDirect(args, log, context = {}) {\n\tconst { tasksJsonPath, showMetadata = false, projectRoot } = args;\n\tconst { session } = context;\n\tenableSilentMode();\n\tconst mcpLog = createLogWrapper(log);\n\ttry {\n\t\tif (!tasksJsonPath) {\n\t\t\tlog.error('listTagsDirect called without tasksJsonPath');\n\t\t\tdisableSilentMode();\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'MISSING_ARGUMENT',\n\t\t\t\t\tmessage: 'tasksJsonPath is required'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t\tlog.info('Listing all tags');\n\t\tconst options = {\n\t\t\tshowMetadata\n\t\t};\n\t\tconst result = await tags(\n\t\t\ttasksJsonPath,\n\t\t\toptions,\n\t\t\t{\n\t\t\t\tsession,\n\t\t\t\tmcpLog,\n\t\t\t\tprojectRoot\n\t\t\t},\n\t\t\t'json' \n\t\t);\n\t\tconst tagsSummary = result.tags.map((tag) => {\n\t\t\tconst tasks = tag.tasks || [];\n\t\t\tconst statusBreakdown = tasks.reduce((acc, task) 
=> {\n\t\t\t\tconst status = task.status || 'pending';\n\t\t\t\tacc[status] = (acc[status] || 0) + 1;\n\t\t\t\treturn acc;\n\t\t\t}, {});\n\t\t\tconst subtaskCounts = tasks.reduce(\n\t\t\t\t(acc, task) => {\n\t\t\t\t\tif (task.subtasks && task.subtasks.length > 0) {\n\t\t\t\t\t\tacc.totalSubtasks += task.subtasks.length;\n\t\t\t\t\t\ttask.subtasks.forEach((subtask) => {\n\t\t\t\t\t\t\tconst subStatus = subtask.status || 'pending';\n\t\t\t\t\t\t\tacc.subtasksByStatus[subStatus] =\n\t\t\t\t\t\t\t\t(acc.subtasksByStatus[subStatus] || 0) + 1;\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t\treturn acc;\n\t\t\t\t},\n\t\t\t\t{ totalSubtasks: 0, subtasksByStatus: {} }\n\t\t\t);\n\t\t\treturn {\n\t\t\t\tname: tag.name,\n\t\t\t\tisCurrent: tag.isCurrent,\n\t\t\t\ttaskCount: tasks.length,\n\t\t\t\tcompletedTasks: tag.completedTasks,\n\t\t\t\tstatusBreakdown,\n\t\t\t\tsubtaskCounts,\n\t\t\t\tcreated: tag.created,\n\t\t\t\tdescription: tag.description\n\t\t\t};\n\t\t});\n\t\tdisableSilentMode();\n\t\treturn {\n\t\t\tsuccess: true,\n\t\t\tdata: {\n\t\t\t\ttags: tagsSummary,\n\t\t\t\tcurrentTag: result.currentTag,\n\t\t\t\ttotalTags: result.totalTags,\n\t\t\t\tmessage: `Found ${result.totalTags} tag(s)`\n\t\t\t}\n\t\t};\n\t} catch (error) {\n\t\tdisableSilentMode();\n\t\tlog.error(`Error in listTagsDirect: ${error.message}`);\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: error.code || 'LIST_TAGS_ERROR',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t}\n}", "predict_code_clean": "async ({ args, log, context }) => {\n\ttry {\n\t\tenableSilentMode();\n\t\tconst allTags = tags;\n\t\tif (!allTags || allTags.length === 0) {\n\t\t\treturn {\n\t\t\t\tsuccess: false,\n\t\t\t\terror: {\n\t\t\t\t\tcode: 'NO_TAGS_FOUND',\n\t\t\t\t\tmessage: 'No tags available in the system.'\n\t\t\t\t}\n\t\t\t};\n\t\t}\n\t\tconst resultData = args.showMetadata\n\t\t\t? 
allTags\n\t\t\t: allTags.map(tag => tag.name);\n\t\treturn { success: true, data: resultData };\n\t} catch (error) {\n\t\treturn {\n\t\t\tsuccess: false,\n\t\t\terror: {\n\t\t\t\tcode: 'LIST_TAGS_FAILED',\n\t\t\t\tmessage: error.message\n\t\t\t}\n\t\t};\n\t} finally {\n\t\tdisableSilentMode();\n\t}\n}"}}