import { mockLogger } from '@n8n/backend-test-utils';
import type {
	TestRun,
	TestCaseExecutionRepository,
	TestRunRepository,
	WorkflowRepository,
} from '@n8n/db';
import { readFileSync } from 'fs';
import type { Mock } from 'jest-mock';
import { mock } from 'jest-mock-extended';
import type { ErrorReporter } from 'n8n-core';
import { EVALUATION_NODE_TYPE, EVALUATION_TRIGGER_NODE_TYPE } from 'n8n-workflow';
import type { IWorkflowBase, IRun, ExecutionError } from 'n8n-workflow';
import path from 'path';
|
|
import type { ActiveExecutions } from '@/active-executions';
import config from '@/config';
import { TestRunError } from '@/evaluation.ee/test-runner/errors.ee';
import { LoadNodesAndCredentials } from '@/load-nodes-and-credentials';
import type { Telemetry } from '@/telemetry';
import type { WorkflowRunner } from '@/workflow-runner';
import { mockInstance } from '@test/mocking';
import { mockNodeTypesData } from '@test-integration/utils/node-types-data';
|
|
import { TestRunnerService } from '../test-runner.service.ee';
|
|
const wfUnderTestJson = JSON.parse(
	readFileSync(path.join(__dirname, './mock-data/workflow.under-test.json'), { encoding: 'utf-8' }),
);
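// The fixture above is read once per module. It is expected to contain an evaluation
// trigger node named 'When fetching a dataset row', which a test below asserts on.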
|
|
const errorReporter = mock<ErrorReporter>();
const logger = mockLogger();
const telemetry = mock<Telemetry>();
|
|
describe('TestRunnerService', () => {
	const workflowRepository = mock<WorkflowRepository>();
	const workflowRunner = mock<WorkflowRunner>();
	const activeExecutions = mock<ActiveExecutions>();
	const testRunRepository = mock<TestRunRepository>();
	const testCaseExecutionRepository = mock<TestCaseExecutionRepository>();
	let testRunnerService: TestRunnerService;
|
|
	mockInstance(LoadNodesAndCredentials, {
		loadedNodes: mockNodeTypesData(['manualTrigger', 'set', 'if', 'code', 'evaluation']),
	});
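	// mockInstance registers this stubbed LoadNodesAndCredentials in the DI container,
	// so the service can resolve the handful of node types used by these tests without
	// loading the real node packages.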
|
|
	beforeEach(() => {
		testRunnerService = new TestRunnerService(
			logger,
			telemetry,
			workflowRepository,
			workflowRunner,
			activeExecutions,
			testRunRepository,
			testCaseExecutionRepository,
			errorReporter,
		);

		// Every test run created in these tests reports against the same stable id
		testRunRepository.createTestRun.mockResolvedValue(mock<TestRun>({ id: 'test-run-id' }));
	});
|
|
	afterEach(() => {
		jest.resetAllMocks();
	});
|
|
	describe('findEvaluationTriggerNode', () => {
		test('should find the trigger node in a workflow', () => {
			// ARRANGE: one evaluation trigger among regular nodes
			const workflowWithTrigger = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Dataset Trigger',
						type: EVALUATION_TRIGGER_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
					{
						id: 'node2',
						name: 'Regular Node',
						type: 'n8n-nodes-base.noOp',
						typeVersion: 1,
						position: [100, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			// ACT: the method is private, so tests reach it through an `any` cast
			const result = (testRunnerService as any).findEvaluationTriggerNode(workflowWithTrigger);

			// ASSERT
			expect(result).toBeDefined();
			expect(result.type).toBe(EVALUATION_TRIGGER_NODE_TYPE);
			expect(result.name).toBe('Dataset Trigger');
		});
|
|
		test('should return undefined when no trigger node is found', () => {
			// ARRANGE: regular nodes only, no evaluation trigger
			const workflowWithoutTrigger = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Regular Node 1',
						type: 'n8n-nodes-base.noOp',
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
					{
						id: 'node2',
						name: 'Regular Node 2',
						type: 'n8n-nodes-base.set',
						typeVersion: 1,
						position: [100, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			// ACT & ASSERT
			const result = (testRunnerService as any).findEvaluationTriggerNode(workflowWithoutTrigger);
			expect(result).toBeUndefined();
		});
|
|
		test('should work with the actual workflow.under-test.json', () => {
			const result = (testRunnerService as any).findEvaluationTriggerNode(wfUnderTestJson);

			// ASSERT
			expect(result).toBeDefined();
			expect(result.type).toBe(EVALUATION_TRIGGER_NODE_TYPE);
			expect(result.name).toBe('When fetching a dataset row');
		});
	});
|
|
	describe('extractDatasetTriggerOutput', () => {
		test('should extract trigger output data from execution', () => {
			// ARRANGE
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'triggerNodeId',
						name: 'TriggerNode',
						type: EVALUATION_TRIGGER_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			const mockOutputItems = [
				{ json: { id: 1, name: 'Test 1' } },
				{ json: { id: 2, name: 'Test 2' } },
			];

			const execution = mock<IRun>({
				data: {
					resultData: {
						runData: {
							TriggerNode: [
								{
									data: {
										main: [mockOutputItems],
									},
									error: undefined,
								},
							],
						},
					},
				},
			});

			// ACT
			const result = (testRunnerService as any).extractDatasetTriggerOutput(execution, workflow);

			// ASSERT: the items on the trigger's first main output are returned as-is
			expect(result).toEqual(mockOutputItems);
		});
|
|
		test('should throw an error if trigger node output is not present', () => {
			// ARRANGE: the execution finished without any run data for the trigger
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'triggerNodeId',
						name: 'TriggerNode',
						type: EVALUATION_TRIGGER_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			const execution = mock<IRun>({
				data: {
					resultData: {
						runData: {},
					},
				},
			});

			// ACT & ASSERT
			expect(() => {
				(testRunnerService as any).extractDatasetTriggerOutput(execution, workflow);
			}).toThrow(TestRunError);

			// The `toThrow` above guarantees this catch block runs
			try {
				(testRunnerService as any).extractDatasetTriggerOutput(execution, workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('TEST_CASES_NOT_FOUND');
			}
		});
|
|
		test('should throw an error if evaluation trigger could not fetch data', () => {
			// ARRANGE: the trigger itself errored while fetching the dataset
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'triggerNodeId',
						name: 'TriggerNode',
						type: EVALUATION_TRIGGER_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			const execution = mock<IRun>({
				data: {
					resultData: {
						runData: {
							TriggerNode: [
								{
									error: mock<ExecutionError>(),
								},
							],
						},
					},
				},
			});

			// ACT & ASSERT
			expect(() => {
				(testRunnerService as any).extractDatasetTriggerOutput(execution, workflow);
			}).toThrow(TestRunError);

			try {
				(testRunnerService as any).extractDatasetTriggerOutput(execution, workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('CANT_FETCH_TEST_CASES');
			}
		});
|
|
		test('should throw an error if trigger node output is empty list', () => {
			// ARRANGE: the trigger ran but produced zero items
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'triggerNodeId',
						name: 'TriggerNode',
						type: EVALUATION_TRIGGER_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			const execution = mock<IRun>({
				data: {
					resultData: {
						runData: {
							TriggerNode: [
								{
									data: {
										main: [[]],
									},
									error: undefined,
								},
							],
						},
					},
				},
			});

			// ACT & ASSERT
			expect(() => {
				(testRunnerService as any).extractDatasetTriggerOutput(execution, workflow);
			}).toThrow(TestRunError);

			try {
				(testRunnerService as any).extractDatasetTriggerOutput(execution, workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('TEST_CASES_NOT_FOUND');
			}
		});
|
|
		test('should work with actual execution data format', () => {
			// ARRANGE: realistic node name and item shape (pairedItem included)
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'triggerNodeId',
						name: "When clicking 'Execute workflow'",
						type: EVALUATION_TRIGGER_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			const expectedItems = [
				{ json: { query: 'First item' }, pairedItem: { item: 0 } },
				{ json: { query: 'Second item' }, pairedItem: { item: 0 } },
				{ json: { query: 'Third item' }, pairedItem: { item: 0 } },
			];

			const execution = mock<IRun>({
				data: {
					resultData: {
						runData: {
							"When clicking 'Execute workflow'": [
								{
									data: {
										main: [expectedItems],
									},
									error: undefined,
								},
							],
						},
					},
				},
			});

			// ACT
			const result = (testRunnerService as any).extractDatasetTriggerOutput(execution, workflow);

			// ASSERT
			expect(result).toEqual(expectedItems);
		});
	});
|
|
	describe('runDatasetTrigger', () => {
		beforeEach(() => {
			// Default happy path: the runner resolves to an execution id and the
			// execution finishes with empty run data
			const mockExecutionId = 'mock-execution-id';
			const mockExecutionData = mock<IRun>({
				data: {
					resultData: {
						runData: {},
					},
				},
			});

			workflowRunner.run.mockResolvedValue(mockExecutionId);
			activeExecutions.getPostExecutePromise.mockResolvedValue(mockExecutionData);
		});
|
|
		test('should throw an error if trigger node is not found', async () => {
			// ARRANGE: no evaluation trigger in the workflow
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Regular Node',
						type: 'n8n-nodes-base.noOp',
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			const metadata = {
				testRunId: 'test-run-id',
				userId: 'user-id',
			};

			// ACT & ASSERT
			await expect(
				(testRunnerService as any).runDatasetTrigger(workflow, metadata),
			).rejects.toThrow(TestRunError);

			try {
				await (testRunnerService as any).runDatasetTrigger(workflow, metadata);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('EVALUATION_TRIGGER_NOT_FOUND');
			}
		});
|
|
		test('should call workflowRunner.run with correct data in normal execution mode', async () => {
			// ARRANGE
			const triggerNodeName = 'Dataset Trigger';
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: triggerNodeName,
						type: EVALUATION_TRIGGER_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
				],
				connections: {},
				settings: {
					saveDataErrorExecution: 'all',
				},
			});

			const metadata = {
				testRunId: 'test-run-id',
				userId: 'user-id',
			};

			// ACT
			await (testRunnerService as any).runDatasetTrigger(workflow, metadata);

			// ASSERT
			expect(workflowRunner.run).toHaveBeenCalledTimes(1);

			const runCallArg = workflowRunner.run.mock.calls[0][0];

			// The dataset fetch runs as a throwaway manual execution: nothing is saved,
			// and the execution stops at the trigger node itself
			expect(runCallArg).toHaveProperty('destinationNode', triggerNodeName);
			expect(runCallArg).toHaveProperty('executionMode', 'manual');
			expect(runCallArg).toHaveProperty('workflowData.settings.saveManualExecutions', false);
			expect(runCallArg).toHaveProperty('workflowData.settings.saveDataErrorExecution', 'none');
			expect(runCallArg).toHaveProperty('workflowData.settings.saveDataSuccessExecution', 'none');
			expect(runCallArg).toHaveProperty('workflowData.settings.saveExecutionProgress', false);
			expect(runCallArg).toHaveProperty('userId', metadata.userId);
			expect(runCallArg).toHaveProperty('partialExecutionVersion', 2);

			expect(runCallArg).toHaveProperty('executionData.executionData.nodeExecutionStack');
			const nodeExecutionStack = runCallArg.executionData?.executionData?.nodeExecutionStack;
			expect(nodeExecutionStack).toBeInstanceOf(Array);
			expect(nodeExecutionStack).toHaveLength(1);
			expect(nodeExecutionStack?.[0]).toHaveProperty('node.name', triggerNodeName);
			expect(nodeExecutionStack?.[0]).toHaveProperty('node.forceCustomOperation', {
				resource: 'dataset',
				operation: 'getRows',
			});
			expect(nodeExecutionStack?.[0]).toHaveProperty('data.main[0][0].json', {});
			expect(runCallArg).toHaveProperty('workflowData.nodes[0].forceCustomOperation', {
				resource: 'dataset',
				operation: 'getRows',
			});
		});
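		// Note the `forceCustomOperation` assertions above: the runner pins the evaluation
		// trigger to its dataset/getRows operation, so this execution only fetches dataset
		// rows rather than evaluating anything.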
|
|
		test('should call workflowRunner.run with correct data in queue execution mode and manual offload', async () => {
			config.set('executions.mode', 'queue');
			process.env.OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS = 'true';

			try {
				// ARRANGE
				const triggerNodeName = 'Dataset Trigger';
				const workflow = mock<IWorkflowBase>({
					nodes: [
						{
							id: 'node1',
							name: triggerNodeName,
							type: EVALUATION_TRIGGER_NODE_TYPE,
							typeVersion: 1,
							position: [0, 0],
							parameters: {},
						},
					],
					connections: {},
					settings: {
						saveDataErrorExecution: 'all',
					},
				});

				const metadata = {
					testRunId: 'test-run-id',
					userId: 'user-id',
				};

				// ACT
				await (testRunnerService as any).runDatasetTrigger(workflow, metadata);

				// ASSERT
				expect(workflowRunner.run).toHaveBeenCalledTimes(1);

				const runCallArg = workflowRunner.run.mock.calls[0][0];

				expect(runCallArg).toHaveProperty('destinationNode', triggerNodeName);
				expect(runCallArg).toHaveProperty('executionMode', 'manual');
				expect(runCallArg).toHaveProperty('workflowData.settings.saveManualExecutions', false);
				expect(runCallArg).toHaveProperty('workflowData.settings.saveDataErrorExecution', 'none');
				expect(runCallArg).toHaveProperty('workflowData.settings.saveDataSuccessExecution', 'none');
				expect(runCallArg).toHaveProperty('workflowData.settings.saveExecutionProgress', false);
				expect(runCallArg).toHaveProperty('userId', metadata.userId);
				expect(runCallArg).toHaveProperty('partialExecutionVersion', 2);

				// No node execution stack is prepared locally (its absence also implies
				// there is no nested nodeExecutionStack to assert on)
				expect(runCallArg).not.toHaveProperty('executionData.executionData');
				expect(runCallArg).toHaveProperty('workflowData.nodes[0].forceCustomOperation', {
					resource: 'dataset',
					operation: 'getRows',
				});
			} finally {
				// Restore global state even if an assertion above fails
				config.set('executions.mode', 'regular');
				delete process.env.OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS;
			}
		});
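		// Reading of the offload assumption behind this test: in queue mode with
		// OFFLOAD_MANUAL_EXECUTIONS_TO_WORKERS=true, building the node execution stack is
		// presumably deferred to the worker, which is why the test only checks that the
		// stack is absent from the enqueued payload.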
|
|
		test('should wait for execution to finish and return result', async () => {
			// ARRANGE
			const triggerNodeName = 'Dataset Trigger';
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: triggerNodeName,
						type: EVALUATION_TRIGGER_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			const metadata = {
				testRunId: 'test-run-id',
				userId: 'user-id',
			};

			const mockExecutionId = 'dataset-execution-id';
			const mockExecutionResult = mock<IRun>({
				data: {
					resultData: {
						runData: {
							[triggerNodeName]: [
								{
									data: {
										main: [[{ json: { test: 'data1' } }, { json: { test: 'data2' } }]],
									},
								},
							],
						},
					},
				},
			});

			workflowRunner.run.mockResolvedValue(mockExecutionId);
			activeExecutions.getPostExecutePromise.mockResolvedValue(mockExecutionResult);

			// ACT
			const result = await (testRunnerService as any).runDatasetTrigger(workflow, metadata);

			// ASSERT: the service awaits the post-execute promise of the started execution
			expect(activeExecutions.getPostExecutePromise).toHaveBeenCalledWith(mockExecutionId);
			expect(result).toEqual(mockExecutionResult);
		});
	});
|
|
	describe('runTestCase', () => {
		beforeEach(() => {
			// Default happy path for every test case run
			const mockExecutionId = 'mock-execution-id';
			const mockExecutionData = mock<IRun>({
				data: {
					resultData: {
						runData: {},
					},
				},
			});

			workflowRunner.run.mockResolvedValue(mockExecutionId);
			activeExecutions.getPostExecutePromise.mockResolvedValue(mockExecutionData);
		});
|
|
		test('should return undefined if abortSignal is aborted', async () => {
			// ARRANGE: the signal is already aborted before the test case starts
			const abortController = new AbortController();
			abortController.abort();

			const workflow = mock<IWorkflowBase>({
				nodes: [],
				connections: {},
			});

			const metadata = {
				testRunId: 'test-run-id',
				userId: 'user-id',
			};

			const testCase = { json: { id: 1, name: 'Test 1' } };

			// ACT
			const result = await (testRunnerService as any).runTestCase(
				workflow,
				metadata,
				testCase,
				abortController.signal,
			);

			// ASSERT
			expect(result).toBeUndefined();
			expect(workflowRunner.run).not.toHaveBeenCalled();
		});
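		// An already-aborted signal short-circuits before anything is scheduled: no
		// execution is started, so there is nothing to cancel or await.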
|
|
		test('should call workflowRunner.run with correct data', async () => {
			// ARRANGE
			const triggerNodeName = 'TriggerNode';
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: triggerNodeName,
						type: EVALUATION_TRIGGER_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			const metadata = {
				testRunId: 'test-run-id',
				userId: 'user-id',
			};

			const testCase = { json: { id: 1, name: 'Test 1' } };
			const abortController = new AbortController();

			// ACT
			await (testRunnerService as any).runTestCase(
				workflow,
				metadata,
				testCase,
				abortController.signal,
			);

			// ASSERT
			expect(workflowRunner.run).toHaveBeenCalledTimes(1);

			const runCallArg = workflowRunner.run.mock.calls[0][0];

			expect(runCallArg).toEqual(
				expect.objectContaining({
					executionMode: 'evaluation',
					pinData: {
						[triggerNodeName]: [testCase],
					},
					workflowData: {
						...workflow,
						settings: {
							...workflow.settings,
							saveManualExecutions: true,
							saveDataErrorExecution: 'all',
							saveDataSuccessExecution: 'all',
							saveExecutionProgress: false,
						},
					},
					userId: metadata.userId,
					partialExecutionVersion: 2,
					triggerToStartFrom: {
						name: triggerNodeName,
					},
				}),
			);
		});
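		// The pinData mechanism is what replays a dataset row: the current test case is
		// pinned onto the trigger node, so the evaluation execution starts from that row
		// instead of fetching the dataset again.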
|
|
		test('should register abort event listener and return execution results', async () => {
			// ARRANGE
			const triggerNodeName = 'TriggerNode';
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: triggerNodeName,
						type: EVALUATION_TRIGGER_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			const metadata = {
				testRunId: 'test-run-id',
				userId: 'user-id',
			};

			const testCase = { json: { id: 1, name: 'Test 1' } };
			const abortController = new AbortController();

			// Spy on the signal's addEventListener to verify the abort hook gets registered
			const mockAddEventListener = jest.fn();
			const originalAddEventListener = abortController.signal.addEventListener;
			abortController.signal.addEventListener = mockAddEventListener;

			try {
				// ACT
				const result = await (testRunnerService as any).runTestCase(
					workflow,
					metadata,
					testCase,
					abortController.signal,
				);

				// ASSERT
				expect(mockAddEventListener).toHaveBeenCalledTimes(1);
				expect(mockAddEventListener.mock.calls[0][0]).toBe('abort');

				expect(result).toHaveProperty('executionData');
				expect(result.executionData?.data).toBeDefined();
				expect(result).toHaveProperty('executionId');
				expect(result.executionId).toEqual(expect.any(String));
			} finally {
				// Always restore the patched method
				abortController.signal.addEventListener = originalAddEventListener;
			}
		});
|
|
		describe('runTestCase - Queue Mode', () => {
			beforeEach(() => {
				// Force queue mode for this block only
				jest.spyOn(config, 'getEnv').mockImplementation((key) => {
					if (key === 'executions.mode') {
						return 'queue';
					}
					return undefined;
				});
			});

			afterEach(() => {
				(config.getEnv as unknown as Mock).mockRestore();
			});
|
|
			test('should call workflowRunner.run with correct data in queue mode', async () => {
				// ARRANGE
				const triggerNodeName = 'TriggerNode';
				const workflow = mock<IWorkflowBase>({
					nodes: [
						{
							id: 'node1',
							name: triggerNodeName,
							type: EVALUATION_TRIGGER_NODE_TYPE,
							typeVersion: 1,
							position: [0, 0],
							parameters: {},
							forceCustomOperation: undefined,
						},
					],
					connections: {},
				});

				const metadata = {
					testRunId: 'test-run-id',
					userId: 'user-id',
				};

				const testCase = { json: { id: 1, name: 'Test 1' } };
				const abortController = new AbortController();

				// ACT
				await (testRunnerService as any).runTestCase(
					workflow,
					metadata,
					testCase,
					abortController.signal,
				);

				// ASSERT
				expect(workflowRunner.run).toHaveBeenCalledTimes(1);

				const runCallArg = workflowRunner.run.mock.calls[0][0];

				// In queue mode the payload additionally carries pre-built execution data,
				// including the pinned test case and the manual-run metadata for the worker
				expect(runCallArg).toEqual(
					expect.objectContaining({
						executionMode: 'evaluation',
						pinData: {
							[triggerNodeName]: [testCase],
						},
						workflowData: {
							...workflow,
							settings: {
								...workflow.settings,
								saveManualExecutions: true,
								saveDataErrorExecution: 'all',
								saveDataSuccessExecution: 'all',
								saveExecutionProgress: false,
							},
						},
						userId: metadata.userId,
						partialExecutionVersion: 2,
						triggerToStartFrom: {
							name: triggerNodeName,
						},
						executionData: {
							resultData: {
								pinData: {
									[triggerNodeName]: [testCase],
								},
								runData: {},
							},
							manualData: {
								userId: metadata.userId,
								partialExecutionVersion: 2,
								triggerToStartFrom: {
									name: triggerNodeName,
								},
							},
						},
					}),
				);
			});
		});
	});
|
|
	describe('validateSetMetricsNodes', () => {
		it('should pass when metrics nodes are properly configured', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Metrics',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: 'setMetrics',
							metrics: {
								assignments: [
									{
										id: '1',
										name: 'accuracy',
										value: 0.95,
									},
									{
										id: '2',
										name: 'precision',
										value: 0.87,
									},
								],
							},
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetMetricsNodes(workflow);
			}).not.toThrow();
		});
|
|
		it('should throw SET_METRICS_NODE_NOT_FOUND when no metrics nodes exist', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Regular Node',
						type: 'n8n-nodes-base.noOp',
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetMetricsNodes(workflow);
			}).toThrow(TestRunError);

			try {
				(testRunnerService as any).validateSetMetricsNodes(workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('SET_METRICS_NODE_NOT_FOUND');
			}
		});
|
|
		it('should throw SET_METRICS_NODE_NOT_CONFIGURED when metrics node has no metrics defined', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Metrics',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: 'setMetrics',
							metrics: undefined,
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetMetricsNodes(workflow);
			}).toThrow(TestRunError);

			try {
				(testRunnerService as any).validateSetMetricsNodes(workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('SET_METRICS_NODE_NOT_CONFIGURED');
				expect(error.extra).toEqual({ node_name: 'Set Metrics' });
			}
		});
|
|
		it('should throw SET_METRICS_NODE_NOT_CONFIGURED when metrics node has empty assignments', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Metrics',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: 'setMetrics',
							metrics: {
								assignments: [],
							},
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetMetricsNodes(workflow);
			}).toThrow(TestRunError);

			try {
				(testRunnerService as any).validateSetMetricsNodes(workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('SET_METRICS_NODE_NOT_CONFIGURED');
				expect(error.extra).toEqual({ node_name: 'Set Metrics' });
			}
		});
|
|
		it('should throw SET_METRICS_NODE_NOT_CONFIGURED when assignment has no name', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Metrics',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: 'setMetrics',
							metrics: {
								assignments: [
									{
										id: '1',
										name: '',
										value: 0.95,
									},
								],
							},
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetMetricsNodes(workflow);
			}).toThrow(TestRunError);

			try {
				(testRunnerService as any).validateSetMetricsNodes(workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('SET_METRICS_NODE_NOT_CONFIGURED');
				expect(error.extra).toEqual({ node_name: 'Set Metrics' });
			}
		});
|
|
		it('should throw SET_METRICS_NODE_NOT_CONFIGURED when assignment has null value', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Metrics',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: 'setMetrics',
							metrics: {
								assignments: [
									{
										id: '1',
										name: 'accuracy',
										value: null,
									},
								],
							},
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetMetricsNodes(workflow);
			}).toThrow(TestRunError);

			try {
				(testRunnerService as any).validateSetMetricsNodes(workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('SET_METRICS_NODE_NOT_CONFIGURED');
				expect(error.extra).toEqual({ node_name: 'Set Metrics' });
			}
		});
|
|
		it('should validate multiple metrics nodes successfully', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Metrics 1',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: 'setMetrics',
							metrics: {
								assignments: [
									{
										id: '1',
										name: 'accuracy',
										value: 0.95,
									},
								],
							},
						},
					},
					{
						id: 'node2',
						name: 'Set Metrics 2',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [100, 0],
						parameters: {
							operation: 'setMetrics',
							metrics: {
								assignments: [
									{
										id: '2',
										name: 'precision',
										value: 0.87,
									},
								],
							},
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetMetricsNodes(workflow);
			}).not.toThrow();
		});
	});
|
|
	describe('validateSetOutputsNodes', () => {
		it('should pass when outputs nodes are properly configured', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Outputs',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: 'setOutputs',
							outputs: {
								assignments: [
									{
										id: '1',
										name: 'result',
										value: 'success',
									},
									{
										id: '2',
										name: 'score',
										value: 95,
									},
								],
							},
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			}).not.toThrow();
		});
|
|
		it('should pass when operation is default (undefined)', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Outputs',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: undefined,
							outputs: {
								assignments: [
									{
										id: '1',
										name: 'result',
										value: 'success',
									},
									{
										id: '2',
										name: 'score',
										value: 95,
									},
								],
							},
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			}).not.toThrow();
		});
|
|
		it('should throw SET_OUTPUTS_NODE_NOT_FOUND when no outputs nodes exist', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Regular Node',
						type: 'n8n-nodes-base.noOp',
						typeVersion: 1,
						position: [0, 0],
						parameters: {},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			}).toThrow(TestRunError);

			try {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_FOUND');
			}
		});
|
|
		it('should throw SET_OUTPUTS_NODE_NOT_CONFIGURED when outputs node has no outputs defined', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Outputs',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: 'setOutputs',
							outputs: undefined,
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			}).toThrow(TestRunError);

			try {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_CONFIGURED');
				expect(error.extra).toEqual({ node_name: 'Set Outputs' });
			}
		});
|
|
		it('should throw SET_OUTPUTS_NODE_NOT_CONFIGURED when outputs node has empty assignments', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Outputs',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: 'setOutputs',
							outputs: {
								assignments: [],
							},
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			}).toThrow(TestRunError);

			try {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_CONFIGURED');
				expect(error.extra).toEqual({ node_name: 'Set Outputs' });
			}
		});
|
|
		it('should throw SET_OUTPUTS_NODE_NOT_CONFIGURED when assignment has no name', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Outputs',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: 'setOutputs',
							outputs: {
								assignments: [
									{
										id: '1',
										name: '',
										value: 'result',
									},
								],
							},
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			}).toThrow(TestRunError);

			try {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_CONFIGURED');
				expect(error.extra).toEqual({ node_name: 'Set Outputs' });
			}
		});
|
|
		it('should throw SET_OUTPUTS_NODE_NOT_CONFIGURED when assignment has null value', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Outputs',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: 'setOutputs',
							outputs: {
								assignments: [
									{
										id: '1',
										name: 'result',
										value: null,
									},
								],
							},
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			}).toThrow(TestRunError);

			try {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			} catch (error) {
				expect(error).toBeInstanceOf(TestRunError);
				expect(error.code).toBe('SET_OUTPUTS_NODE_NOT_CONFIGURED');
				expect(error.extra).toEqual({ node_name: 'Set Outputs' });
			}
		});
|
|
		it('should validate multiple outputs nodes successfully', () => {
			const workflow = mock<IWorkflowBase>({
				nodes: [
					{
						id: 'node1',
						name: 'Set Outputs 1',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [0, 0],
						parameters: {
							operation: 'setOutputs',
							outputs: {
								assignments: [
									{
										id: '1',
										name: 'result',
										value: 'success',
									},
								],
							},
						},
					},
					{
						id: 'node2',
						name: 'Set Outputs 2',
						type: EVALUATION_NODE_TYPE,
						typeVersion: 1,
						position: [100, 0],
						parameters: {
							operation: 'setOutputs',
							outputs: {
								assignments: [
									{
										id: '2',
										name: 'score',
										value: 95,
									},
								],
							},
						},
					},
				],
				connections: {},
			});

			expect(() => {
				(testRunnerService as any).validateSetOutputsNodes(workflow);
			}).not.toThrow();
		});
	});
});
|
|