address conflict

Mutasem Aldmour
2025-12-05 15:53:19 +01:00
124 changed files with 4232 additions and 3830 deletions

View File

@@ -172,7 +172,8 @@ ARG N8N_VERSION=snapshot
ARG N8N_RELEASE_TYPE=dev
ENV NODE_ENV=production \
N8N_RELEASE_TYPE=${N8N_RELEASE_TYPE}
N8N_RELEASE_TYPE=${N8N_RELEASE_TYPE} \
HOME=/home/runner
# Copy everything from the prepared runtime filesystem
COPY --from=runtime-prep --chown=root:root /runtime/ /

View File

@@ -2,6 +2,7 @@ import pLimit from 'p-limit';
import pc from 'picocolors';
import { createProgressBar, updateProgress, displayResults, displayError } from './display.js';
import type { BuilderFeatureFlags } from '../../src/workflow-builder-agent.js';
import { basicTestCases, generateTestCases } from '../chains/test-case-generator.js';
import {
setupTestEnvironment,
@@ -25,6 +26,7 @@ type CliEvaluationOptions = {
testCaseFilter?: string; // Optional test case ID to run only a specific test
testCases?: TestCase[]; // Optional array of test cases to run (if not provided, uses defaults and generation)
repetitions?: number; // Number of times to run each test (e.g. for cache warming analysis)
featureFlags?: BuilderFeatureFlags; // Optional feature flags to pass to the agent (e.g. templateExamples, multiAgent)
};
/**
@@ -32,12 +34,20 @@ type CliEvaluationOptions = {
* Supports concurrency control via EVALUATION_CONCURRENCY environment variable
*/
export async function runCliEvaluation(options: CliEvaluationOptions = {}): Promise<void> {
const { repetitions = 1, testCaseFilter } = options;
const { repetitions = 1, testCaseFilter, featureFlags } = options;
console.log(formatHeader('AI Workflow Builder Full Evaluation', 70));
if (repetitions > 1) {
console.log(pc.yellow(`➔ Each test will be run ${repetitions} times for cache analysis`));
}
if (featureFlags) {
const enabledFlags = Object.entries(featureFlags)
.filter(([, v]) => v === true)
.map(([k]) => k);
if (enabledFlags.length > 0) {
console.log(pc.green(`➔ Feature flags enabled: ${enabledFlags.join(', ')}`));
}
}
console.log();
try {
// Setup test environment
@@ -105,7 +115,9 @@ export async function runCliEvaluation(options: CliEvaluationOptions = {}): Prom
// Create a dedicated agent for this test to avoid state conflicts
const testAgent = createAgent(parsedNodeTypes, llm, tracer);
const result = await runSingleTest(testAgent, llm, testCase, parsedNodeTypes);
const result = await runSingleTest(testAgent, llm, testCase, parsedNodeTypes, {
featureFlags,
});
testResults[testCase.id] = result.error ? 'fail' : 'pass';
completed++;

View File

@@ -1,7 +1,7 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { INodeTypeDescription } from 'n8n-workflow';
import type { WorkflowBuilderAgent } from '../../src/workflow-builder-agent';
import type { BuilderFeatureFlags, WorkflowBuilderAgent } from '../../src/workflow-builder-agent';
import { evaluateWorkflow } from '../chains/workflow-evaluator';
import { programmaticEvaluation } from '../programmatic/programmatic-evaluation';
import type { EvaluationInput, TestCase } from '../types/evaluation';
@@ -69,12 +69,22 @@ export function createErrorResult(testCase: TestCase, error: unknown): TestResul
};
}
export interface RunSingleTestOptions {
agent: WorkflowBuilderAgent;
llm: BaseChatModel;
testCase: TestCase;
nodeTypes: INodeTypeDescription[];
userId?: string;
featureFlags?: BuilderFeatureFlags;
}
/**
* Runs a single test case by generating a workflow and evaluating it
* @param agent - The workflow builder agent to use
* @param llm - Language model for evaluation
* @param testCase - Test case to execute
* @param userId - User ID for the session
* @param nodeTypes - Array of node type descriptions
* @param opts - Options: userId (user ID for the session) and featureFlags (optional feature flags to pass to the agent)
* @returns Test result with generated workflow and evaluation
*/
export async function runSingleTest(
@@ -82,13 +92,17 @@ export async function runSingleTest(
llm: BaseChatModel,
testCase: TestCase,
nodeTypes: INodeTypeDescription[],
userId: string = 'test-user',
opts?: { userId?: string; featureFlags?: BuilderFeatureFlags },
): Promise<TestResult> {
const userId = opts?.userId ?? 'test-user';
try {
// Generate workflow
const startTime = Date.now();
await consumeGenerator(
agent.chat(getChatPayload('single-eval', testCase.prompt, testCase.id), userId),
agent.chat(
getChatPayload('single-eval', testCase.prompt, testCase.id, opts?.featureFlags),
userId,
),
);
const generationTime = Date.now() - startTime;

View File

@@ -1,3 +1,5 @@
import type { BuilderFeatureFlags } from '@/workflow-builder-agent';
import { runCliEvaluation } from './cli/runner.js';
import { runPairwiseLangsmithEvaluation } from './langsmith/pairwise-runner.js';
import { runLangsmithEvaluation } from './langsmith/runner.js';
@@ -36,13 +38,21 @@ async function main(): Promise<void> {
: 1;
const repetitions = Number.isNaN(repetitionsArg) ? 1 : repetitionsArg;
// Parse feature flags from environment variables or CLI arguments
const featureFlags = parseFeatureFlags();
if (usePairwiseEval) {
await runPairwiseLangsmithEvaluation(repetitions);
await runPairwiseLangsmithEvaluation(repetitions, featureFlags);
} else if (useLangsmith) {
await runLangsmithEvaluation(repetitions);
await runLangsmithEvaluation(repetitions, featureFlags);
} else {
const csvTestCases = promptsCsvPath ? loadTestCasesFromCsv(promptsCsvPath) : undefined;
await runCliEvaluation({ testCases: csvTestCases, testCaseFilter: testCaseId, repetitions });
await runCliEvaluation({
testCases: csvTestCases,
testCaseFilter: testCaseId,
repetitions,
featureFlags,
});
}
}
@@ -68,6 +78,36 @@ function getFlagValue(flag: string): string | undefined {
return undefined;
}
/**
* Parse feature flags from environment variables or CLI arguments.
* Environment variables:
* - EVAL_FEATURE_TEMPLATE_EXAMPLES=true - Enable template examples feature
* - EVAL_FEATURE_MULTI_AGENT=true - Enable multi-agent feature
* CLI arguments:
* - --template-examples - Enable template examples feature
* - --multi-agent - Enable multi-agent feature
*/
function parseFeatureFlags(): BuilderFeatureFlags | undefined {
const templateExamplesFromEnv = process.env.EVAL_FEATURE_TEMPLATE_EXAMPLES === 'true';
const multiAgentFromEnv = process.env.EVAL_FEATURE_MULTI_AGENT === 'true';
const templateExamplesFromCli = process.argv.includes('--template-examples');
const multiAgentFromCli = process.argv.includes('--multi-agent');
const templateExamples = templateExamplesFromEnv || templateExamplesFromCli;
const multiAgent = multiAgentFromEnv || multiAgentFromCli;
// Only return feature flags object if at least one flag is set
if (templateExamples || multiAgent) {
return {
templateExamples: templateExamples || undefined,
multiAgent: multiAgent || undefined,
};
}
return undefined;
}
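
A minimal usage sketch (names taken from this diff; the invocation itself is hypothetical): environment variables and CLI arguments are OR-ed together, so either source can enable a flag.

```typescript
// Hypothetical example: one flag from the environment, one from argv.
process.env.EVAL_FEATURE_TEMPLATE_EXAMPLES = 'true';
process.argv.push('--multi-agent');

const flags = parseFeatureFlags();
// flags → { templateExamples: true, multiAgent: true }
await runCliEvaluation({ repetitions: 1, featureFlags: flags });
```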
// Run if called directly
if (require.main === module) {
main().catch(console.error);

View File

@@ -7,6 +7,7 @@ import type { INodeTypeDescription } from 'n8n-workflow';
import pc from 'picocolors';
import type { SimpleWorkflow } from '../../src/types/workflow';
import type { BuilderFeatureFlags } from '../../src/workflow-builder-agent';
import { evaluateWorkflowPairwise } from '../chains/pairwise-evaluator';
import { setupTestEnvironment, createAgent } from '../core/environment';
import { generateRunId, isWorkflowStateValues } from '../types/langsmith';
@@ -41,6 +42,7 @@ function createPairwiseWorkflowGenerator(
parsedNodeTypes: INodeTypeDescription[],
llm: BaseChatModel,
tracer?: LangChainTracer,
featureFlags?: BuilderFeatureFlags,
) {
return async (inputs: PairwiseDatasetInput) => {
const runId = generateRunId();
@@ -51,7 +53,7 @@ function createPairwiseWorkflowGenerator(
// Use the prompt from the dataset
await consumeGenerator(
agent.chat(
getChatPayload('langsmith-pairwise', inputs.prompt, runId),
getChatPayload('langsmith-pairwise', inputs.prompt, runId, featureFlags),
'langsmith-pairwise-eval-user',
),
);
@@ -120,9 +122,21 @@ function createPairwiseLangsmithEvaluator(llm: BaseChatModel) {
};
}
export async function runPairwiseLangsmithEvaluation(repetitions: number = 1): Promise<void> {
export async function runPairwiseLangsmithEvaluation(
repetitions: number = 1,
featureFlags?: BuilderFeatureFlags,
): Promise<void> {
console.log(formatHeader('AI Workflow Builder Pairwise Evaluation', 70));
if (featureFlags) {
const enabledFlags = Object.entries(featureFlags)
.filter(([, v]) => v === true)
.map(([k]) => k);
if (enabledFlags.length > 0) {
console.log(pc.green(`➔ Feature flags enabled: ${enabledFlags.join(', ')}`));
}
}
if (!process.env.LANGSMITH_API_KEY) {
console.error(pc.red('✗ LANGSMITH_API_KEY environment variable not set'));
process.exit(1);
@@ -164,7 +178,12 @@ export async function runPairwiseLangsmithEvaluation(repetitions: number = 1): P
data = examples;
}
const generateWorkflow = createPairwiseWorkflowGenerator(parsedNodeTypes, llm, tracer);
const generateWorkflow = createPairwiseWorkflowGenerator(
parsedNodeTypes,
llm,
tracer,
featureFlags,
);
const evaluator = createPairwiseLangsmithEvaluator(llm);
await evaluate(generateWorkflow, {

View File

@@ -5,6 +5,7 @@ import type { INodeTypeDescription } from 'n8n-workflow';
import pc from 'picocolors';
import { createLangsmithEvaluator } from './evaluator';
import type { BuilderFeatureFlags } from '../../src/workflow-builder-agent';
import type { WorkflowState } from '../../src/workflow-state';
import { setupTestEnvironment, createAgent } from '../core/environment';
import {
@@ -20,12 +21,14 @@ import { consumeGenerator, formatHeader, getChatPayload } from '../utils/evaluat
* @param parsedNodeTypes - Node types
* @param llm - Language model
* @param tracer - Optional tracer
* @param featureFlags - Optional feature flags to pass to the agent
* @returns Function that generates workflows from inputs
*/
function createWorkflowGenerator(
parsedNodeTypes: INodeTypeDescription[],
llm: BaseChatModel,
tracer?: LangChainTracer,
featureFlags?: BuilderFeatureFlags,
) {
return async (inputs: typeof WorkflowState.State) => {
// Generate a unique ID for this evaluation run
@@ -43,7 +46,10 @@ function createWorkflowGenerator(
// Create agent for this run
const agent = createAgent(parsedNodeTypes, llm, tracer);
await consumeGenerator(
agent.chat(getChatPayload('langsmith-evals', messageContent, runId), 'langsmith-eval-user'),
agent.chat(
getChatPayload('langsmith-evals', messageContent, runId, featureFlags),
'langsmith-eval-user',
),
);
// Get generated workflow with validation
@@ -75,12 +81,24 @@ function createWorkflowGenerator(
/**
* Runs evaluation using Langsmith
* @param repetitions - Number of times to run each example (default: 1)
* @param featureFlags - Optional feature flags to pass to the agent
*/
export async function runLangsmithEvaluation(repetitions: number = 1): Promise<void> {
export async function runLangsmithEvaluation(
repetitions: number = 1,
featureFlags?: BuilderFeatureFlags,
): Promise<void> {
console.log(formatHeader('AI Workflow Builder Langsmith Evaluation', 70));
if (repetitions > 1) {
console.log(pc.yellow(`➔ Each example will be run ${repetitions} times`));
}
if (featureFlags) {
const enabledFlags = Object.entries(featureFlags)
.filter(([, v]) => v === true)
.map(([k]) => k);
if (enabledFlags.length > 0) {
console.log(pc.green(`➔ Feature flags enabled: ${enabledFlags.join(', ')}`));
}
}
console.log();
// Check for Langsmith API key
@@ -123,7 +141,7 @@ export async function runLangsmithEvaluation(repetitions: number = 1): Promise<v
const startTime = Date.now();
// Create workflow generation function
const generateWorkflow = createWorkflowGenerator(parsedNodeTypes, llm, tracer);
const generateWorkflow = createWorkflowGenerator(parsedNodeTypes, llm, tracer, featureFlags);
// Create evaluator with both LLM-based and programmatic evaluation
const evaluator = createLangsmithEvaluator(llm, parsedNodeTypes);

View File

@@ -9,7 +9,7 @@ import pc from 'picocolors';
import { v4 as uuid } from 'uuid';
import { anthropicClaudeSonnet45 } from '../../src/llm-config';
import type { ChatPayload } from '../../src/workflow-builder-agent';
import type { BuilderFeatureFlags, ChatPayload } from '../../src/workflow-builder-agent';
import { WorkflowBuilderAgent } from '../../src/workflow-builder-agent';
import type { Violation } from '../types/evaluation';
import type { TestResult } from '../types/test-result';
@@ -278,12 +278,18 @@ export async function consumeGenerator<T>(gen: AsyncGenerator<T>) {
}
}
export function getChatPayload(evalType: string, message: string, workflowId: string): ChatPayload {
export function getChatPayload(
evalType: string,
message: string,
workflowId: string,
featureFlags?: BuilderFeatureFlags,
): ChatPayload {
return {
id: `${evalType}-${uuid()}`,
message,
workflowContext: {
currentWorkflow: { id: workflowId, nodes: [], connections: {} },
},
featureFlags,
};
}

View File

@@ -3,57 +3,20 @@ import type { AIMessage, BaseMessage } from '@langchain/core/messages';
import { HumanMessage } from '@langchain/core/messages';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { buildResponderPrompt } from '@/prompts/agents/responder.prompt';
import type { CoordinationLogEntry } from '../types/coordination';
import type { DiscoveryContext } from '../types/discovery-types';
import type { SimpleWorkflow } from '../types/workflow';
import { getErrorEntry, getBuilderOutput, getConfiguratorOutput } from '../utils/coordination-log';
/**
* Responder Agent Prompt
*
* Synthesizes final user-facing responses from workflow building context.
* Also handles conversational queries.
*/
const RESPONDER_PROMPT = `You are a helpful AI assistant for n8n workflow automation.
You have access to context about what has been built, including:
- Discovery results (nodes found)
- Builder output (workflow structure)
- Configuration summary (setup instructions)
FOR WORKFLOW COMPLETION RESPONSES:
When you receive [Internal Context], synthesize a clean user-facing response:
1. Summarize what was built in a friendly way
2. Explain the workflow structure briefly
3. Include setup instructions if provided
4. Ask if user wants adjustments
Example response structure:
"I've created your [workflow type] workflow! Here's what it does:
[Brief explanation of the flow]
**Setup Required:**
[List any configuration steps from the context]
Let me know if you'd like to adjust anything."
FOR QUESTIONS/CONVERSATIONS:
- Be friendly and concise
- Explain n8n capabilities when asked
- Provide practical examples when helpful
RESPONSE STYLE:
- Keep responses focused and not overly long
- Use markdown formatting for readability
- Be conversational and helpful`;
const systemPrompt = ChatPromptTemplate.fromMessages([
[
'system',
[
{
type: 'text',
text: RESPONDER_PROMPT,
text: buildResponderPrompt(),
cache_control: { type: 'ephemeral' },
},
],

View File

@@ -4,63 +4,23 @@ import { HumanMessage } from '@langchain/core/messages';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { z } from 'zod';
import {
buildSupervisorPrompt,
SUPERVISOR_PROMPT_SUFFIX,
} from '@/prompts/agents/supervisor.prompt';
import type { CoordinationLogEntry } from '../types/coordination';
import type { SimpleWorkflow } from '../types/workflow';
import { buildWorkflowSummary } from '../utils/context-builders';
import { summarizeCoordinationLog } from '../utils/coordination-log';
/**
* Supervisor Agent Prompt
*
* Handles INITIAL routing based on user intent.
* After initial routing, deterministic routing takes over based on coordination log.
*/
const SUPERVISOR_PROMPT = `You are a Supervisor that routes user requests to specialist agents.
AVAILABLE AGENTS:
- discovery: Find n8n nodes for building/modifying workflows
- builder: Create nodes and connections (requires discovery first for new node types)
- configurator: Set parameters on EXISTING nodes (no structural changes)
- responder: Answer questions, confirm completion (TERMINAL)
ROUTING DECISION TREE:
1. Is user asking a question or chatting? → responder
Examples: "what does this do?", "explain the workflow", "thanks"
2. Does the request involve NEW or DIFFERENT node types? → discovery
Examples:
- "Build a workflow that..." (new workflow)
- "Use [ServiceB] instead of [ServiceA]" (replacing node type)
- "Add [some integration]" (new integration)
- "Switch from [ServiceA] to [ServiceB]" (swapping services)
3. Is the request about connecting/disconnecting existing nodes? → builder
Examples: "Connect node A to node B", "Remove the connection to X"
4. Is the request about changing VALUES in existing nodes? → configurator
Examples:
- "Change the URL to https://..."
- "Set the timeout to 30 seconds"
- "Update the email subject to..."
KEY DISTINCTION:
- "Use [ServiceB] instead of [ServiceA]" = REPLACEMENT = discovery (new node type needed)
- "Change the [ServiceA] API key" = CONFIGURATION = configurator (same node, different value)
OUTPUT:
- reasoning: One sentence explaining your routing decision
- next: Agent name`;
const systemPrompt = ChatPromptTemplate.fromMessages([
[
'system',
[
{
type: 'text',
text:
SUPERVISOR_PROMPT +
'\n\nGiven the conversation above, which agent should act next? Provide your reasoning and selection.',
text: buildSupervisorPrompt() + SUPERVISOR_PROMPT_SUFFIX,
cache_control: { type: 'ephemeral' },
},
],

View File

@@ -1,22 +1,9 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { BaseMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage } from '@langchain/core/messages';
import { PromptTemplate } from '@langchain/core/prompts';
import z from 'zod';
const compactPromptTemplate = PromptTemplate.fromTemplate(
`Please summarize the following conversation between a user and an AI assistant building an n8n workflow:
<previous_summary>
{previousSummary}
</previous_summary>
<conversation>
{conversationText}
</conversation>
Provide a structured summary that captures the key points, decisions made, current state of the workflow, and suggested next steps.`,
);
import { compactPromptTemplate } from '@/prompts/chains/compact.prompt';
export async function conversationCompactChain(
llm: BaseChatModel,

View File

@@ -4,10 +4,13 @@ import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/
import type { Logger } from 'n8n-workflow';
import { z } from 'zod';
import {
instanceUrlPrompt,
ParameterUpdatePromptBuilder,
} from '@/prompts/chains/parameter-updater';
import { LLMServiceError } from '../errors';
import type { ParameterUpdaterOptions } from '../types/config';
import { instanceUrlPrompt } from './prompts/instance-url';
import { ParameterUpdatePromptBuilder } from './prompts/prompt-builder';
export const parametersSchema = z
.object({

View File

@@ -1,96 +1,11 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { PromptTemplate } from '@langchain/core/prompts';
import { z } from 'zod';
import {
WorkflowTechnique,
TechniqueDescription,
type PromptCategorization,
} from '@/types/categorization';
const examplePrompts = [
{
prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
techniques: [
WorkflowTechnique.MONITORING,
WorkflowTechnique.CHATBOT,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
techniques: [
WorkflowTechnique.FORM_INPUT,
WorkflowTechnique.HUMAN_IN_THE_LOOP,
WorkflowTechnique.NOTIFICATION,
],
},
{
prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
techniques: [
WorkflowTechnique.SCHEDULING,
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_ANALYSIS,
],
},
{
prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
techniques: [
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.ENRICHMENT,
],
},
{
prompt: 'Build a searchable internal knowledge base from past support tickets',
techniques: [
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.DATA_ANALYSIS,
WorkflowTechnique.KNOWLEDGE_BASE,
],
},
];
function formatExamplePrompts() {
return examplePrompts
.map((example) => `- ${example.prompt} → ${example.techniques.join(', ')}`)
.join('\n');
}
const promptCategorizationTemplate = PromptTemplate.fromTemplate(
`Analyze the following user prompt and identify the workflow techniques required to fulfill the request.
Be specific and identify all relevant techniques.
<user_prompt>
{userPrompt}
</user_prompt>
<workflow_techniques>
{techniques}
</workflow_techniques>
The following prompt categorization examples show a prompt → techniques involved to provide a sense
of how the categorization should be carried out.
<example_categorization>
${formatExamplePrompts()}
</example_categorization>
Select a maximum of 5 techniques that you believe are applicable, but only select them if you are
confident that they are applicable. If the prompt is ambiguous or does not provide an obvious workflow,
do not provide any techniques - if confidence is low, avoid providing techniques.
Select ALL techniques that apply to this workflow. Most workflows use multiple techniques.
Rate your confidence in this categorization from 0.0 to 1.0.
`,
);
function formatTechniqueList(): string {
return Object.entries(TechniqueDescription)
.map(([key, description]) => `- **${key}**: ${description}`)
.join('\n');
}
import {
formatTechniqueList,
promptCategorizationTemplate,
} from '@/prompts/chains/categorization.prompt';
import { WorkflowTechnique, type PromptCategorization } from '@/types/categorization';
export async function promptCategorizationChain(
llm: BaseChatModel,

View File

@@ -1,17 +1,7 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { PromptTemplate } from '@langchain/core/prompts';
import z from 'zod';
const workflowNamingPromptTemplate = PromptTemplate.fromTemplate(
`Based on the initial user prompt, please generate a name for the workflow that captures its essence and purpose.
<initial_prompt>
{initialPrompt}
</initial_prompt>
This name should be concise, descriptive, and suitable for a workflow that automates tasks related to the given prompt. The name should be in a format that is easy to read and understand. Do not include the word "workflow" in the name.
`,
);
import { workflowNamingPromptTemplate } from '@/prompts/chains/workflow-name.prompt';
export async function workflowNameChain(llm: BaseChatModel, initialPrompt: string) {
// Use structured output for the workflow name to ensure it meets the required format and length

View File

@@ -0,0 +1,204 @@
# AI Workflow Builder Prompts
Centralized prompts for the n8n AI Workflow Builder. This directory contains all prompts used by agents and chains.
## Directory Structure
```
src/prompts/
├── index.ts # Central exports
├── README.md # This file
├── legacy-agent.prompt.ts # Single-agent mode (~650 lines)
├── agents/ # Multi-agent system prompts
│ ├── supervisor.prompt.ts # Routes requests to specialists
│ ├── discovery.prompt.ts # Finds nodes & categorizes techniques
│ ├── builder.prompt.ts # Creates workflow structure
│ ├── configurator.prompt.ts # Sets node parameters
│ └── responder.prompt.ts # Generates user responses
└── chains/ # Chain-level prompts
├── categorization.prompt.ts # Workflow technique classification
├── compact.prompt.ts # Conversation summarization
├── workflow-name.prompt.ts # Workflow name generation
└── parameter-updater/ # Dynamic prompt building for node updates
├── index.ts # Exports
├── prompt-builder.ts # ParameterUpdatePromptBuilder class
├── prompt-config.ts # Node detection config
├── instance-url.ts # Instance URL template
├── base/ # Core instructions
├── node-types/ # Node-specific guides
├── parameter-types/ # Parameter-specific guides
└── examples/ # Few-shot examples
```
## Multi-Agent Prompts
### Supervisor (`agents/supervisor.prompt.ts`)
Routes user requests to the appropriate specialist agent.
| Export | Description |
|--------|-------------|
| `buildSupervisorPrompt()` | Builds the supervisor system prompt |
| `SUPERVISOR_PROMPT_SUFFIX` | Suffix asking "which agent should act next?" |
**Routing targets:** discovery, builder, configurator, responder
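
A minimal sketch of composing the full supervisor system prompt (assuming the central `@/prompts` index re-exports these, per the directory structure above):

```typescript
import { buildSupervisorPrompt, SUPERVISOR_PROMPT_SUFFIX } from '@/prompts';

// The suffix appends the "which agent should act next?" question,
// matching the composition used in the supervisor chain in this commit.
const supervisorSystemText = buildSupervisorPrompt() + SUPERVISOR_PROMPT_SUFFIX;
```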
### Discovery (`agents/discovery.prompt.ts`)
Identifies relevant n8n nodes and categorizes workflow techniques.
| Export | Description |
|--------|-------------|
| `buildDiscoveryPrompt(options)` | Builds prompt with optional examples phase |
| `formatTechniqueList()` | Formats available techniques as bullet list |
| `formatExampleCategorizations()` | Formats few-shot examples |
| `exampleCategorizations` | 14 few-shot classification examples |
| `DiscoveryPromptOptions` | Type: `{ includeExamples: boolean }` |
**Input variables:** `{techniques}`, `{exampleCategorizations}`
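
A sketch of building the discovery prompt (same re-export assumption). The result is still a template string: the input variables listed above remain as placeholders for LangChain to fill.

```typescript
import {
  buildDiscoveryPrompt,
  formatTechniqueList,
  formatExampleCategorizations,
} from '@/prompts';

// includeExamples: true adds the get_workflow_examples tool and process step.
const discoveryTemplate = buildDiscoveryPrompt({ includeExamples: true });

// {techniques} and {exampleCategorizations} are typically filled with:
const techniques = formatTechniqueList();
const exampleCategorizations = formatExampleCategorizations();
```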
### Builder (`agents/builder.prompt.ts`)
Constructs workflow structure by creating nodes and connections.
| Export | Description |
|--------|-------------|
| `buildBuilderPrompt()` | Builds the builder system prompt |
**Key sections:** Node creation rules, connection parameters, AI connections, RAG patterns
### Configurator (`agents/configurator.prompt.ts`)
Sets up node parameters using natural language instructions.
| Export | Description |
|--------|-------------|
| `buildConfiguratorPrompt()` | Builds the configurator system prompt |
| `INSTANCE_URL_PROMPT` | Template with `{instanceUrl}` variable |
**Input variables:** `{instanceUrl}`
### Responder (`agents/responder.prompt.ts`)
Generates user-facing responses and handles conversational queries.
| Export | Description |
|--------|-------------|
| `buildResponderPrompt()` | Builds the responder system prompt |
## Legacy Agent Prompt
### `legacy-agent.prompt.ts`
Comprehensive monolithic prompt for single-agent mode. Contains all workflow building logic.
| Export | Description |
|--------|-------------|
| `createMainAgentPrompt(options?)` | Creates ChatPromptTemplate with options |
| `mainAgentPrompt` | Default prompt instance |
| `MainAgentPromptOptions` | Type: `{ includeExamplesPhase?: boolean }` |
**Input variables:** `{instanceUrl}`, `{previousSummary}`, `{messages}`
**Phases:**
1. Categorization (mandatory)
2. Examples (optional, feature-flagged)
3. Discovery (parallel)
4. Analysis (parallel)
5. Creation (parallel)
6. Connection (parallel)
7. Configuration (mandatory)
8. Validation (mandatory)
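
A sketch of opting into the feature-flagged Examples phase (phase 2 above), under the same re-export assumption:

```typescript
import { createMainAgentPrompt } from '@/prompts';

// includeExamplesPhase: true enables the optional Examples phase; the
// returned ChatPromptTemplate still expects {instanceUrl},
// {previousSummary}, and {messages} at format time.
const legacyPrompt = createMainAgentPrompt({ includeExamplesPhase: true });
```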
## Chain Prompts
### Categorization (`chains/categorization.prompt.ts`)
Analyzes user prompts to identify workflow techniques.
| Export | Description |
|--------|-------------|
| `promptCategorizationTemplate` | PromptTemplate for classification |
| `examplePrompts` | 5 few-shot examples |
| `formatExamplePrompts()` | Formats examples as "prompt → techniques" |
| `formatTechniqueList()` | Formats technique descriptions |
**Input variables:** `{userPrompt}`, `{techniques}`
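
A sketch of rendering the categorization template (re-export assumption as above; `format` is LangChain's standard `PromptTemplate` method):

```typescript
import { promptCategorizationTemplate, formatTechniqueList } from '@/prompts';

// Fill both input variables; the few-shot examples are baked into the template.
const rendered = await promptCategorizationTemplate.format({
  userPrompt: 'Scrape competitor pricing pages weekly and summarize changes',
  techniques: formatTechniqueList(),
});
```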
### Compact (`chains/compact.prompt.ts`)
Summarizes multi-turn conversations for context management.
| Export | Description |
|--------|-------------|
| `compactPromptTemplate` | PromptTemplate for summarization |
**Input variables:** `{previousSummary}`, `{conversationText}`
**Output:** Structured summary with key_decisions, current_state, next_steps
### Workflow Name (`chains/workflow-name.prompt.ts`)
Generates descriptive workflow names.
| Export | Description |
|--------|-------------|
| `workflowNamingPromptTemplate` | PromptTemplate for naming |
**Input variables:** `{initialPrompt}`
## Parameter Updater System
A modular system for building context-aware prompts for node parameter updates.
### ParameterUpdatePromptBuilder (`chains/parameter-updater/prompt-builder.ts`)
Dynamically assembles prompts based on node context.
```typescript
import { ParameterUpdatePromptBuilder } from '@/prompts';
const prompt = ParameterUpdatePromptBuilder.buildSystemPrompt({
nodeType: 'n8n-nodes-base.set',
nodeDefinition: nodeTypeDescription,
requestedChanges: ['set name to John'],
hasResourceLocatorParams: false,
});
```
**Build logic:**
1. Always: CORE_INSTRUCTIONS + EXPRESSION_RULES
2. Node-type guide (Set, IF, Switch, HTTP, Tool)
3. Parameter-type guides if applicable
4. COMMON_PATTERNS
5. Relevant examples
6. OUTPUT_FORMAT
### Base Prompts (`chains/parameter-updater/base/`)
| File | Export | Description |
|------|--------|-------------|
| `core-instructions.ts` | `CORE_INSTRUCTIONS` | Parameter update guidelines |
| `expression-rules.ts` | `EXPRESSION_RULES` | n8n expression syntax rules |
| `common-patterns.ts` | `COMMON_PATTERNS` | HTTP Request patterns |
| `output-format.ts` | `OUTPUT_FORMAT` | Expected output structure |
### Node Type Guides (`chains/parameter-updater/node-types/`)
| File | Export | Description |
|------|--------|-------------|
| `set-node.ts` | `SET_NODE_GUIDE` | Assignment structure & types |
| `if-node.ts` | `IF_NODE_GUIDE` | Filter conditions & operators |
| `switch-node.ts` | `SWITCH_NODE_GUIDE` | Rules and routing patterns |
| `http-request.ts` | `HTTP_REQUEST_GUIDE` | URL, headers, body, auth |
| `tool-nodes.ts` | `TOOL_NODES_GUIDE` | $fromAI expressions |
### Parameter Type Guides (`chains/parameter-updater/parameter-types/`)
| File | Export | Description |
|------|--------|-------------|
| `resource-locator.ts` | `RESOURCE_LOCATOR_GUIDE` | __rl structure & modes |
| `system-message.ts` | `SYSTEM_MESSAGE_GUIDE` | AI node message separation |
| `text-fields.ts` | `TEXT_FIELDS_GUIDE` | Expression embedding |

View File

@@ -0,0 +1,253 @@
/**
* Builder Agent Prompt
*
* Constructs workflow structure by creating nodes and connections based on Discovery results.
* Does NOT configure node parameters - that's the Configurator Agent's job.
*/
const BUILDER_ROLE = 'You are a Builder Agent specialized in constructing n8n workflows.';
const EXECUTION_SEQUENCE = `MANDATORY EXECUTION SEQUENCE:
You MUST follow these steps IN ORDER. Do not skip any step.
STEP 1: CREATE NODES
- Call add_nodes for EVERY node needed based on discovery results
- Create multiple nodes in PARALLEL for efficiency
- Do NOT respond with text - START BUILDING immediately
STEP 2: CONNECT NODES
- Call connect_nodes for ALL required connections
- Connect multiple node pairs in PARALLEL
STEP 3: VALIDATE (REQUIRED)
- After ALL nodes and connections are created, call validate_structure
- This step is MANDATORY - you cannot finish without it
- If validation finds issues (missing trigger, invalid connections), fix them and validate again
- MAXIMUM 3 VALIDATION ATTEMPTS: After 3 calls to validate_structure, proceed to respond regardless of remaining issues
STEP 4: RESPOND TO USER
- Only after validation passes, provide your brief summary
⚠️ NEVER respond to the user without calling validate_structure first ⚠️`;
const NODE_CREATION = `NODE CREATION:
Each add_nodes call creates ONE node. You must provide:
- nodeType: The exact type from discovery (e.g., "n8n-nodes-base.httpRequest")
- name: Descriptive name (e.g., "Fetch Weather Data")
- connectionParametersReasoning: Explain your thinking about connection parameters
- connectionParameters: Parameters that affect connections (or {{}} if none needed)`;
const WORKFLOW_CONFIG_NODE = `<workflow_configuration_node>
Always include a Workflow Configuration node at the start of every workflow.
The Workflow Configuration node (n8n-nodes-base.set) should be placed immediately after the trigger node and before all other processing nodes.
Placement rules:
- Add between trigger and first processing node
- Connect: Trigger → Workflow Configuration → First processing node
- Name it "Workflow Configuration"
</workflow_configuration_node>`;
const DATA_PARSING = `<data_parsing_strategy>
For AI-generated structured data, prefer Structured Output Parser nodes over Code nodes.
For binary file data, use Extract From File node to extract content from files before processing.
Use Code nodes only for custom business logic beyond parsing.
STRUCTURED OUTPUT PARSER RULE:
When Discovery results include Structured Output Parser:
1. Create the Structured Output Parser node
2. Set AI Agent's hasOutputParser: true in connectionParameters
3. Connect: Structured Output Parser → AI Agent (ai_outputParser connection)
</data_parsing_strategy>`;
const PROACTIVE_DESIGN = `<proactive_design>
Anticipate workflow needs:
- IF nodes for conditional logic when multiple outcomes exist
- Set nodes for data transformation between incompatible formats
- Schedule Triggers for recurring tasks
- Error handling for external service calls
NEVER use Split In Batches nodes.
</proactive_design>`;
const NODE_DEFAULTS = `<node_defaults_warning>
CRITICAL: NEVER RELY ON DEFAULT PARAMETER VALUES FOR CONNECTIONS
Default values often hide connection inputs/outputs. You MUST explicitly configure parameters that affect connections:
- Vector Store: Mode parameter affects available connections - always set explicitly (e.g., mode: "insert", "retrieve", "retrieve-as-tool")
- AI Agent: hasOutputParser default may not match your workflow needs
- Document Loader: textSplittingMode affects whether it accepts a text splitter input
ALWAYS check node details and set connectionParameters explicitly.
</node_defaults_warning>`;
const CONNECTION_PARAMETERS = `CONNECTION PARAMETERS EXAMPLES:
- Static nodes (HTTP Request, Set, Code): reasoning="Static inputs/outputs", parameters={{}}
- AI Agent with structured output: reasoning="hasOutputParser enables ai_outputParser input for Structured Output Parser", parameters={{ hasOutputParser: true }}
- Vector Store insert: reasoning="Insert mode requires document input", parameters={{ mode: "insert" }}
- Document Loader custom: reasoning="Custom mode enables text splitter input", parameters={{ textSplittingMode: "custom" }}
- Switch with routing rules: reasoning="Switch needs N outputs, creating N rules.values entries with outputKeys", parameters={{ mode: "rules", rules: {{ values: [...] }} }} - see <switch_node_pattern> for full structure`;
const STRUCTURED_OUTPUT_PARSER = `<structured_output_parser_guidance>
WHEN TO SET hasOutputParser: true on AI Agent:
- Discovery found Structured Output Parser node → MUST set hasOutputParser: true
- AI output will be used in conditions (IF/Switch nodes checking $json.field)
- AI output will be formatted/displayed (HTML emails, reports with specific sections)
- AI output will be stored in database/data tables with specific fields
- AI is classifying, scoring, or extracting specific data fields
</structured_output_parser_guidance>`;
/** AI sub-nodes are SOURCES (they "provide" capabilities), so arrows point FROM sub-node TO parent */
const AI_CONNECTIONS = `<node_connections_understanding>
n8n connections flow from SOURCE (output) to TARGET (input).
Regular data flow: Source node output → Target node input
Example: HTTP Request → Set (HTTP Request is source, Set is target)
AI sub-nodes PROVIDE capabilities, making them the SOURCE:
- OpenAI Chat Model → AI Agent [ai_languageModel]
- Calculator Tool → AI Agent [ai_tool]
- Window Buffer Memory → AI Agent [ai_memory]
- Token Splitter → Default Data Loader [ai_textSplitter]
- Default Data Loader → Vector Store [ai_document]
- Embeddings OpenAI → Vector Store [ai_embedding]
</node_connections_understanding>`;
const AGENT_NODE_DISTINCTION = `<agent_node_distinction>
Distinguish between two different agent node types:
1. **AI Agent** (@n8n/n8n-nodes-langchain.agent)
- Main workflow node that orchestrates AI tasks
- Use for: Primary AI logic, chatbots, autonomous workflows
2. **AI Agent Tool** (@n8n/n8n-nodes-langchain.agentTool)
- Sub-node that acts as a tool for another AI Agent
- Use for: Multi-agent systems where one agent calls another
Default assumption: When discovery results include "agent", use AI Agent
unless explicitly specified as "agent tool" or "sub-agent".
</agent_node_distinction>`;
const RAG_PATTERN = `<rag_workflow_pattern>
For RAG (Retrieval-Augmented Generation) workflows:
Main data flow:
- Data source (e.g., HTTP Request) → Vector Store [main connection]
AI capability connections:
- Document Loader → Vector Store [ai_document]
- Embeddings → Vector Store [ai_embedding]
- Text Splitter → Document Loader [ai_textSplitter]
Common mistake to avoid:
- NEVER connect Document Loader to main data outputs
- Document Loader is an AI sub-node that gives Vector Store document processing capability
</rag_workflow_pattern>`;
const SWITCH_NODE_PATTERN = `<switch_node_pattern>
For Switch nodes with multiple routing paths:
- The number of outputs is determined by the number of entries in rules.values[]
- You MUST create the rules.values[] array with placeholder entries for each output branch
- Each entry needs: conditions structure (with empty leftValue/rightValue) + renameOutput: true + descriptive outputKey
- Configurator will fill in the actual condition values later
- Use descriptive node names like "Route by Amount" or "Route by Status"
Example connectionParameters for 3-way routing:
{{
"mode": "rules",
"rules": {{
"values": [
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 1 Name"
}},
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 2 Name"
}},
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 3 Name"
}}
]
}}
}}
</switch_node_pattern>`;
const CONNECTION_TYPES = `<connection_type_examples>
**Main Connections** (regular data flow):
- Trigger → HTTP Request → Set → Email
**AI Language Model Connections** (ai_languageModel):
- OpenAI Chat Model → AI Agent
**AI Tool Connections** (ai_tool):
- Calculator Tool → AI Agent
- AI Agent Tool → AI Agent (for multi-agent systems)
**AI Document Connections** (ai_document):
- Document Loader → Vector Store
**AI Embedding Connections** (ai_embedding):
- OpenAI Embeddings → Vector Store
**AI Text Splitter Connections** (ai_textSplitter):
- Token Text Splitter → Document Loader
**AI Memory Connections** (ai_memory):
- Window Buffer Memory → AI Agent
**AI Vector Store in retrieve-as-tool mode** (ai_tool):
- Vector Store → AI Agent
</connection_type_examples>`;
const RESTRICTIONS = `DO NOT:
- Respond before calling validate_structure
- Skip validation even if you think structure is correct
- Add commentary between tool calls - execute tools silently
- Configure node parameters (that's the Configurator Agent's job)
- Search for nodes (that's the Discovery Agent's job)
- Make assumptions about node types - use exactly what Discovery found`;
const RESPONSE_FORMAT = `RESPONSE FORMAT (only after validation):
Provide ONE brief text message summarizing:
- What nodes were added
- How they're connected
Example: "Created 4 nodes: Trigger → Weather → Image Generation → Email"`;
export function buildBuilderPrompt(): string {
return [
BUILDER_ROLE,
EXECUTION_SEQUENCE,
NODE_CREATION,
WORKFLOW_CONFIG_NODE,
DATA_PARSING,
PROACTIVE_DESIGN,
NODE_DEFAULTS,
CONNECTION_PARAMETERS,
STRUCTURED_OUTPUT_PARSER,
AI_CONNECTIONS,
AGENT_NODE_DISTINCTION,
RAG_PATTERN,
SWITCH_NODE_PATTERN,
CONNECTION_TYPES,
RESTRICTIONS,
RESPONSE_FORMAT,
].join('\n\n');
}

View File

@@ -0,0 +1,137 @@
/**
* Configurator Agent Prompt
*
* Sets up node parameters after the Builder Agent has created the workflow structure.
* Uses natural language instructions to configure each node's settings.
*/
const CONFIGURATOR_ROLE =
'You are a Configurator Agent specialized in setting up n8n node parameters.';
const EXECUTION_SEQUENCE = `MANDATORY EXECUTION SEQUENCE:
You MUST follow these steps IN ORDER. Do not skip any step.
STEP 1: CONFIGURE ALL NODES
- Call update_node_parameters for EVERY node in the workflow
- Configure multiple nodes in PARALLEL for efficiency
- Do NOT respond with text - START CONFIGURING immediately
STEP 2: VALIDATE (REQUIRED)
- After ALL configurations complete, call validate_configuration
- This step is MANDATORY - you cannot finish without it
- If validation finds issues, fix them and validate again
- MAXIMUM 3 VALIDATION ATTEMPTS: After 3 calls to validate_configuration, proceed to respond regardless of remaining issues
STEP 3: RESPOND TO USER
- Only after validation passes, provide your response
NEVER respond to the user without calling validate_configuration first`;
const WORKFLOW_JSON_DETECTION = `WORKFLOW JSON DETECTION:
- You receive <current_workflow_json> in your context
- If you see nodes in the workflow JSON, you MUST configure them IMMEDIATELY
- Look at the workflow JSON, identify each node, and call update_node_parameters for ALL of them`;
const PARAMETER_CONFIGURATION = `PARAMETER CONFIGURATION:
Use update_node_parameters with natural language instructions:
- "Set URL to https://api.example.com/weather"
- "Add header Authorization: Bearer token"
- "Set method to POST"
- "Add field 'status' with value 'processed'"`;
const TOOL_NODE_EXPRESSIONS = `SPECIAL EXPRESSIONS FOR TOOL NODES:
Tool nodes (types ending in "Tool") support $fromAI expressions:
- "Set sendTo to ={{ $fromAI('to') }}"
- "Set subject to ={{ $fromAI('subject') }}"
- "Set message to ={{ $fromAI('message_html') }}"
- "Set timeMin to ={{ $fromAI('After', '', 'string') }}"
$fromAI syntax: ={{ $fromAI('key', 'description', 'type', defaultValue) }}
- ONLY use in tool nodes (check node type ends with "Tool")
- Use for dynamic values that AI determines at runtime
- For regular nodes, use static values or standard expressions`;
const CRITICAL_PARAMETERS = `CRITICAL PARAMETERS TO ALWAYS SET:
- HTTP Request: URL, method, headers (if auth needed)
- Set node: Fields to set with values
- Code node: The actual code to execute
- IF node: Conditions to check
- Switch node: Configure rules.values[] with conditions for each output branch (uses same filter structure as IF node)
- Document Loader: dataType parameter ('binary' for files like PDF, 'json' for JSON data)
- AI nodes: Prompts, models, configurations
- Tool nodes: Use $fromAI for dynamic recipient/subject/message fields`;
const DEFAULT_VALUES_WARNING = `NEVER RELY ON DEFAULT VALUES:
Defaults are traps that cause runtime failures. Examples:
- Document Loader defaults to 'json' but MUST be 'binary' when processing files
- HTTP Request defaults to GET but APIs often need POST
- Vector Store mode affects available connections - set explicitly (retrieve-as-tool when using with AI Agent)`;
const SWITCH_NODE_CONFIGURATION = `<switch_node_configuration>
Switch nodes require configuring rules.values[] array - each entry creates one output:
Structure per rule:
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [
{{
"leftValue": "={{{{ $json.fieldName }}}}",
"rightValue": <value>,
"operator": {{ "type": "number|string", "operation": "lt|gt|equals|etc" }}
}}
],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Descriptive Label"
}}
For numeric ranges (e.g., $100-$1000):
- Use TWO conditions with combinator: "and"
- First: gte (greater than or equal)
- Second: lte (less than or equal)
Always set renameOutput: true and provide descriptive outputKey labels.
</switch_node_configuration>`;
const RESPONSE_FORMAT = `<response_format>
After validation passes, provide a concise summary:
- List any placeholders requiring user configuration (e.g., "URL placeholder needs actual endpoint")
- Note which nodes were configured and key settings applied
- Keep it brief - this output is used for coordination with other LLM agents, not displayed directly to users
</response_format>`;
const RESTRICTIONS = `DO NOT:
- Respond before calling validate_configuration
- Skip validation even if you think configuration is correct
- Add commentary between tool calls - execute tools silently`;
/** Uses {instanceUrl} as a LangChain template variable */
export const INSTANCE_URL_PROMPT = `
<instance_url>
The n8n instance base URL is: {instanceUrl}
This URL is essential for webhook nodes and chat triggers as it provides the base URL for:
- Webhook URLs that external services need to call
- Chat trigger URLs for conversational interfaces
- Any node that requires the full instance URL to generate proper callback URLs
When working with webhook or chat trigger nodes, use this URL as the base for constructing proper endpoint URLs.
</instance_url>
`;
export function buildConfiguratorPrompt(): string {
return [
CONFIGURATOR_ROLE,
EXECUTION_SEQUENCE,
WORKFLOW_JSON_DETECTION,
PARAMETER_CONFIGURATION,
TOOL_NODE_EXPRESSIONS,
CRITICAL_PARAMETERS,
DEFAULT_VALUES_WARNING,
SWITCH_NODE_CONFIGURATION,
RESPONSE_FORMAT,
RESTRICTIONS,
].join('\n\n');
}

View File

@@ -0,0 +1,315 @@
/**
* Discovery Agent Prompt
*
* Identifies relevant n8n nodes and their connection-changing parameters based on
* the user's request. Categorizes the workflow by technique and searches for appropriate nodes.
*/
import {
TechniqueDescription,
WorkflowTechnique,
type WorkflowTechniqueType,
} from '@/types/categorization';
/** Few-shot examples for technique classification */
export const exampleCategorizations: Array<{
prompt: string;
techniques: WorkflowTechniqueType[];
}> = [
{
prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
techniques: [
WorkflowTechnique.MONITORING,
WorkflowTechnique.CHATBOT,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
techniques: [
WorkflowTechnique.FORM_INPUT,
WorkflowTechnique.HUMAN_IN_THE_LOOP,
WorkflowTechnique.NOTIFICATION,
],
},
{
prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
techniques: [
WorkflowTechnique.SCHEDULING,
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_ANALYSIS,
],
},
{
prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
techniques: [
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.ENRICHMENT,
],
},
{
prompt: 'Build a searchable internal knowledge base from past support tickets',
techniques: [
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.DATA_ANALYSIS,
WorkflowTechnique.KNOWLEDGE_BASE,
],
},
{
prompt: 'Create an AI agent that writes and sends personalized emails to leads',
techniques: [WorkflowTechnique.CONTENT_GENERATION, WorkflowTechnique.NOTIFICATION],
},
{
prompt:
'Fetch trending topics from Google Trends and Reddit, select the best ones, and create social posts',
techniques: [
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.TRIAGE,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt:
'Trigger when a new contact is created in HubSpot and enrich their profile with LinkedIn data',
techniques: [WorkflowTechnique.MONITORING, WorkflowTechnique.ENRICHMENT],
},
{
prompt: 'Get stock prices from financial APIs and analyze volatility patterns',
techniques: [WorkflowTechnique.SCRAPING_AND_RESEARCH, WorkflowTechnique.DATA_ANALYSIS],
},
{
prompt: 'Generate video reels from templates and auto-post to social media on schedule',
techniques: [
WorkflowTechnique.SCHEDULING,
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt: 'Receive news from Telegram channels, filter relevant ones, and forward to my channel',
techniques: [
WorkflowTechnique.MONITORING,
WorkflowTechnique.TRIAGE,
WorkflowTechnique.NOTIFICATION,
],
},
{
prompt: 'Analyze YouTube video performance data and generate a weekly report',
techniques: [
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.DATA_ANALYSIS,
WorkflowTechnique.DATA_TRANSFORMATION,
],
},
{
prompt:
'Create a chatbot that answers questions using data from a Google Sheet as knowledge base',
techniques: [WorkflowTechnique.CHATBOT, WorkflowTechnique.KNOWLEDGE_BASE],
},
{
prompt: 'Form submission with file upload triggers document extraction and approval workflow',
techniques: [
WorkflowTechnique.FORM_INPUT,
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.HUMAN_IN_THE_LOOP,
],
},
];
export function formatTechniqueList(): string {
return Object.entries(TechniqueDescription)
.map(([key, description]) => `- **${key}**: ${description}`)
.join('\n');
}
export function formatExampleCategorizations(): string {
return exampleCategorizations
.map((example) => `- ${example.prompt} → ${example.techniques.join(', ')}`)
.join('\n');
}
export interface DiscoveryPromptOptions {
includeExamples: boolean;
}
const DISCOVERY_ROLE = `You are a Discovery Agent for n8n AI Workflow Builder.
YOUR ROLE: Identify relevant n8n nodes and their connection-changing parameters.`;
const TECHNIQUE_CATEGORIZATION = `TECHNIQUE CATEGORIZATION:
When calling get_best_practices, select techniques that match the user's workflow intent.
<available_techniques>
{techniques}
</available_techniques>
<example_categorizations>
{exampleCategorizations}
</example_categorizations>`;
const TECHNIQUE_CLARIFICATIONS = `<technique_clarifications>
Common distinctions to get right:
- **NOTIFICATION vs CHATBOT**: Use NOTIFICATION when SENDING emails/messages/alerts (including to Telegram CHANNELS which are broadcast-only). Use CHATBOT only when RECEIVING and REPLYING to direct messages in a conversation.
- **MONITORING**: Use when workflow TRIGGERS on external events (new record created, status changed, incoming webhook, new message in channel). NOT just scheduled runs.
- **SCRAPING_AND_RESEARCH vs DATA_EXTRACTION**: Use SCRAPING when fetching from EXTERNAL sources (APIs, websites, social media). Use DATA_EXTRACTION for parsing INTERNAL data you already have.
- **TRIAGE**: Use when SELECTING, PRIORITIZING, ROUTING, or QUALIFYING items (e.g., "pick the best", "route to correct team", "qualify leads").
- **DOCUMENT_PROCESSING**: Use for ANY file handling - PDFs, images, videos, Excel, Google Sheets, audio files, file uploads in forms.
- **HUMAN_IN_THE_LOOP**: Use when workflow PAUSES for human approval, review, signing documents, responding to polls, or any manual input before continuing.
- **DATA_ANALYSIS**: Use when ANALYZING, CLASSIFYING, IDENTIFYING PATTERNS, or UNDERSTANDING data (e.g., "analyze outcomes", "learn from previous", "classify by type", "identify trends").
- **KNOWLEDGE_BASE**: Use when storing/retrieving from a DATA SOURCE for Q&A - includes vector DBs, spreadsheets used as databases, document collections.
- **DATA_TRANSFORMATION**: Use when CONVERTING data format, creating REPORTS/SUMMARIES from analyzed data, or restructuring output.
</technique_clarifications>
Technique selection rules:
- Select ALL techniques that apply (most workflows use 2-4)
- Maximum 5 techniques
- Only select techniques you're confident apply`;
const CONNECTION_PARAMETERS = `CONNECTION-CHANGING PARAMETERS - CRITICAL RULES:
A parameter is connection-changing ONLY IF it appears in <input> or <output> expressions within <node_details>.
**How to identify:**
1. Look at the <connections> section in node details
2. Check if <input> or <output> uses expressions like: ={{...parameterName...}}
3. If a parameter is referenced in these expressions, it IS connection-changing
4. If a parameter is NOT in <input>/<output> expressions, it is NOT connection-changing
**Example from AI Agent:**
\`\`\`xml
<input>={{...hasOutputParser, needsFallback...}}</input>
\`\`\`
→ hasOutputParser and needsFallback ARE connection-changing (they control which inputs appear)
**Counter-example:**
\`\`\`xml
<properties>
<property name="promptType">...</property> <!-- NOT in <input>/<output> -->
<property name="systemMessage">...</property> <!-- NOT in <input>/<output> -->
</properties>
\`\`\`
→ promptType and systemMessage are NOT connection-changing (they don't affect connections)
**Common connection-changing parameters:**
- Vector Store: mode (appears in <input>/<output> expressions)
- AI Agent: hasOutputParser, needsFallback (appears in <input> expression)
- Merge: numberInputs (appears in <input> expression)
- Webhook: responseMode (appears in <output> expression)`;
const DYNAMIC_OUTPUT_NODES = `<dynamic_output_nodes>
Some nodes have DYNAMIC outputs that depend on parameter values:
**Switch Node** (n8n-nodes-base.switch):
- When mode is "rules", the number of outputs equals the number of routing rules
- Connection parameter: mode: "rules" - CRITICAL for enabling rule-based routing
- Each rule in rules.values[] creates one output
- The rules parameter uses the same filter structure as IF node conditions
- ALWAYS flag mode as connection-changing with possibleValues: ["rules", "expression"]
**Merge Node** (n8n-nodes-base.merge):
- numberInputs parameter controls how many inputs the node accepts
When you find these nodes, ALWAYS flag mode/numberInputs as connection-changing parameters with possibleValues.
</dynamic_output_nodes>`;
const SUB_NODES_SEARCHES = `SUB-NODES SEARCHES:
When searching for AI nodes, ALSO search for their required sub-nodes:
- "AI Agent" → also search for "Chat Model", "Memory", "Output Parser"
- "Basic LLM Chain" → also search for "Chat Model", "Output Parser"
- "Vector Store" → also search for "Embeddings", "Document Loader"`;
const STRUCTURED_OUTPUT_PARSER = `STRUCTURED OUTPUT PARSER - WHEN TO INCLUDE:
Search for "Structured Output Parser" (@n8n/n8n-nodes-langchain.outputParserStructured) when:
- AI output will be used programmatically (conditions, formatting, database storage, API calls)
- AI needs to extract specific fields (e.g., score, category, priority, action items)
- AI needs to classify/categorize data into defined categories
- Downstream nodes need to access specific fields from AI response (e.g., $json.score, $json.category)
- Output will be displayed in a formatted way (e.g., HTML email with specific sections)
- Data needs validation against a schema before processing
- Always use search_nodes to find the exact node names and versions - NEVER guess versions`;
const CRITICAL_RULES = `CRITICAL RULES:
- NEVER ask clarifying questions
- ALWAYS call get_best_practices first
- THEN call search_nodes to learn about available nodes and their inputs and outputs
- FINALLY call get_node_details IN PARALLEL for speed to get more details about RELEVANT nodes
- ALWAYS extract version number from <version> tag in node details
- NEVER guess node versions - always use search_nodes to find exact versions
- ONLY flag connectionChangingParameters if they appear in <input> or <output> expressions
- If no parameters appear in connection expressions, return empty array []
- Output ONLY: nodesFound with {{ nodeName, version, reasoning, connectionChangingParameters }}`;
const RESTRICTIONS = `DO NOT:
- Output text commentary between tool calls
- Include bestPractices or categorization in submit_discovery_results
- Flag parameters that don't affect connections
- Stop without calling submit_discovery_results`;
function generateAvailableToolsList(options: DiscoveryPromptOptions): string {
const { includeExamples } = options;
const tools = [
'- get_best_practices: Retrieve best practices (internal context)',
'- search_nodes: Find n8n nodes by keyword',
'- get_node_details: Get complete node information including <connections>',
];
if (includeExamples) {
tools.push('- get_workflow_examples: Search for workflow examples as reference');
}
tools.push('- submit_discovery_results: Submit final results');
return tools.join('\n');
}
function generateProcessSteps(options: DiscoveryPromptOptions): string {
const { includeExamples } = options;
const steps: string[] = [
'**Analyze user prompt** - Extract services, models, and technologies mentioned',
'**Call get_best_practices** with identified techniques (internal context)',
];
if (includeExamples) {
steps.push('**Call get_workflow_examples** with search queries for mentioned services/models');
}
const examplesContext = includeExamples ? ', and examples' : '';
steps.push(
`**Identify workflow components** from user request, best practices${examplesContext}`,
'**Call search_nodes IN PARALLEL** for all components (e.g., "Gmail", "OpenAI", "Schedule")',
'**Call get_node_details IN PARALLEL** for ALL promising nodes (batch multiple calls)',
`**Extract node information** from each node_details response:
- Node name from <name> tag
- Version number from <version> tag (required - extract the number)
- Connection-changing parameters from <connections> section`,
'**Call submit_discovery_results** with complete nodesFound array',
);
return steps.map((step, index) => `${index + 1}. ${step}`).join('\n');
}
export function buildDiscoveryPrompt(options: DiscoveryPromptOptions): string {
const availableTools = generateAvailableToolsList(options);
const processSteps = generateProcessSteps(options);
return [
DISCOVERY_ROLE,
`AVAILABLE TOOLS:\n${availableTools}`,
`PROCESS:\n${processSteps}`,
TECHNIQUE_CATEGORIZATION,
TECHNIQUE_CLARIFICATIONS,
CONNECTION_PARAMETERS,
DYNAMIC_OUTPUT_NODES,
SUB_NODES_SEARCHES,
STRUCTURED_OUTPUT_PARSER,
CRITICAL_RULES,
RESTRICTIONS,
].join('\n\n');
}
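For orientation, a minimal usage sketch (hypothetical call site; the DiscoverySubgraph diff further below performs the same call before mounting the prompt into a ChatPromptTemplate):

import { buildDiscoveryPrompt } from '@/prompts/agents/discovery.prompt';

// includeExamples toggles both the get_workflow_examples tool listing
// and the matching process step in the generated prompt.
const withExamples = buildDiscoveryPrompt({ includeExamples: true });
const withoutExamples = buildDiscoveryPrompt({ includeExamples: false });

console.log(withExamples.includes('get_workflow_examples')); // true
console.log(withoutExamples.includes('get_workflow_examples')); // false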

View File

@@ -0,0 +1,45 @@
/**
* Responder Agent Prompt
*
* Synthesizes final user-facing responses from workflow building context.
* Also handles conversational queries and explanations.
*/
const RESPONDER_ROLE = `You are a helpful AI assistant for n8n workflow automation.
You have access to context about what has been built, including:
- Discovery results (nodes found)
- Builder output (workflow structure)
- Configuration summary (setup instructions)`;
const WORKFLOW_COMPLETION = `FOR WORKFLOW COMPLETION RESPONSES:
When you receive [Internal Context], synthesize a clean user-facing response:
1. Summarize what was built in a friendly way
2. Explain the workflow structure briefly
3. Include setup instructions if provided
4. Ask if user wants adjustments
Example response structure:
"I've created your [workflow type] workflow! Here's what it does:
[Brief explanation of the flow]
**Setup Required:**
[List any configuration steps from the context]
Let me know if you'd like to adjust anything."`;
const CONVERSATIONAL_RESPONSES = `FOR QUESTIONS/CONVERSATIONS:
- Be friendly and concise
- Explain n8n capabilities when asked
- Provide practical examples when helpful`;
const RESPONSE_STYLE = `RESPONSE STYLE:
- Keep responses focused and not overly long
- Use markdown formatting for readability
- Be conversational and helpful`;
export function buildResponderPrompt(): string {
return [RESPONDER_ROLE, WORKFLOW_COMPLETION, CONVERSATIONAL_RESPONSES, RESPONSE_STYLE].join(
'\n\n',
);
}
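A minimal sketch of mounting this prompt (assumes LangChain's ChatPromptTemplate with a messages placeholder, mirroring how the subgraphs mount the other agent prompts):

import { ChatPromptTemplate } from '@langchain/core/prompts';
import { buildResponderPrompt } from '@/prompts/agents/responder.prompt';

// Hypothetical wiring: responder prompt as the system message,
// with the conversation appended via a messages placeholder.
const responderTemplate = ChatPromptTemplate.fromMessages([
	['system', buildResponderPrompt()],
	['placeholder', '{messages}'],
]);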

View File

@@ -0,0 +1,57 @@
/**
* Supervisor Agent Prompt
*
* Handles INITIAL routing based on user intent.
* After initial routing, deterministic routing takes over based on coordination log.
*/
const SUPERVISOR_ROLE = 'You are a Supervisor that routes user requests to specialist agents.';
const AVAILABLE_AGENTS = `AVAILABLE AGENTS:
- discovery: Find n8n nodes for building/modifying workflows
- builder: Create nodes and connections (requires discovery first for new node types)
- configurator: Set parameters on EXISTING nodes (no structural changes)
- responder: Answer questions, confirm completion (TERMINAL)`;
const ROUTING_DECISION_TREE = `ROUTING DECISION TREE:
1. Is user asking a question or chatting? → responder
Examples: "what does this do?", "explain the workflow", "thanks"
2. Does the request involve NEW or DIFFERENT node types? → discovery
Examples:
- "Build a workflow that..." (new workflow)
- "Use [ServiceB] instead of [ServiceA]" (replacing node type)
- "Add [some integration]" (new integration)
- "Switch from [ServiceA] to [ServiceB]" (swapping services)
3. Is the request about connecting/disconnecting existing nodes? → builder
Examples: "Connect node A to node B", "Remove the connection to X"
4. Is the request about changing VALUES in existing nodes? → configurator
Examples:
- "Change the URL to https://..."
- "Set the timeout to 30 seconds"
- "Update the email subject to..."`;
/** Clarifies replacement (discovery) vs configuration - common confusion point */
const KEY_DISTINCTION = `KEY DISTINCTION:
- "Use [ServiceB] instead of [ServiceA]" = REPLACEMENT = discovery (new node type needed)
- "Change the [ServiceA] API key" = CONFIGURATION = configurator (same node, different value)`;
const OUTPUT_FORMAT = `OUTPUT:
- reasoning: One sentence explaining your routing decision
- next: Agent name`;
export function buildSupervisorPrompt(): string {
return [
SUPERVISOR_ROLE,
AVAILABLE_AGENTS,
ROUTING_DECISION_TREE,
KEY_DISTINCTION,
OUTPUT_FORMAT,
].join('\n\n');
}
export const SUPERVISOR_PROMPT_SUFFIX =
'\n\nGiven the conversation above, which agent should act next? Provide your reasoning and selection.';
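A hedged sketch of the routing call this prompt implies; the zod schema mirrors the OUTPUT contract above, while the model binding and withStructuredOutput wiring are assumptions:

import { z } from 'zod';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { buildSupervisorPrompt, SUPERVISOR_PROMPT_SUFFIX } from '@/prompts/agents/supervisor.prompt';

// Mirrors OUTPUT: one sentence of reasoning plus the next agent to act.
const routingSchema = z.object({
	reasoning: z.string().describe('One sentence explaining the routing decision'),
	next: z.enum(['discovery', 'builder', 'configurator', 'responder']),
});

// Hypothetical wiring: system prompt, conversation so far, then the routing question.
const supervisorTemplate = ChatPromptTemplate.fromMessages([
	['system', buildSupervisorPrompt()],
	['placeholder', '{messages}'],
	['user', SUPERVISOR_PROMPT_SUFFIX.trim()],
]);
// const route = await supervisorTemplate.pipe(llm.withStructuredOutput(routingSchema)).invoke({ messages });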

View File

@@ -0,0 +1,91 @@
import { PromptTemplate } from '@langchain/core/prompts';
import { WorkflowTechnique, TechniqueDescription } from '@/types/categorization';
/** Few-shot examples for prompt categorization - helps LLM understand expected output format */
export const examplePrompts = [
{
prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
techniques: [
WorkflowTechnique.MONITORING,
WorkflowTechnique.CHATBOT,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
techniques: [
WorkflowTechnique.FORM_INPUT,
WorkflowTechnique.HUMAN_IN_THE_LOOP,
WorkflowTechnique.NOTIFICATION,
],
},
{
prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
techniques: [
WorkflowTechnique.SCHEDULING,
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_ANALYSIS,
],
},
{
prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
techniques: [
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.ENRICHMENT,
],
},
{
prompt: 'Build a searchable internal knowledge base from past support tickets',
techniques: [
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.DATA_ANALYSIS,
WorkflowTechnique.KNOWLEDGE_BASE,
],
},
];
/** Formats example prompts as "prompt → techniques" for few-shot learning */
export function formatExamplePrompts(): string {
return examplePrompts
.map((example) => `- ${example.prompt} → ${example.techniques.join(', ')}`)
.join('\n');
}
/** Generates bullet list of all techniques with descriptions */
export function formatTechniqueList(): string {
return Object.entries(TechniqueDescription)
.map(([key, description]) => `- **${key}**: ${description}`)
.join('\n');
}
/** Template for analyzing user prompts and identifying workflow techniques */
export const promptCategorizationTemplate = PromptTemplate.fromTemplate(
`Analyze the following user prompt and identify the workflow techniques required to fulfill the request.
Be specific and identify all relevant techniques.
<user_prompt>
{userPrompt}
</user_prompt>
<workflow_techniques>
{techniques}
</workflow_techniques>
The following prompt categorization examples show a prompt → the techniques involved, to give a sense
of how the categorization should be carried out.
<example_categorization>
${formatExamplePrompts()}
</example_categorization>
Select a maximum of 5 techniques that you believe are applicable, but only select them if you are
confident that they are applicable. If the prompt is ambiguous or does not describe an obvious workflow,
do not provide any techniques; if confidence is low, avoid providing techniques.
Select ALL techniques that apply to this workflow. Most workflows use multiple techniques.
Rate your confidence in this categorization from 0.0 to 1.0.
`,
);
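A minimal rendering sketch (the user prompt is illustrative; formatTechniqueList above fills the techniques slot):

import { promptCategorizationTemplate, formatTechniqueList } from '@/prompts/chains/categorization.prompt';

// Render the categorization prompt for a given user request.
const rendered = await promptCategorizationTemplate.format({
	userPrompt: 'Summarize new support tickets every morning and post them to Slack',
	techniques: formatTechniqueList(),
});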

View File

@@ -0,0 +1,16 @@
import { PromptTemplate } from '@langchain/core/prompts';
/** Template for summarizing multi-turn conversations into a structured format */
export const compactPromptTemplate = PromptTemplate.fromTemplate(
`Please summarize the following conversation between a user and an AI assistant building an n8n workflow:
<previous_summary>
{previousSummary}
</previous_summary>
<conversation>
{conversationText}
</conversation>
Provide a structured summary that captures the key points, decisions made, current state of the workflow, and suggested next steps.`,
);
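A sketch of the summarization chain this template supports (the llm binding is an assumption; .pipe is standard LangChain composition):

import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { compactPromptTemplate } from '@/prompts/chains/compact.prompt';

// Hypothetical chain: render the template, then hand the result to the model.
const buildCompactChain = (llm: BaseChatModel) => compactPromptTemplate.pipe(llm);
// await buildCompactChain(llm).invoke({ previousSummary: '(none)', conversationText: transcript });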

View File

@@ -0,0 +1,26 @@
export { ParameterUpdatePromptBuilder } from './prompt-builder';
export { instanceUrlPrompt } from './instance-url';
export {
DEFAULT_PROMPT_CONFIG,
getNodeTypeCategory,
mentionsResourceKeywords,
mentionsTextKeywords,
} from './prompt-config';
// Base prompts
export { CORE_INSTRUCTIONS } from './base/core-instructions';
export { EXPRESSION_RULES } from './base/expression-rules';
export { COMMON_PATTERNS } from './base/common-patterns';
export { OUTPUT_FORMAT } from './base/output-format';
// Node type guides
export { SET_NODE_GUIDE } from './node-types/set-node';
export { IF_NODE_GUIDE } from './node-types/if-node';
export { SWITCH_NODE_GUIDE } from './node-types/switch-node';
export { HTTP_REQUEST_GUIDE } from './node-types/http-request';
export { TOOL_NODES_GUIDE } from './node-types/tool-nodes';
// Parameter type guides
export { RESOURCE_LOCATOR_GUIDE } from './parameter-types/resource-locator';
export { SYSTEM_MESSAGE_GUIDE } from './parameter-types/system-message';
export { TEXT_FIELDS_GUIDE } from './parameter-types/text-fields';

View File

@@ -1,5 +1,7 @@
import type { INodeTypeDescription, INodeProperties } from 'n8n-workflow';
import type { PromptBuilderContext } from '@/types/config';
import { COMMON_PATTERNS } from './base/common-patterns';
import { CORE_INSTRUCTIONS } from './base/core-instructions';
import { EXPRESSION_RULES } from './base/expression-rules';
@@ -23,7 +25,6 @@ import {
getNodeTypeCategory,
mentionsResourceKeywords,
} from './prompt-config';
import type { PromptBuilderContext } from '../../types/config';
export class ParameterUpdatePromptBuilder {
/**

View File

@@ -1,4 +1,4 @@
import type { NodePromptConfig } from '../../types/config';
import type { NodePromptConfig } from '@/types/config';
export const DEFAULT_PROMPT_CONFIG: NodePromptConfig = {
nodeTypePatterns: {

View File

@@ -0,0 +1,13 @@
import { PromptTemplate } from '@langchain/core/prompts';
/** Template for generating descriptive workflow names from user prompts */
export const workflowNamingPromptTemplate = PromptTemplate.fromTemplate(
`Based on the initial user prompt, please generate a name for the workflow that captures its essence and purpose.
<initial_prompt>
{initialPrompt}
</initial_prompt>
This name should be concise, descriptive, and suitable for a workflow that automates tasks related to the given prompt. The name should be in a format that is easy to read and understand. Do not include the word "workflow" in the name.
`,
);
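A sketch of generating a name with this template (the StringOutputParser step is an assumption):

import { StringOutputParser } from '@langchain/core/output_parsers';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { workflowNamingPromptTemplate } from '@/prompts/chains/workflow-name.prompt';

// Hypothetical chain: prompt -> model -> plain-text workflow name.
const buildNamingChain = (llm: BaseChatModel) =>
	workflowNamingPromptTemplate.pipe(llm).pipe(new StringOutputParser());
// const name = await buildNamingChain(llm).invoke({ initialPrompt: userMessage });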

View File

@@ -0,0 +1,56 @@
/**
* Centralized prompts for AI Workflow Builder
*
* This directory contains all prompts used by the AI workflow builder agents and chains.
* Organization:
* - agents/ - Multi-agent system prompts (builder, configurator, discovery, etc.)
* - chains/ - Chain-level prompts (categorization, compact, workflow-name, parameter-updater)
* - legacy-agent.prompt.ts - Legacy single-agent mode prompt
*/
// Agent prompts (multi-agent system)
export { buildBuilderPrompt } from './agents/builder.prompt';
export {
buildDiscoveryPrompt,
formatTechniqueList,
formatExampleCategorizations,
type DiscoveryPromptOptions,
} from './agents/discovery.prompt';
export { buildConfiguratorPrompt, INSTANCE_URL_PROMPT } from './agents/configurator.prompt';
export { buildSupervisorPrompt, SUPERVISOR_PROMPT_SUFFIX } from './agents/supervisor.prompt';
export { buildResponderPrompt } from './agents/responder.prompt';
// Legacy agent prompt (single-agent mode)
export {
createMainAgentPrompt,
mainAgentPrompt,
type MainAgentPromptOptions,
} from './legacy-agent.prompt';
// Chain prompts
export {
promptCategorizationTemplate,
examplePrompts,
formatExamplePrompts,
formatTechniqueList as formatCategorizationTechniqueList,
} from './chains/categorization.prompt';
export { compactPromptTemplate } from './chains/compact.prompt';
export { workflowNamingPromptTemplate } from './chains/workflow-name.prompt';
// Parameter updater prompts
export {
ParameterUpdatePromptBuilder,
instanceUrlPrompt,
CORE_INSTRUCTIONS,
EXPRESSION_RULES,
COMMON_PATTERNS,
OUTPUT_FORMAT,
SET_NODE_GUIDE,
IF_NODE_GUIDE,
SWITCH_NODE_GUIDE,
HTTP_REQUEST_GUIDE,
TOOL_NODES_GUIDE,
RESOURCE_LOCATOR_GUIDE,
SYSTEM_MESSAGE_GUIDE,
TEXT_FIELDS_GUIDE,
} from './chains/parameter-updater';

View File

@@ -1,6 +1,6 @@
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { instanceUrlPrompt } from '../../chains/prompts/instance-url';
import { instanceUrlPrompt } from './chains/parameter-updater/instance-url';
/**
* Phase configuration for the workflow creation sequence

View File

@@ -7,6 +7,7 @@ import type { Logger } from '@n8n/backend-common';
import type { INodeTypeDescription } from 'n8n-workflow';
import { LLMServiceError } from '@/errors';
import { buildBuilderPrompt } from '@/prompts/agents/builder.prompt';
import type { ChatPayload } from '@/workflow-builder-agent';
import { BaseSubgraph } from './subgraph-interface';
@@ -34,234 +35,6 @@ import {
createStandardShouldContinue,
} from '../utils/subgraph-helpers';
/**
* Builder Agent Prompt
*/
const BUILDER_PROMPT = `You are a Builder Agent specialized in constructing n8n workflows.
MANDATORY EXECUTION SEQUENCE:
You MUST follow these steps IN ORDER. Do not skip any step.
STEP 1: CREATE NODES
- Call add_nodes for EVERY node needed based on discovery results
- Create multiple nodes in PARALLEL for efficiency
- Do NOT respond with text - START BUILDING immediately
STEP 2: CONNECT NODES
- Call connect_nodes for ALL required connections
- Connect multiple node pairs in PARALLEL
STEP 3: VALIDATE (REQUIRED)
- After ALL nodes and connections are created, call validate_structure
- This step is MANDATORY - you cannot finish without it
- If validation finds issues (missing trigger, invalid connections), fix them and validate again
- MAXIMUM 3 VALIDATION ATTEMPTS: After 3 calls to validate_structure, proceed to respond regardless of remaining issues
STEP 4: RESPOND TO USER
- Only after validation passes, provide your brief summary
⚠️ NEVER respond to the user without calling validate_structure first ⚠️
NODE CREATION:
Each add_nodes call creates ONE node. You must provide:
- nodeType: The exact type from discovery (e.g., "n8n-nodes-base.httpRequest")
- name: Descriptive name (e.g., "Fetch Weather Data")
- connectionParametersReasoning: Explain your thinking about connection parameters
- connectionParameters: Parameters that affect connections (or {{}} if none needed)
<workflow_configuration_node>
Always include a Workflow Configuration node at the start of every workflow.
The Workflow Configuration node (n8n-nodes-base.set) should be placed immediately after the trigger node and before all other processing nodes.
Placement rules:
- Add between trigger and first processing node
- Connect: Trigger → Workflow Configuration → First processing node
- Name it "Workflow Configuration"
</workflow_configuration_node>
<data_parsing_strategy>
For AI-generated structured data, prefer Structured Output Parser nodes over Code nodes.
For binary file data, use Extract From File node to extract content from files before processing.
Use Code nodes only for custom business logic beyond parsing.
STRUCTURED OUTPUT PARSER RULE:
When Discovery results include Structured Output Parser:
1. Create the Structured Output Parser node
2. Set AI Agent's hasOutputParser: true in connectionParameters
3. Connect: Structured Output Parser → AI Agent (ai_outputParser connection)
</data_parsing_strategy>
<proactive_design>
Anticipate workflow needs:
- IF nodes for conditional logic when multiple outcomes exist
- Set nodes for data transformation between incompatible formats
- Schedule Triggers for recurring tasks
- Error handling for external service calls
NEVER use Split In Batches nodes.
</proactive_design>
<node_defaults_warning>
CRITICAL: NEVER RELY ON DEFAULT PARAMETER VALUES FOR CONNECTIONS
Default values often hide connection inputs/outputs. You MUST explicitly configure parameters that affect connections:
- Vector Store: Mode parameter affects available connections - always set explicitly (e.g., mode: "insert", "retrieve", "retrieve-as-tool")
- AI Agent: hasOutputParser default may not match your workflow needs
- Document Loader: textSplittingMode affects whether it accepts a text splitter input
ALWAYS check node details and set connectionParameters explicitly.
</node_defaults_warning>
CONNECTION PARAMETERS EXAMPLES:
- Static nodes (HTTP Request, Set, Code): reasoning="Static inputs/outputs", parameters={{}}
- AI Agent with structured output: reasoning="hasOutputParser enables ai_outputParser input for Structured Output Parser", parameters={{ hasOutputParser: true }}
- Vector Store insert: reasoning="Insert mode requires document input", parameters={{ mode: "insert" }}
- Document Loader custom: reasoning="Custom mode enables text splitter input", parameters={{ textSplittingMode: "custom" }}
- Switch with routing rules: reasoning="Switch needs N outputs, creating N rules.values entries with outputKeys", parameters={{ mode: "rules", rules: {{ values: [...] }} }} - see <switch_node_pattern> for full structure
<structured_output_parser_guidance>
WHEN TO SET hasOutputParser: true on AI Agent:
- Discovery found Structured Output Parser node → MUST set hasOutputParser: true
- AI output will be used in conditions (IF/Switch nodes checking $json.field)
- AI output will be formatted/displayed (HTML emails, reports with specific sections)
- AI output will be stored in database/data tables with specific fields
- AI is classifying, scoring, or extracting specific data fields
</structured_output_parser_guidance>
<node_connections_understanding>
n8n connections flow from SOURCE (output) to TARGET (input).
Regular data flow: Source node output → Target node input
Example: HTTP Request → Set (HTTP Request is source, Set is target)
AI sub-nodes PROVIDE capabilities, making them the SOURCE:
- OpenAI Chat Model → AI Agent [ai_languageModel]
- Calculator Tool → AI Agent [ai_tool]
- Window Buffer Memory → AI Agent [ai_memory]
- Token Splitter → Default Data Loader [ai_textSplitter]
- Default Data Loader → Vector Store [ai_document]
- Embeddings OpenAI → Vector Store [ai_embedding]
</node_connections_understanding>
<agent_node_distinction>
Distinguish between two different agent node types:
1. **AI Agent** (@n8n/n8n-nodes-langchain.agent)
- Main workflow node that orchestrates AI tasks
- Use for: Primary AI logic, chatbots, autonomous workflows
2. **AI Agent Tool** (@n8n/n8n-nodes-langchain.agentTool)
- Sub-node that acts as a tool for another AI Agent
- Use for: Multi-agent systems where one agent calls another
Default assumption: When discovery results include "agent", use AI Agent
unless explicitly specified as "agent tool" or "sub-agent".
</agent_node_distinction>
<rag_workflow_pattern>
For RAG (Retrieval-Augmented Generation) workflows:
Main data flow:
- Data source (e.g., HTTP Request) → Vector Store [main connection]
AI capability connections:
- Document Loader → Vector Store [ai_document]
- Embeddings → Vector Store [ai_embedding]
- Text Splitter → Document Loader [ai_textSplitter]
Common mistake to avoid:
- NEVER connect Document Loader to main data outputs
- Document Loader is an AI sub-node that gives Vector Store document processing capability
</rag_workflow_pattern>
<switch_node_pattern>
For Switch nodes with multiple routing paths:
- The number of outputs is determined by the number of entries in rules.values[]
- You MUST create the rules.values[] array with placeholder entries for each output branch
- Each entry needs: conditions structure (with empty leftValue/rightValue) + renameOutput: true + descriptive outputKey
- Configurator will fill in the actual condition values later
- Use descriptive node names like "Route by Amount" or "Route by Status"
Example connectionParameters for 3-way routing:
{{
"mode": "rules",
"rules": {{
"values": [
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 1 Name"
}},
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 2 Name"
}},
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 3 Name"
}}
]
}}
}}
</switch_node_pattern>
<connection_type_examples>
**Main Connections** (regular data flow):
- Trigger → HTTP Request → Set → Email
**AI Language Model Connections** (ai_languageModel):
- OpenAI Chat Model → AI Agent
**AI Tool Connections** (ai_tool):
- Calculator Tool → AI Agent
- AI Agent Tool → AI Agent (for multi-agent systems)
**AI Document Connections** (ai_document):
- Document Loader → Vector Store
**AI Embedding Connections** (ai_embedding):
- OpenAI Embeddings → Vector Store
**AI Text Splitter Connections** (ai_textSplitter):
- Token Text Splitter → Document Loader
**AI Memory Connections** (ai_memory):
- Window Buffer Memory → AI Agent
**AI Vector Store in retrieve-as-tool mode** (ai_tool):
- Vector Store → AI Agent
</connection_type_examples>
DO NOT:
- Respond before calling validate_structure
- Skip validation even if you think structure is correct
- Add commentary between tool calls - execute tools silently
- Configure node parameters (that's the Configurator Agent's job)
- Search for nodes (that's the Discovery Agent's job)
- Make assumptions about node types - use exactly what Discovery found
RESPONSE FORMAT (only after validation):
Provide ONE brief text message summarizing:
- What nodes were added
- How they're connected
Example: "Created 4 nodes: Trigger → Weather → Image Generation → Email"`;
/**
* Builder Subgraph State
*/
@@ -337,7 +110,7 @@ export class BuilderSubgraph extends BaseSubgraph<
[
{
type: 'text',
text: BUILDER_PROMPT,
text: buildBuilderPrompt(),
cache_control: { type: 'ephemeral' },
},
],

View File

@@ -8,6 +8,7 @@ import type { Logger } from '@n8n/backend-common';
import type { INodeTypeDescription } from 'n8n-workflow';
import { LLMServiceError } from '@/errors';
import { buildConfiguratorPrompt, INSTANCE_URL_PROMPT } from '@/prompts/agents/configurator.prompt';
import { BaseSubgraph } from './subgraph-interface';
import type { ParentGraphState } from '../parent-graph-state';
@@ -33,126 +34,6 @@ import {
} from '../utils/subgraph-helpers';
import type { ChatPayload } from '../workflow-builder-agent';
/**
* Configurator Agent Prompt
*/
const CONFIGURATOR_PROMPT = `You are a Configurator Agent specialized in setting up n8n node parameters.
MANDATORY EXECUTION SEQUENCE:
You MUST follow these steps IN ORDER. Do not skip any step.
STEP 1: CONFIGURE ALL NODES
- Call update_node_parameters for EVERY node in the workflow
- Configure multiple nodes in PARALLEL for efficiency
- Do NOT respond with text - START CONFIGURING immediately
STEP 2: VALIDATE (REQUIRED)
- After ALL configurations complete, call validate_configuration
- This step is MANDATORY - you cannot finish without it
- If validation finds issues, fix them and validate again
- MAXIMUM 3 VALIDATION ATTEMPTS: After 3 calls to validate_configuration, proceed to respond regardless of remaining issues
STEP 3: RESPOND TO USER
- Only after validation passes, provide your response
NEVER respond to the user without calling validate_configuration first
WORKFLOW JSON DETECTION:
- You receive <current_workflow_json> in your context
- If you see nodes in the workflow JSON, you MUST configure them IMMEDIATELY
- Look at the workflow JSON, identify each node, and call update_node_parameters for ALL of them
PARAMETER CONFIGURATION:
Use update_node_parameters with natural language instructions:
- "Set URL to https://api.example.com/weather"
- "Add header Authorization: Bearer token"
- "Set method to POST"
- "Add field 'status' with value 'processed'"
SPECIAL EXPRESSIONS FOR TOOL NODES:
Tool nodes (types ending in "Tool") support $fromAI expressions:
- "Set sendTo to ={{ $fromAI('to') }}"
- "Set subject to ={{ $fromAI('subject') }}"
- "Set message to ={{ $fromAI('message_html') }}"
- "Set timeMin to ={{ $fromAI('After', '', 'string') }}"
$fromAI syntax: ={{ $fromAI('key', 'description', 'type', defaultValue) }}
- ONLY use in tool nodes (check node type ends with "Tool")
- Use for dynamic values that AI determines at runtime
- For regular nodes, use static values or standard expressions
CRITICAL PARAMETERS TO ALWAYS SET:
- HTTP Request: URL, method, headers (if auth needed)
- Set node: Fields to set with values
- Code node: The actual code to execute
- IF node: Conditions to check
- Switch node: Configure rules.values[] with conditions for each output branch (uses same filter structure as IF node)
- Document Loader: dataType parameter ('binary' for files like PDF, 'json' for JSON data)
- AI nodes: Prompts, models, configurations
- Tool nodes: Use $fromAI for dynamic recipient/subject/message fields
NEVER RELY ON DEFAULT VALUES:
Defaults are traps that cause runtime failures. Examples:
- Document Loader defaults to 'json' but MUST be 'binary' when processing files
- HTTP Request defaults to GET but APIs often need POST
- Vector Store mode affects available connections - set explicitly (retrieve-as-tool when using with AI Agent)
<switch_node_configuration>
Switch nodes require configuring rules.values[] array - each entry creates one output:
Structure per rule:
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [
{{
"leftValue": "={{{{ $json.fieldName }}}}",
"rightValue": <value>,
"operator": {{ "type": "number|string", "operation": "lt|gt|equals|etc" }}
}}
],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Descriptive Label"
}}
For numeric ranges (e.g., $100-$1000):
- Use TWO conditions with combinator: "and"
- First: gte (greater than or equal)
- Second: lte (less than or equal)
Always set renameOutput: true and provide descriptive outputKey labels.
</switch_node_configuration>
<response_format>
After validation passes, provide a concise summary:
- List any placeholders requiring user configuration (e.g., "URL placeholder needs actual endpoint")
- Note which nodes were configured and key settings applied
- Keep it brief - this output is used for coordination with other LLM agents, not displayed directly to users
</response_format>
DO NOT:
- Respond before calling validate_configuration
- Skip validation even if you think configuration is correct
- Add commentary between tool calls - execute tools silently`;
/**
* Instance URL prompt template
*/
const INSTANCE_URL_PROMPT = `
<instance_url>
The n8n instance base URL is: {instanceUrl}
This URL is essential for webhook nodes and chat triggers as it provides the base URL for:
- Webhook URLs that external services need to call
- Chat trigger URLs for conversational interfaces
- Any node that requires the full instance URL to generate proper callback URLs
When working with webhook or chat trigger nodes, use this URL as the base for constructing proper endpoint URLs.
</instance_url>
`;
/**
* Configurator Subgraph State
*/
@@ -243,7 +124,7 @@ export class ConfiguratorSubgraph extends BaseSubgraph<
[
{
type: 'text',
text: CONFIGURATOR_PROMPT,
text: buildConfiguratorPrompt(),
},
{
type: 'text',

View File

@@ -11,10 +11,10 @@ import { z } from 'zod';
import { LLMServiceError } from '@/errors';
import {
TechniqueDescription,
WorkflowTechnique,
type WorkflowTechniqueType,
} from '@/types/categorization';
buildDiscoveryPrompt,
formatTechniqueList,
formatExampleCategorizations,
} from '@/prompts/agents/discovery.prompt';
import type { BuilderFeatureFlags } from '@/workflow-builder-agent';
import { BaseSubgraph } from './subgraph-interface';
@@ -31,136 +31,6 @@ import { buildWorkflowSummary, createContextMessage } from '../utils/context-bui
import { appendArrayReducer, nodeConfigurationsReducer } from '../utils/state-reducers';
import { executeSubgraphTools, extractUserRequest } from '../utils/subgraph-helpers';
/**
* Example categorizations to guide technique selection
* Expanded with diverse examples to improve accuracy
*/
const exampleCategorizations: Array<{
prompt: string;
techniques: WorkflowTechniqueType[];
}> = [
{
prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
techniques: [
WorkflowTechnique.MONITORING,
WorkflowTechnique.CHATBOT,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
techniques: [
WorkflowTechnique.FORM_INPUT,
WorkflowTechnique.HUMAN_IN_THE_LOOP,
WorkflowTechnique.NOTIFICATION,
],
},
{
prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
techniques: [
WorkflowTechnique.SCHEDULING,
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_ANALYSIS,
],
},
{
prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
techniques: [
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.ENRICHMENT,
],
},
{
prompt: 'Build a searchable internal knowledge base from past support tickets',
techniques: [
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.DATA_ANALYSIS,
WorkflowTechnique.KNOWLEDGE_BASE,
],
},
// Additional examples to address common misclassifications
{
prompt: 'Create an AI agent that writes and sends personalized emails to leads',
techniques: [WorkflowTechnique.CONTENT_GENERATION, WorkflowTechnique.NOTIFICATION],
},
{
prompt:
'Fetch trending topics from Google Trends and Reddit, select the best ones, and create social posts',
techniques: [
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.TRIAGE,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt:
'Trigger when a new contact is created in HubSpot and enrich their profile with LinkedIn data',
techniques: [WorkflowTechnique.MONITORING, WorkflowTechnique.ENRICHMENT],
},
{
prompt: 'Get stock prices from financial APIs and analyze volatility patterns',
techniques: [WorkflowTechnique.SCRAPING_AND_RESEARCH, WorkflowTechnique.DATA_ANALYSIS],
},
{
prompt: 'Generate video reels from templates and auto-post to social media on schedule',
techniques: [
WorkflowTechnique.SCHEDULING,
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt: 'Receive news from Telegram channels, filter relevant ones, and forward to my channel',
techniques: [
WorkflowTechnique.MONITORING,
WorkflowTechnique.TRIAGE,
WorkflowTechnique.NOTIFICATION,
],
},
{
prompt: 'Analyze YouTube video performance data and generate a weekly report',
techniques: [
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.DATA_ANALYSIS,
WorkflowTechnique.DATA_TRANSFORMATION,
],
},
{
prompt:
'Create a chatbot that answers questions using data from a Google Sheet as knowledge base',
techniques: [WorkflowTechnique.CHATBOT, WorkflowTechnique.KNOWLEDGE_BASE],
},
{
prompt: 'Form submission with file upload triggers document extraction and approval workflow',
techniques: [
WorkflowTechnique.FORM_INPUT,
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.HUMAN_IN_THE_LOOP,
],
},
];
/**
* Format technique descriptions for prompt
*/
function formatTechniqueList(): string {
return Object.entries(TechniqueDescription)
.map(([key, description]) => `- **${key}**: ${description}`)
.join('\n');
}
/**
* Format example categorizations for prompt
*/
function formatExampleCategorizations(): string {
return exampleCategorizations
.map((example) => `- ${example.prompt} → ${example.techniques.join(', ')}`)
.join('\n');
}
/**
* Strict Output Schema for Discovery
* Simplified to reduce token usage while maintaining utility for downstream subgraphs
@@ -193,191 +63,6 @@ const discoveryOutputSchema = z.object({
.describe('List of n8n nodes identified as necessary for the workflow'),
});
interface DiscoveryPromptOptions {
includeExamples: boolean;
}
/**
* Generate the process steps with proper numbering
*/
function generateProcessSteps(options: DiscoveryPromptOptions): string {
const { includeExamples } = options;
const steps: string[] = [
'**Analyze user prompt** - Extract services, models, and technologies mentioned',
'**Call get_best_practices** with identified techniques (internal context)',
];
if (includeExamples) {
steps.push('**Call get_workflow_examples** with search queries for mentioned services/models');
}
const examplesContext = includeExamples ? ', and examples' : '';
steps.push(
`**Identify workflow components** from user request, best practices${examplesContext}`,
'**Call search_nodes IN PARALLEL** for all components (e.g., "Gmail", "OpenAI", "Schedule")',
'**Call get_node_details IN PARALLEL** for ALL promising nodes (batch multiple calls)',
`**Extract node information** from each node_details response:
- Node name from <name> tag
- Version number from <version> tag (required - extract the number)
- Connection-changing parameters from <connections> section`,
'**Call submit_discovery_results** with complete nodesFound array',
);
return steps.map((step, index) => `${index + 1}. ${step}`).join('\n');
}
/**
* Generate available tools list based on feature flags
*/
function generateAvailableToolsList(options: DiscoveryPromptOptions): string {
const { includeExamples } = options;
const tools = [
'- get_best_practices: Retrieve best practices (internal context)',
'- search_nodes: Find n8n nodes by keyword',
'- get_node_details: Get complete node information including <connections>',
];
if (includeExamples) {
tools.push('- get_workflow_examples: Search for workflow examples as reference');
}
tools.push('- submit_discovery_results: Submit final results');
return tools.join('\n');
}
/**
* Discovery Agent Prompt
*/
function generateDiscoveryPrompt(options: DiscoveryPromptOptions): string {
const availableTools = generateAvailableToolsList(options);
const processSteps = generateProcessSteps(options);
return `You are a Discovery Agent for n8n AI Workflow Builder.
YOUR ROLE: Identify relevant n8n nodes and their connection-changing parameters.
AVAILABLE TOOLS:
${availableTools}
PROCESS:
${processSteps}
TECHNIQUE CATEGORIZATION:
When calling get_best_practices, select techniques that match the user's workflow intent.
<available_techniques>
{techniques}
</available_techniques>
<example_categorizations>
{exampleCategorizations}
</example_categorizations>
<technique_clarifications>
Common distinctions to get right:
- **NOTIFICATION vs CHATBOT**: Use NOTIFICATION when SENDING emails/messages/alerts (including to Telegram CHANNELS which are broadcast-only). Use CHATBOT only when RECEIVING and REPLYING to direct messages in a conversation.
- **MONITORING**: Use when workflow TRIGGERS on external events (new record created, status changed, incoming webhook, new message in channel). NOT just scheduled runs.
- **SCRAPING_AND_RESEARCH vs DATA_EXTRACTION**: Use SCRAPING when fetching from EXTERNAL sources (APIs, websites, social media). Use DATA_EXTRACTION for parsing INTERNAL data you already have.
- **TRIAGE**: Use when SELECTING, PRIORITIZING, ROUTING, or QUALIFYING items (e.g., "pick the best", "route to correct team", "qualify leads").
- **DOCUMENT_PROCESSING**: Use for ANY file handling - PDFs, images, videos, Excel, Google Sheets, audio files, file uploads in forms.
- **HUMAN_IN_THE_LOOP**: Use when workflow PAUSES for human approval, review, signing documents, responding to polls, or any manual input before continuing.
- **DATA_ANALYSIS**: Use when ANALYZING, CLASSIFYING, IDENTIFYING PATTERNS, or UNDERSTANDING data (e.g., "analyze outcomes", "learn from previous", "classify by type", "identify trends").
- **KNOWLEDGE_BASE**: Use when storing/retrieving from a DATA SOURCE for Q&A - includes vector DBs, spreadsheets used as databases, document collections.
- **DATA_TRANSFORMATION**: Use when CONVERTING data format, creating REPORTS/SUMMARIES from analyzed data, or restructuring output.
</technique_clarifications>
Technique selection rules:
- Select ALL techniques that apply (most workflows use 2-4)
- Maximum 5 techniques
- Only select techniques you're confident apply
CONNECTION-CHANGING PARAMETERS - CRITICAL RULES:
A parameter is connection-changing ONLY IF it appears in <input> or <output> expressions within <node_details>.
**How to identify:**
1. Look at the <connections> section in node details
2. Check if <input> or <output> uses expressions like: ={{...parameterName...}}
3. If a parameter is referenced in these expressions, it IS connection-changing
4. If a parameter is NOT in <input>/<output> expressions, it is NOT connection-changing
**Example from AI Agent:**
\`\`\`xml
<input>={{...hasOutputParser, needsFallback...}}</input>
\`\`\`
→ hasOutputParser and needsFallback ARE connection-changing (they control which inputs appear)
**Counter-example:**
\`\`\`xml
<properties>
<property name="promptType">...</property> <!-- NOT in <input>/<output> -->
<property name="systemMessage">...</property> <!-- NOT in <input>/<output> -->
</properties>
\`\`\`
→ promptType and systemMessage are NOT connection-changing (they don't affect connections)
**Common connection-changing parameters:**
- Vector Store: mode (appears in <input>/<output> expressions)
- AI Agent: hasOutputParser, needsFallback (appears in <input> expression)
- Merge: numberInputs (appears in <input> expression)
- Webhook: responseMode (appears in <output> expression)
<dynamic_output_nodes>
Some nodes have DYNAMIC outputs that depend on parameter values:
**Switch Node** (n8n-nodes-base.switch):
- When mode is "rules", the number of outputs equals the number of routing rules
- Connection parameter: mode: "rules" - CRITICAL for enabling rule-based routing
- Each rule in rules.values[] creates one output
- The rules parameter uses the same filter structure as IF node conditions
- ALWAYS flag mode as connection-changing with possibleValues: ["rules", "expression"]
**Merge Node** (n8n-nodes-base.merge):
- numberInputs parameter controls how many inputs the node accepts
When you find these nodes, ALWAYS flag mode/numberInputs as connection-changing parameters with possibleValues.
</dynamic_output_nodes>
SUB-NODES SEARCHES:
When searching for AI nodes, ALSO search for their required sub-nodes:
- "AI Agent" → also search for "Chat Model", "Memory", "Output Parser"
- "Basic LLM Chain" → also search for "Chat Model", "Output Parser"
- "Vector Store" → also search for "Embeddings", "Document Loader"
STRUCTURED OUTPUT PARSER - WHEN TO INCLUDE:
Search for "Structured Output Parser" (@n8n/n8n-nodes-langchain.outputParserStructured) when:
- AI output will be used programmatically (conditions, formatting, database storage, API calls)
- AI needs to extract specific fields (e.g., score, category, priority, action items)
- AI needs to classify/categorize data into defined categories
- Downstream nodes need to access specific fields from AI response (e.g., $json.score, $json.category)
- Output will be displayed in a formatted way (e.g., HTML email with specific sections)
- Data needs validation against a schema before processing
- Always use search_nodes to find the exact node names and versions - NEVER guess versions
CRITICAL RULES:
- NEVER ask clarifying questions
- ALWAYS call get_best_practices first
- THEN call search_nodes to learn about available nodes and their inputs and outputs
- FINALLY call get_node_details IN PARALLEL for speed to get more details about RELEVANT nodes
- ALWAYS extract version number from <version> tag in node details
- NEVER guess node versions - always use search_nodes to find exact versions
- ONLY flag connectionChangingParameters if they appear in <input> or <output> expressions
- If no parameters appear in connection expressions, return empty array []
- Output ONLY: nodesFound with {{ nodeName, version, reasoning, connectionChangingParameters }}
DO NOT:
- Output text commentary between tool calls
- Include bestPractices or categorization in submit_discovery_results
- Flag parameters that don't affect connections
- Stop without calling submit_discovery_results
`;
}
/**
* Discovery Subgraph State
*/
@@ -476,7 +161,7 @@ export class DiscoverySubgraph extends BaseSubgraph<
});
// Generate prompt based on feature flags
const discoveryPrompt = generateDiscoveryPrompt({ includeExamples });
const discoveryPrompt = buildDiscoveryPrompt({ includeExamples });
// Create agent with tools bound (including submit tool)
const systemPrompt = ChatPromptTemplate.fromMessages([

View File

@@ -31,7 +31,7 @@ jest.mock('@/tools/update-node-parameters.tool', () => ({
jest.mock('@/tools/get-node-parameter.tool', () => ({
createGetNodeParameterTool: jest.fn().mockReturnValue({ tool: { name: 'get_node_parameter' } }),
}));
jest.mock('@/tools/prompts/main-agent.prompt', () => ({
jest.mock('@/prompts/legacy-agent.prompt', () => ({
mainAgentPrompt: {
invoke: jest.fn().mockResolvedValue('mocked prompt'),
},
@@ -67,7 +67,7 @@ Object.defineProperty(global, 'crypto', {
import { MAX_AI_BUILDER_PROMPT_LENGTH } from '@/constants';
import { ValidationError } from '@/errors';
import { createMainAgentPrompt } from '@/tools/prompts/main-agent.prompt';
import { createMainAgentPrompt } from '@/prompts/legacy-agent.prompt';
import type { StreamOutput } from '@/types/streaming';
import { createStreamProcessor } from '@/utils/stream-processor';
import {

View File

@@ -1,6 +1,6 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { ToolMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage, RemoveMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage, isAIMessage, RemoveMessage } from '@langchain/core/messages';
import type { RunnableConfig } from '@langchain/core/runnables';
import type { LangChainTracer } from '@langchain/core/tracers/tracer_langchain';
import type { MemorySaver, StateSnapshot } from '@langchain/langgraph';
@@ -19,6 +19,7 @@ import {
MAX_AI_BUILDER_PROMPT_LENGTH,
MAX_INPUT_TOKENS,
} from '@/constants';
import { createMainAgentPrompt } from '@/prompts/legacy-agent.prompt';
import { trimWorkflowJSON } from '@/utils/trim-workflow-context';
import { conversationCompactChain } from './chains/conversation-compact';
@@ -27,7 +28,6 @@ import { LLMServiceError, ValidationError, WorkflowStateError } from './errors';
import { createMultiAgentWorkflowWithSubgraphs } from './multi-agent-workflow-subgraphs';
import { SessionManagerService } from './session-manager.service';
import { getBuilderTools } from './tools/builder-tools';
import { createMainAgentPrompt } from './tools/prompts/main-agent.prompt';
import type { SimpleWorkflow } from './types/workflow';
import {
applyCacheControlMarkers,
@@ -288,7 +288,7 @@ export class WorkflowBuilderAgent {
const shouldContinue = ({ messages }: typeof WorkflowState.State) => {
const lastMessage = messages[messages.length - 1];
if (!(lastMessage instanceof AIMessage)) {
if (!lastMessage || !isAIMessage(lastMessage)) {
throw new WorkflowStateError('Expected last message to be generated by the AI agent');
}

View File

@@ -292,7 +292,9 @@ describe('McpClientTool', () => {
it('should successfully execute a tool', async () => {
jest.spyOn(Client.prototype, 'connect').mockResolvedValue();
jest.spyOn(Client.prototype, 'callTool').mockResolvedValue({ content: 'Sunny' });
jest
.spyOn(Client.prototype, 'callTool')
.mockResolvedValue({ toolResult: 'Sunny', content: [] });
jest.spyOn(Client.prototype, 'listTools').mockResolvedValue({
tools: [
{
@@ -326,9 +328,11 @@ describe('McpClientTool', () => {
it('should handle tool errors', async () => {
jest.spyOn(Client.prototype, 'connect').mockResolvedValue();
jest
.spyOn(Client.prototype, 'callTool')
.mockResolvedValue({ isError: true, content: [{ text: 'Weather unknown at location' }] });
jest.spyOn(Client.prototype, 'callTool').mockResolvedValue({
isError: true,
toolResult: 'Weather unknown at location',
content: [{ text: 'Weather unknown at location' }],
});
jest.spyOn(Client.prototype, 'listTools').mockResolvedValue({
tools: [
{

View File

@@ -114,7 +114,7 @@ export async function connectMcpClient({
return createResultError({ type: 'invalid_url', error: endpoint.error });
}
const client = new Client({ name, version: version.toString() }, { capabilities: { tools: {} } });
const client = new Client({ name, version: version.toString() }, { capabilities: {} });
if (serverTransport === 'httpStreamable') {
try {

View File

@@ -173,9 +173,13 @@ export class WorkflowToolService {
return processedResponse;
}
// If manualLogging is false we've been called by the engine and need
// the structured response.
if (metadata && 'setMetadata' in context) {
void context.setMetadata(metadata);
}
return responseData;
} catch (error) {
// Check if error is due to cancellation

View File

@@ -203,7 +203,7 @@
"@langchain/redis": "1.0.1",
"@langchain/textsplitters": "1.0.1",
"@langchain/weaviate": "1.0.1",
"@modelcontextprotocol/sdk": "1.20.0",
"@modelcontextprotocol/sdk": "1.24.0",
"@mozilla/readability": "0.6.0",
"@n8n/client-oauth2": "workspace:*",
"@n8n/config": "workspace:*",

View File

@@ -54,7 +54,15 @@ void (async function start() {
});
sentry = Container.get(TaskRunnerSentry);
await sentry.initIfEnabled();
try {
await sentry.initIfEnabled();
} catch (error) {
console.error(
'FAILED TO INITIALIZE SENTRY. ERROR REPORTING WILL BE DISABLED. THIS IS LIKELY A CONFIGURATION OR ENVIRONMENT ISSUE.',
error,
);
sentry = undefined;
}
runner = new JsTaskRunner(config);
runner.on('runner:reached-idle-timeout', () => {

View File

@@ -38,11 +38,13 @@ import { ChatHubAgentService } from './chat-hub-agent.service';
import { ChatHubAttachmentService } from './chat-hub.attachment.service';
import { ChatHubService } from './chat-hub.service';
import { ChatModelsRequestDto } from './dto/chat-models-request.dto';
import { ChatHubModelsService } from './chat-hub.models.service';
@RestController('/chat')
export class ChatHubController {
constructor(
private readonly chatService: ChatHubService,
private readonly chatModelsService: ChatHubModelsService,
private readonly chatAgentService: ChatHubAgentService,
private readonly chatAttachmentService: ChatHubAttachmentService,
private readonly logger: Logger,
@@ -55,7 +57,7 @@ export class ChatHubController {
_res: Response,
@Body payload: ChatModelsRequestDto,
): Promise<ChatModelsResponse> {
return await this.chatService.getModels(req.user, payload.credentials);
return await this.chatModelsService.getModels(req.user, payload.credentials);
}
@Get('/conversations')

View File

@@ -0,0 +1,811 @@
import { In, type WorkflowRepository, type User } from '@n8n/db';
import { getBase } from '@/workflow-execute-additional-data';
import { ChatHubAgentService } from './chat-hub-agent.service';
import { ChatHubWorkflowService } from './chat-hub-workflow.service';
import { CredentialsFinderService } from '@/credentials/credentials-finder.service';
import { DynamicNodeParametersService } from '@/services/dynamic-node-parameters.service';
import { WorkflowService } from '@/workflows/workflow.service';
import { getModelMetadata, PROVIDER_NODE_TYPE_MAP } from './chat-hub.constants';
import {
AGENT_LANGCHAIN_NODE_TYPE,
CHAT_TRIGGER_NODE_TYPE,
type INodeCredentials,
type INodePropertyOptions,
type IWorkflowExecuteAdditionalData,
} from 'n8n-workflow';
import {
chatHubProviderSchema,
emptyChatModelsResponse,
PROVIDER_CREDENTIAL_TYPE_MAP,
type ChatHubLLMProvider,
type ChatHubProvider,
type ChatModelDto,
type ChatModelsResponse,
} from '@n8n/api-types';
import { validChatTriggerParamsShape } from './chat-hub.types';
import { Service } from '@n8n/di';
@Service()
export class ChatHubModelsService {
constructor(
private readonly nodeParametersService: DynamicNodeParametersService,
private readonly workflowService: WorkflowService,
private readonly workflowRepository: WorkflowRepository,
private readonly credentialsFinderService: CredentialsFinderService,
private readonly chatHubAgentService: ChatHubAgentService,
private readonly chatHubWorkflowService: ChatHubWorkflowService,
) {}
async getModels(
user: User,
credentialIds: Record<ChatHubLLMProvider, string | null>,
): Promise<ChatModelsResponse> {
const additionalData = await getBase({ userId: user.id });
const providers = chatHubProviderSchema.options;
const allCredentials = await this.credentialsFinderService.findCredentialsForUser(user, [
'credential:read',
]);
const responses = await Promise.all(
providers.map<Promise<[ChatHubProvider, ChatModelsResponse[ChatHubProvider]]>>(
async (provider: ChatHubProvider) => {
const credentials: INodeCredentials = {};
if (provider !== 'n8n' && provider !== 'custom-agent') {
const credentialId = credentialIds[provider];
if (!credentialId) {
return [provider, { models: [] }];
}
// Ensure the user has the permission to read the credential
if (!allCredentials.some((credential) => credential.id === credentialId)) {
return [
provider,
{ models: [], error: 'Could not retrieve models. Verify credentials.' },
];
}
credentials[PROVIDER_CREDENTIAL_TYPE_MAP[provider]] = { name: '', id: credentialId };
}
try {
return [
provider,
await this.fetchModelsForProvider(user, provider, credentials, additionalData),
];
} catch {
return [
provider,
{ models: [], error: 'Could not retrieve models. Verify credentials.' },
];
}
},
),
);
return responses.reduce<ChatModelsResponse>(
(acc, [provider, res]) => {
acc[provider] = res;
return acc;
},
{ ...emptyChatModelsResponse },
);
}
private async fetchModelsForProvider(
user: User,
provider: ChatHubProvider,
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse[ChatHubProvider]> {
switch (provider) {
case 'openai': {
const rawModels = await this.fetchOpenAiModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'openai') };
}
case 'anthropic': {
const rawModels = await this.fetchAnthropicModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'anthropic') };
}
case 'google': {
const rawModels = await this.fetchGoogleModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'google') };
}
case 'ollama': {
const rawModels = await this.fetchOllamaModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'ollama') };
}
case 'azureOpenAi': {
const rawModels = this.fetchAzureOpenAiModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'azureOpenAi') };
}
case 'azureEntraId': {
const rawModels = this.fetchAzureEntraIdModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'azureEntraId') };
}
case 'awsBedrock': {
const rawModels = await this.fetchAwsBedrockModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'awsBedrock') };
}
case 'vercelAiGateway': {
const rawModels = await this.fetchVercelAiGatewayModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'vercelAiGateway') };
}
case 'xAiGrok': {
const rawModels = await this.fetchXAiGrokModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'xAiGrok') };
}
case 'groq': {
const rawModels = await this.fetchGroqModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'groq') };
}
case 'openRouter': {
const rawModels = await this.fetchOpenRouterModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'openRouter') };
}
case 'deepSeek': {
const rawModels = await this.fetchDeepSeekModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'deepSeek') };
}
case 'cohere': {
const rawModels = await this.fetchCohereModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'cohere') };
}
case 'mistralCloud': {
const rawModels = await this.fetchMistralCloudModels(credentials, additionalData);
return { models: this.transformAndFilterModels(rawModels, 'mistralCloud') };
}
case 'n8n':
return await this.fetchAgentWorkflowsAsModels(user);
case 'custom-agent':
return await this.chatHubAgentService.getAgentsByUserIdAsModels(user.id);
}
}
private async fetchOpenAiModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
const resourceLocatorResults = await this.nodeParametersService.getResourceLocatorResults(
'searchModels',
'parameters.model',
additionalData,
PROVIDER_NODE_TYPE_MAP.openai,
{},
credentials,
);
return resourceLocatorResults.results;
}
private async fetchAnthropicModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
const resourceLocatorResults = await this.nodeParametersService.getResourceLocatorResults(
'searchModels',
'parameters.model',
additionalData,
PROVIDER_NODE_TYPE_MAP.anthropic,
{},
credentials,
);
return resourceLocatorResults.results;
}
private async fetchGoogleModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
return await this.nodeParametersService.getOptionsViaLoadOptions(
{
// From Gemini node
// https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts#L75
routing: {
request: {
method: 'GET',
url: '/v1beta/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'models',
},
},
{
type: 'filter',
properties: {
pass: "={{ !$responseItem.name.includes('embedding') }}",
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.name}}',
value: '={{$responseItem.name}}',
description: '={{$responseItem.description}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.google,
{},
credentials,
);
}
private async fetchOllamaModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
return await this.nodeParametersService.getOptionsViaLoadOptions(
{
// From Ollama Model node
// https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LMOllama/description.ts#L24
routing: {
request: {
method: 'GET',
url: '/api/tags',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'models',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.name}}',
value: '={{$responseItem.name}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.ollama,
{},
credentials,
);
}
private fetchAzureOpenAiModels(
_credentials: INodeCredentials,
_additionalData: IWorkflowExecuteAdditionalData,
): INodePropertyOptions[] {
// Azure doesn't appear to offer a way to list available models via API.
// If we add support for this in the future on the Azure OpenAI node we should copy that
// implementation here too.
return [];
}
private fetchAzureEntraIdModels(
_credentials: INodeCredentials,
_additionalData: IWorkflowExecuteAdditionalData,
): INodePropertyOptions[] {
return [];
}
private async fetchAwsBedrockModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
// From AWS Bedrock node
// https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.ts#L100
// https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.ts#L155
const foundationModelsRequest = this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/foundation-models?&byOutputModality=TEXT&byInferenceType=ON_DEMAND',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'modelSummaries',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.modelName}}',
description: '={{$responseItem.modelArn}}',
value: '={{$responseItem.modelId}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.awsBedrock,
{},
credentials,
);
const inferenceProfileModelsRequest = this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/inference-profiles?maxResults=1000',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'inferenceProfileSummaries',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.inferenceProfileName}}',
description:
'={{$responseItem.description || $responseItem.inferenceProfileArn}}',
value: '={{$responseItem.inferenceProfileId}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.awsBedrock,
{},
credentials,
);
const [foundationModels, inferenceProfileModels] = await Promise.all([
foundationModelsRequest,
inferenceProfileModelsRequest,
]);
return foundationModels.concat(inferenceProfileModels);
}
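// Both Bedrock lists are needed: /foundation-models returns directly invokable base
// models, while /inference-profiles returns profile IDs (e.g. cross-region profiles)
// that some newer models can only be invoked through. Note that the two requests are
// created without awaiting, so Promise.all can run them concurrently.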
private async fetchMistralCloudModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
return await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'filter',
properties: {
pass: "={{ !$responseItem.id.includes('embed') }}",
},
},
{
type: 'setKeyValue',
properties: {
name: '={{ $responseItem.id }}',
value: '={{ $responseItem.id }}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.mistralCloud,
{},
credentials,
);
}
private async fetchCohereModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
return await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/v1/models?page_size=100&endpoint=chat',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'models',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.name}}',
value: '={{$responseItem.name}}',
description: '={{$responseItem.description}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.cohere,
{},
credentials,
);
}
private async fetchDeepSeekModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
return await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.id}}',
value: '={{$responseItem.id}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.deepSeek,
{},
credentials,
);
}
private async fetchOpenRouterModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
return await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.id}}',
value: '={{$responseItem.id}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.openRouter,
{},
credentials,
);
}
private async fetchGroqModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
return await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'filter',
properties: {
pass: '={{ $responseItem.active === true && $responseItem.object === "model" }}',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.id}}',
value: '={{$responseItem.id}}',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.groq,
{},
credentials,
);
}
private async fetchXAiGrokModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
return await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.id}}',
value: '={{$responseItem.id}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.xAiGrok,
{},
credentials,
);
}
private async fetchVercelAiGatewayModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
return await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.id}}',
value: '={{$responseItem.id}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.vercelAiGateway,
{},
credentials,
);
}
private async fetchAgentWorkflowsAsModels(user: User): Promise<ChatModelsResponse['n8n']> {
// Workflows are scanned for chat trigger nodes using only their latest version.
// This means we might miss active workflow versions that had chat triggers when the
// latest version does not, but we accept this trade-off for performance.
const workflowsWithChatTrigger = await this.workflowService.getWorkflowsWithNodesIncluded(
user,
[CHAT_TRIGGER_NODE_TYPE],
true,
);
const activeWorkflows = workflowsWithChatTrigger
// Ensure the user has at least read access to the workflows
.filter((workflow) => workflow.scopes.includes('workflow:read'))
.filter((workflow) => !!workflow.activeVersionId);
const workflows = await this.workflowRepository.find({
select: { id: true },
where: { id: In(activeWorkflows.map((workflow) => workflow.id)) },
relations: { activeVersion: true },
});
const models: ChatModelDto[] = [];
for (const { id, activeVersion } of workflows) {
if (!activeVersion) {
continue;
}
const chatTrigger = activeVersion.nodes?.find((node) => node.type === CHAT_TRIGGER_NODE_TYPE);
if (!chatTrigger) {
continue;
}
const chatTriggerParams = validChatTriggerParamsShape.safeParse(chatTrigger.parameters).data;
if (!chatTriggerParams) {
continue;
}
const agentNodes = activeVersion.nodes?.filter(
(node) => node.type === AGENT_LANGCHAIN_NODE_TYPE,
);
// Agent nodes with typeVersion below 2.1 can't stream responses
if (agentNodes.some((node) => node.typeVersion < 2.1)) {
continue;
}
const inputModalities = this.chatHubWorkflowService.parseInputModalities(
chatTriggerParams.options,
);
models.push({
name: chatTriggerParams.agentName ?? activeVersion.name ?? 'Unknown Agent',
description: chatTriggerParams.agentDescription ?? null,
model: {
provider: 'n8n',
workflowId: id,
},
createdAt: activeVersion.createdAt ? activeVersion.createdAt.toISOString() : null,
updatedAt: activeVersion.updatedAt ? activeVersion.updatedAt.toISOString() : null,
metadata: {
inputModalities,
capabilities: {
functionCalling: false,
},
},
});
}
return {
models,
};
}
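// To summarize the guards above, a workflow is exposed as an n8n "model" only when
// all of the following hold:
//   - the user has at least workflow:read scope on it
//   - it has an active version containing a chat trigger node
//   - the chat trigger's parameters parse against validChatTriggerParamsShape
//   - every agent node in it has typeVersion >= 2.1 (older agents can't stream)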
private transformAndFilterModels(
rawModels: INodePropertyOptions[],
provider: ChatHubLLMProvider,
): ChatModelDto[] {
return rawModels.map((model) => {
const id = String(model.value);
return {
id,
name: model.name,
description: model.description ?? null,
model: {
provider,
model: id,
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata(provider, id),
};
});
}
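// Example with hypothetical values:
//   transformAndFilterModels([{ name: 'gpt-4o', value: 'gpt-4o', description: 'Omni' }], 'openai')
// returns:
//   [{ id: 'gpt-4o', name: 'gpt-4o', description: 'Omni',
//      model: { provider: 'openai', model: 'gpt-4o' },
//      createdAt: null, updatedAt: null,
//      metadata: getModelMetadata('openai', 'gpt-4o') }]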
}

View File

@@ -2,7 +2,6 @@ import {
PROVIDER_CREDENTIAL_TYPE_MAP,
type ChatHubProvider,
type ChatHubLLMProvider,
type ChatModelsResponse,
type ChatHubConversationsResponse,
type ChatHubConversationResponse,
ChatHubMessageDto,
@@ -10,17 +9,14 @@ import {
type ChatSessionId,
ChatHubConversationModel,
ChatHubMessageStatus,
chatHubProviderSchema,
type EnrichedStructuredChunk,
ChatHubBaseLLMModel,
ChatHubN8nModel,
ChatHubCustomAgentModel,
emptyChatModelsResponse,
type ChatHubUpdateConversationRequest,
ChatModelDto,
} from '@n8n/api-types';
import { Logger } from '@n8n/backend-common';
import { ExecutionRepository, IExecutionResponse, User, WorkflowRepository, In } from '@n8n/db';
import { ExecutionRepository, IExecutionResponse, User, WorkflowRepository } from '@n8n/db';
import { Service } from '@n8n/di';
import type { EntityManager } from '@n8n/typeorm';
import { GlobalConfig } from '@n8n/config';
@@ -28,12 +24,10 @@ import type { Response } from 'express';
import { ErrorReporter, InstanceSettings } from 'n8n-core';
import {
CHAT_TRIGGER_NODE_TYPE,
AGENT_LANGCHAIN_NODE_TYPE,
OperationalError,
ManualExecutionCancelledError,
type INodeCredentials,
type IWorkflowBase,
type IWorkflowExecuteAdditionalData,
jsonParse,
jsonStringify,
StructuredChunk,
@@ -48,15 +42,11 @@ import {
} from 'n8n-workflow';
import { ActiveExecutions } from '@/active-executions';
import { CredentialsFinderService } from '@/credentials/credentials-finder.service';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { NotFoundError } from '@/errors/response-errors/not-found.error';
import { ExecutionService } from '@/executions/execution.service';
import { DynamicNodeParametersService } from '@/services/dynamic-node-parameters.service';
import { getBase } from '@/workflow-execute-additional-data';
import { WorkflowExecutionService } from '@/workflows/workflow-execution.service';
import { WorkflowFinderService } from '@/workflows/workflow-finder.service';
import { WorkflowService } from '@/workflows/workflow.service';
import { ChatHubAgentService } from './chat-hub-agent.service';
import { ChatHubCredentialsService } from './chat-hub-credentials.service';
@@ -64,18 +54,12 @@ import type { ChatHubMessage } from './chat-hub-message.entity';
import type { ChatHubSession } from './chat-hub-session.entity';
import { ChatHubWorkflowService } from './chat-hub-workflow.service';
import { ChatHubAttachmentService } from './chat-hub.attachment.service';
import {
JSONL_STREAM_HEADERS,
NODE_NAMES,
PROVIDER_NODE_TYPE_MAP,
getModelMetadata,
} from './chat-hub.constants';
import { JSONL_STREAM_HEADERS, NODE_NAMES, PROVIDER_NODE_TYPE_MAP } from './chat-hub.constants';
import { ChatHubSettingsService } from './chat-hub.settings.service';
import {
HumanMessagePayload,
RegenerateMessagePayload,
EditMessagePayload,
validChatTriggerParamsShape,
} from './chat-hub.types';
import { ChatHubMessageRepository } from './chat-message.repository';
import { ChatHubSessionRepository } from './chat-session.repository';
@@ -90,16 +74,13 @@ export class ChatHubService {
private readonly logger: Logger,
private readonly errorReporter: ErrorReporter,
private readonly executionService: ExecutionService,
private readonly nodeParametersService: DynamicNodeParametersService,
private readonly executionRepository: ExecutionRepository,
private readonly workflowExecutionService: WorkflowExecutionService,
private readonly workflowService: WorkflowService,
private readonly workflowFinderService: WorkflowFinderService,
private readonly workflowRepository: WorkflowRepository,
private readonly activeExecutions: ActiveExecutions,
private readonly sessionRepository: ChatHubSessionRepository,
private readonly messageRepository: ChatHubMessageRepository,
private readonly credentialsFinderService: CredentialsFinderService,
private readonly chatHubAgentService: ChatHubAgentService,
private readonly chatHubCredentialsService: ChatHubCredentialsService,
private readonly chatHubWorkflowService: ChatHubWorkflowService,
@@ -109,883 +90,6 @@ export class ChatHubService {
private readonly globalConfig: GlobalConfig,
) {}
async getModels(
user: User,
credentialIds: Record<ChatHubLLMProvider, string | null>,
): Promise<ChatModelsResponse> {
const additionalData = await getBase({ userId: user.id });
const providers = chatHubProviderSchema.options;
const allCredentials = await this.credentialsFinderService.findCredentialsForUser(user, [
'credential:read',
]);
const responses = await Promise.all(
providers.map<Promise<[ChatHubProvider, ChatModelsResponse[ChatHubProvider]]>>(
async (provider: ChatHubProvider) => {
const credentials: INodeCredentials = {};
if (provider !== 'n8n' && provider !== 'custom-agent') {
const credentialId = credentialIds[provider];
if (!credentialId) {
return [provider, { models: [] }];
}
// Ensure the user has the permission to read the credential
if (!allCredentials.some((credential) => credential.id === credentialId)) {
return [
provider,
{ models: [], error: 'Could not retrieve models. Verify credentials.' },
];
}
credentials[PROVIDER_CREDENTIAL_TYPE_MAP[provider]] = { name: '', id: credentialId };
}
try {
return [
provider,
await this.fetchModelsForProvider(user, provider, credentials, additionalData),
];
} catch {
return [
provider,
{ models: [], error: 'Could not retrieve models. Verify credentials.' },
];
}
},
),
);
return responses.reduce<ChatModelsResponse>(
(acc, [provider, res]) => {
acc[provider] = res;
return acc;
},
{ ...emptyChatModelsResponse },
);
}
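// Shape sketch with hypothetical values: the aggregate response maps every provider
// to either its models or a user-facing error, e.g.
//   { openai: { models: [...] },
//     anthropic: { models: [], error: 'Could not retrieve models. Verify credentials.' },
//     n8n: { models: [...] }, ... }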
private async fetchModelsForProvider(
user: User,
provider: ChatHubProvider,
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse[ChatHubProvider]> {
switch (provider) {
case 'openai':
return await this.fetchOpenAiModels(credentials, additionalData);
case 'anthropic':
return await this.fetchAnthropicModels(credentials, additionalData);
case 'google':
return await this.fetchGoogleModels(credentials, additionalData);
case 'ollama':
return await this.fetchOllamaModels(credentials, additionalData);
case 'azureOpenAi':
case 'azureEntraId':
return this.fetchAzureOpenAiModels(credentials, additionalData);
case 'awsBedrock':
return await this.fetchAwsBedrockModels(credentials, additionalData);
case 'vercelAiGateway':
return await this.fetchVercelAiGatewayModels(credentials, additionalData);
case 'xAiGrok':
return await this.fetchXAiGrokModels(credentials, additionalData);
case 'groq':
return await this.fetchGroqModels(credentials, additionalData);
case 'openRouter':
return await this.fetchOpenRouterModels(credentials, additionalData);
case 'deepSeek':
return await this.fetchDeepSeekModels(credentials, additionalData);
case 'cohere':
return await this.fetchCohereModels(credentials, additionalData);
case 'mistralCloud':
return await this.fetchMistralCloudModels(credentials, additionalData);
case 'n8n':
return await this.fetchAgentWorkflowsAsModels(user);
case 'custom-agent':
return await this.chatHubAgentService.getAgentsByUserIdAsModels(user.id);
}
}
private async fetchOpenAiModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['openai']> {
const resourceLocatorResults = await this.nodeParametersService.getResourceLocatorResults(
'searchModels',
'parameters.model',
additionalData,
PROVIDER_NODE_TYPE_MAP.openai,
{},
credentials,
);
return {
models: resourceLocatorResults.results.map((result) => ({
name: result.name,
description: result.description ?? null,
model: {
provider: 'openai',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('openai', String(result.value)),
})),
};
}
private async fetchAnthropicModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['anthropic']> {
const resourceLocatorResults = await this.nodeParametersService.getResourceLocatorResults(
'searchModels',
'parameters.model',
additionalData,
PROVIDER_NODE_TYPE_MAP.anthropic,
{},
credentials,
);
return {
models: resourceLocatorResults.results.map((result) => ({
name: result.name,
description: result.description ?? null,
model: {
provider: 'anthropic',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('anthropic', String(result.value)),
})),
};
}
private async fetchGoogleModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['google']> {
const results = await this.nodeParametersService.getOptionsViaLoadOptions(
{
// From Gemini node
// https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts#L75
routing: {
request: {
method: 'GET',
url: '/v1beta/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'models',
},
},
{
type: 'filter',
properties: {
pass: "={{ !$responseItem.name.includes('embedding') }}",
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.name}}',
value: '={{$responseItem.name}}',
description: '={{$responseItem.description}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.google,
{},
credentials,
);
return {
models: results.map((result) => ({
name: result.name,
description: result.description ?? null,
model: {
provider: 'google',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('google', String(result.value)),
})),
};
}
private async fetchOllamaModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['ollama']> {
const results = await this.nodeParametersService.getOptionsViaLoadOptions(
{
// From Ollama Model node
// https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LMOllama/description.ts#L24
routing: {
request: {
method: 'GET',
url: '/api/tags',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'models',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.name}}',
value: '={{$responseItem.name}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.ollama,
{},
credentials,
);
return {
models: results.map((result) => ({
name: result.name,
description: result.description ?? null,
model: {
provider: 'ollama',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('ollama', String(result.value)),
})),
};
}
private fetchAzureOpenAiModels(
_credentials: INodeCredentials,
_additionalData: IWorkflowExecuteAdditionalData,
): ChatModelsResponse['azureOpenAi'] {
// Azure doesn't appear to offer a way to list available models via its API.
// If we add support for this on the Azure OpenAI node in the future, we should
// copy that implementation here too.
return {
models: [],
};
}
private async fetchAwsBedrockModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['awsBedrock']> {
// From AWS Bedrock node
// https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.ts#L100
// https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.ts#L155
const foundationModelsRequest = this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/foundation-models?byOutputModality=TEXT&byInferenceType=ON_DEMAND',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'modelSummaries',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.modelName}}',
description: '={{$responseItem.modelArn}}',
value: '={{$responseItem.modelId}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.awsBedrock,
{},
credentials,
);
const inferenceProfileModelsRequest = this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/inference-profiles?maxResults=1000',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'inferenceProfileSummaries',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.inferenceProfileName}}',
description:
'={{$responseItem.description || $responseItem.inferenceProfileArn}}',
value: '={{$responseItem.inferenceProfileId}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.awsBedrock,
{},
credentials,
);
const [foundationModels, inferenceProfileModels] = await Promise.all([
foundationModelsRequest,
inferenceProfileModelsRequest,
]);
return {
models: foundationModels.concat(inferenceProfileModels).map((result) => ({
name: result.name,
description: result.description ?? String(result.value),
model: {
provider: 'awsBedrock',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('awsBedrock', String(result.value)),
})),
};
}
private async fetchMistralCloudModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['mistralCloud']> {
const results = await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'filter',
properties: {
pass: "={{ !$responseItem.id.includes('embed') }}",
},
},
{
type: 'setKeyValue',
properties: {
name: '={{ $responseItem.id }}',
value: '={{ $responseItem.id }}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.mistralCloud,
{},
credentials,
);
return {
models: results.map((result) => ({
name: result.name,
description: result.description ?? String(result.value),
model: {
provider: 'mistralCloud',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('mistralCloud', String(result.value)),
})),
};
}
private async fetchCohereModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['cohere']> {
const results = await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/v1/models?page_size=100&endpoint=chat',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'models',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.name}}',
value: '={{$responseItem.name}}',
description: '={{$responseItem.description}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.cohere,
{},
credentials,
);
return {
models: results.map((result) => ({
name: result.name,
description: result.description ?? null,
model: {
provider: 'cohere',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('cohere', String(result.value)),
})),
};
}
private async fetchDeepSeekModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['deepSeek']> {
const results = await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.id}}',
value: '={{$responseItem.id}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.deepSeek,
{},
credentials,
);
return {
models: results.map((result) => ({
name: result.name,
description: result.description ?? String(result.value),
model: {
provider: 'deepSeek',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('deepSeek', String(result.value)),
})),
};
}
private async fetchOpenRouterModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['openRouter']> {
const results = await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.id}}',
value: '={{$responseItem.id}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.openRouter,
{},
credentials,
);
return {
models: results.map((result) => ({
name: result.name,
description: result.description ?? null,
model: {
provider: 'openRouter',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('openRouter', String(result.value)),
})),
};
}
private async fetchGroqModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['groq']> {
const results = await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'filter',
properties: {
pass: '={{ $responseItem.active === true && $responseItem.object === "model" }}',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.id}}',
value: '={{$responseItem.id}}',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.groq,
{},
credentials,
);
return {
models: results.map((result) => ({
name: result.name,
description: result.description ?? null,
model: {
provider: 'groq',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('groq', String(result.value)),
})),
};
}
private async fetchXAiGrokModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['xAiGrok']> {
const results = await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.id}}',
value: '={{$responseItem.id}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.xAiGrok,
{},
credentials,
);
return {
models: results.map((result) => ({
name: result.name,
description: result.description ?? null,
model: {
provider: 'xAiGrok',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('xAiGrok', String(result.value)),
})),
};
}
private async fetchVercelAiGatewayModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['vercelAiGateway']> {
const results = await this.nodeParametersService.getOptionsViaLoadOptions(
{
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.id}}',
value: '={{$responseItem.id}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.vercelAiGateway,
{},
credentials,
);
return {
models: results.map((result) => ({
name: result.name,
description: result.description ?? String(result.value),
model: {
provider: 'vercelAiGateway',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('vercelAiGateway', String(result.value)),
})),
};
}
private async fetchAgentWorkflowsAsModels(user: User): Promise<ChatModelsResponse['n8n']> {
// Workflows are scanned for chat trigger nodes using only their latest version.
// This means we might miss active workflow versions that had chat triggers when the
// latest version does not, but we accept this trade-off for performance.
const workflowsWithChatTrigger = await this.workflowService.getWorkflowsWithNodesIncluded(
user,
[CHAT_TRIGGER_NODE_TYPE],
true,
);
const activeWorkflows = workflowsWithChatTrigger
// Ensure the user has at least read access to the workflows
.filter((workflow) => workflow.scopes.includes('workflow:read'))
.filter((workflow) => !!workflow.activeVersionId);
const workflows = await this.workflowRepository.find({
select: { id: true },
where: { id: In(activeWorkflows.map((workflow) => workflow.id)) },
relations: { activeVersion: true },
});
const models: ChatModelDto[] = [];
for (const { id, activeVersion } of workflows) {
if (!activeVersion) {
continue;
}
const chatTrigger = activeVersion.nodes?.find((node) => node.type === CHAT_TRIGGER_NODE_TYPE);
if (!chatTrigger) {
continue;
}
const chatTriggerParams = validChatTriggerParamsShape.safeParse(chatTrigger.parameters).data;
if (!chatTriggerParams) {
continue;
}
const agentNodes = activeVersion.nodes?.filter(
(node) => node.type === AGENT_LANGCHAIN_NODE_TYPE,
);
// Agent nodes with typeVersion below 2.1 can't stream responses
if (agentNodes.some((node) => node.typeVersion < 2.1)) {
continue;
}
const inputModalities = this.chatHubWorkflowService.parseInputModalities(
chatTriggerParams.options,
);
models.push({
name: chatTriggerParams.agentName ?? activeVersion.name ?? 'Unknown Agent',
description: chatTriggerParams.agentDescription ?? null,
model: {
provider: 'n8n',
workflowId: id,
},
createdAt: activeVersion.createdAt ? activeVersion.createdAt.toISOString() : null,
updatedAt: activeVersion.updatedAt ? activeVersion.updatedAt.toISOString() : null,
metadata: {
inputModalities,
capabilities: {
functionCalling: false,
},
},
});
}
return {
models,
};
}
private async deleteChatWorkflow(workflowId: string): Promise<void> {
await this.workflowRepository.delete(workflowId);
}

View File

@@ -738,7 +738,7 @@ describe('execute-workflow MCP tool', () => {
);
// Call through the tool handler to test telemetry
await tool.handler({ workflowId: 'error-tracking' }, {} as any);
await tool.handler({ workflowId: 'error-tracking', inputs: undefined }, {} as any);
expect(telemetry.track).toHaveBeenCalledWith(
'User called mcp tool',

View File

@@ -68,6 +68,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: SUPPORTED_SCOPES.join(' '),
logo_uri: undefined,
tos_uri: undefined,
});
});
@@ -111,6 +113,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read write',
logo_uri: undefined,
tos_uri: undefined,
};
oauthClientRepository.insert.mockResolvedValue({} as any);
@@ -140,6 +144,8 @@ describe('McpOAuthService', () => {
client_secret_expires_at: 1234567890,
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
oauthClientRepository.insert.mockResolvedValue({} as any);
@@ -166,6 +172,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
const error = new Error('Database error');
@@ -192,6 +200,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read write',
logo_uri: undefined,
tos_uri: undefined,
};
const params = {
@@ -222,6 +232,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
const params = {
@@ -250,6 +262,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
const params = {
@@ -291,6 +305,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
authorizationCodeService.getCodeChallenge.mockResolvedValue('challenge-123');
@@ -315,6 +331,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
const authRecord = {
@@ -365,6 +383,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
const authRecord = {
@@ -398,6 +418,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
const newTokens = {
@@ -447,6 +469,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
tokenService.revokeAccessToken.mockResolvedValue(true);
@@ -469,6 +493,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
tokenService.revokeRefreshToken.mockResolvedValue(true);
@@ -491,6 +517,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
tokenService.revokeAccessToken.mockResolvedValue(true);
@@ -512,6 +540,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
tokenService.revokeAccessToken.mockResolvedValue(false);
@@ -534,6 +564,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
tokenService.revokeAccessToken.mockResolvedValue(false);

View File

@@ -57,6 +57,8 @@ export class McpOAuthService implements OAuthServerProvider {
}),
response_types: ['code'],
scope: SUPPORTED_SCOPES.join(' '),
logo_uri: undefined,
tos_uri: undefined,
};
},
registerClient: async (

View File

@@ -1176,9 +1176,8 @@
"folder.delete.modal.confirmation": "What should we do with {folders} {workflows} in this folder?",
"folder.count": "the {count} folder | the {count} folders",
"workflow.count": "the {count} workflow | the {count} workflows",
"workflow.description.tooltip": "Edit workflow description",
"workflow.description.placeholder": "Describe the purpose and functionality of this workflow",
"workflow.description.placeholder.mcp": "To help MCP clients understand when to use this workflow, add a short workflow description that describes what it does.",
"workflow.description.mcp": "Clear descriptions help other users and MCP clients understand the purpose of your workflow",
"workflow.description.nomcp": "Clear descriptions help other users understand the purpose of your workflow",
"workflow.description.error.title": "Problem updating workflow description",
"folder.and.workflow.separator": "and",
"folders.delete.action": "Archive all workflows and delete subfolders",
@@ -1312,9 +1311,10 @@
"mcp.workflowDeactivated.message": "MCP Access has been disabled for this workflow because it is deactivated",
"menuActions.duplicate": "Duplicate",
"menuActions.download": "Download",
"menuActions.push": "Push to Git",
"menuActions.push": "Push to git",
"menuActions.editDescription": "Edit description",
"menuActions.importFromUrl": "Import from URL...",
"menuActions.importFromFile": "Import from File...",
"menuActions.importFromFile": "Import from file...",
"menuActions.delete": "Delete",
"menuActions.archive": "Archive",
"menuActions.unarchive": "Unarchive",

View File

@@ -15,6 +15,7 @@ import {
IS_DRAFT_PUBLISH_ENABLED,
WORKFLOW_SHARE_MODAL_KEY,
EnterpriseEditionFeature,
WORKFLOW_DESCRIPTION_MODAL_KEY,
} from '@/app/constants';
import { hasPermission } from '@/app/utils/rbac/permissions';
import { useRoute } from 'vue-router';
@@ -163,6 +164,11 @@ const workflowMenuItems = computed<Array<ActionDropdownItem<WORKFLOW_MENU_ACTION
label: locale.baseText('menuActions.duplicate'),
disabled: !onWorkflowPage.value || !props.id,
});
actions.unshift({
id: WORKFLOW_MENU_ACTIONS.EDIT_DESCRIPTION,
label: locale.baseText('menuActions.editDescription'),
disabled: !onWorkflowPage.value || !props.id,
});
actions.push(
{
@@ -239,6 +245,20 @@ const workflowMenuItems = computed<Array<ActionDropdownItem<WORKFLOW_MENU_ACTION
async function onWorkflowMenuSelect(action: WORKFLOW_MENU_ACTIONS): Promise<void> {
switch (action) {
case WORKFLOW_MENU_ACTIONS.EDIT_DESCRIPTION: {
const workflowId = getWorkflowId(props.id, route.params.name);
if (!workflowId) return;
const workflowDescription = workflowsStore.getWorkflowById(workflowId).description;
uiStore.openModalWithData({
name: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId,
workflowDescription,
},
});
break;
}
case WORKFLOW_MENU_ACTIONS.DUPLICATE: {
uiStore.openModalWithData({
name: DUPLICATE_MODAL_KEY,

View File

@@ -1,638 +0,0 @@
import { createComponentRenderer } from '@/__tests__/render';
import { type MockedStore, mockedStore } from '@/__tests__/utils';
import { createTestingPinia } from '@pinia/testing';
import userEvent from '@testing-library/user-event';
import { nextTick } from 'vue';
import WorkflowDescriptionPopover from '@/app/components/MainHeader/WorkflowDescriptionPopover.vue';
import { useWorkflowsStore } from '@/app/stores/workflows.store';
import { useUIStore } from '@/app/stores/ui.store';
import { useSettingsStore } from '@/app/stores/settings.store';
import { useToast } from '@/app/composables/useToast';
import { useTelemetry } from '@/app/composables/useTelemetry';
import { STORES } from '@n8n/stores';
vi.mock('@/app/composables/useToast', () => {
const showError = vi.fn();
return {
useToast: () => ({
showError,
}),
};
});
vi.mock('@/app/composables/useTelemetry', () => {
const track = vi.fn();
return {
useTelemetry: () => ({
track,
}),
};
});
const initialState = {
[STORES.SETTINGS]: {
settings: {
modules: {
mcp: {
enabled: false,
},
},
},
},
};
const renderComponent = createComponentRenderer(WorkflowDescriptionPopover, {
pinia: createTestingPinia({ initialState }),
});
describe('WorkflowDescriptionPopover', () => {
let workflowsStore: MockedStore<typeof useWorkflowsStore>;
let uiStore: MockedStore<typeof useUIStore>;
let settingsStore: MockedStore<typeof useSettingsStore>;
let telemetry: ReturnType<typeof useTelemetry>;
let toast: ReturnType<typeof useToast>;
beforeEach(() => {
workflowsStore = mockedStore(useWorkflowsStore);
uiStore = mockedStore(useUIStore);
settingsStore = mockedStore(useSettingsStore);
telemetry = useTelemetry();
toast = useToast();
// Reset mocks
workflowsStore.saveWorkflowDescription = vi.fn().mockResolvedValue(undefined);
workflowsStore.workflow = {
id: 'test-workflow-id',
name: 'Test Workflow',
active: false,
activeVersionId: null,
isArchived: false,
createdAt: Date.now(),
updatedAt: Date.now(),
versionId: '1',
nodes: [],
connections: {},
};
uiStore.stateIsDirty = false;
});
afterEach(() => {
vi.clearAllMocks();
});
describe('Component rendering', () => {
it('should render the description button and default description', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
const button = getByTestId('workflow-description-button');
await userEvent.click(button);
const textarea = getByTestId('workflow-description-input');
expect(textarea).toHaveValue('Initial description');
});
it('should render empty string if there is no description', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
},
});
const button = getByTestId('workflow-description-button');
await userEvent.click(button);
const textarea = getByTestId('workflow-description-input');
expect(textarea).toHaveValue('');
});
});
describe('Popover interaction', () => {
it('should open popover when button is clicked', async () => {
const { getByTestId, queryByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Test description',
},
});
const button = getByTestId('workflow-description-button');
expect(queryByTestId('workflow-description-edit-content')).not.toBeInTheDocument();
await userEvent.click(button);
expect(getByTestId('workflow-description-edit-content')).toBeInTheDocument();
});
it('should focus textarea when popover opens', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
},
});
const button = getByTestId('workflow-description-button');
await userEvent.click(button);
await nextTick();
const textarea = getByTestId('workflow-description-input');
expect(textarea).toHaveFocus();
});
it('should save description when popover closes', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
const button = getByTestId('workflow-description-button');
await userEvent.click(button);
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
await userEvent.type(textarea, 'Updated description');
// Click outside to close popover
await userEvent.click(document.body);
expect(workflowsStore.saveWorkflowDescription).toHaveBeenCalledWith(
'test-workflow-id',
'Updated description',
);
});
});
describe('Save and Cancel functionality', () => {
it('should save description when save button is clicked', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
await userEvent.type(textarea, 'New description');
const saveButton = getByTestId('workflow-description-save-button');
await userEvent.click(saveButton);
expect(workflowsStore.saveWorkflowDescription).toHaveBeenCalledWith(
'test-workflow-id',
'New description',
);
expect(telemetry.track).toHaveBeenCalledWith('User set workflow description', {
workflow_id: 'test-workflow-id',
description: 'New description',
});
});
it('should save empty string when description is cleared', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
const saveButton = getByTestId('workflow-description-save-button');
await userEvent.click(saveButton);
expect(workflowsStore.saveWorkflowDescription).toHaveBeenCalledWith('test-workflow-id', '');
expect(telemetry.track).toHaveBeenCalledWith('User set workflow description', {
workflow_id: 'test-workflow-id',
description: '',
});
});
it('should disable save button when description has not changed', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const saveButton = getByTestId('workflow-description-save-button');
expect(saveButton).toBeDisabled();
});
it('should disable save button when whitespace-only changes result in same trimmed value', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: '',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
// Type only whitespace
await userEvent.type(textarea, ' ');
const saveButton = getByTestId('workflow-description-save-button');
// Should be disabled since trimmed value is still empty
expect(saveButton).toBeDisabled();
});
it('should not save on Enter key when only whitespace is entered', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: '',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
// Type only whitespace
await userEvent.type(textarea, ' ');
await userEvent.keyboard('{Enter}');
// Should not save since canSave is false
expect(workflowsStore.saveWorkflowDescription).not.toHaveBeenCalled();
});
it('should enable save button when description changes', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, ' updated');
const saveButton = getByTestId('workflow-description-save-button');
expect(saveButton).not.toBeDisabled();
});
it('should revert changes when cancel button is clicked', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
await userEvent.type(textarea, 'Changed description');
const cancelButton = getByTestId('workflow-description-cancel-button');
await userEvent.click(cancelButton);
// Re-open popover to check value
await userEvent.click(getByTestId('workflow-description-button'));
const textareaAfterCancel = getByTestId('workflow-description-input');
expect(textareaAfterCancel).toHaveValue('Initial description');
});
it('should disable cancel button during save', async () => {
workflowsStore.saveWorkflowDescription = vi.fn(
async () => await new Promise((resolve) => setTimeout(resolve, 100)),
);
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, ' updated');
const saveButton = getByTestId('workflow-description-save-button');
const cancelButton = getByTestId('workflow-description-cancel-button');
await userEvent.click(saveButton);
// During save, cancel should be disabled
expect(cancelButton).toBeDisabled();
});
});
describe('Keyboard shortcuts', () => {
it('should save when Enter key is pressed', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
await userEvent.type(textarea, 'New description');
await userEvent.keyboard('{Enter}');
expect(workflowsStore.saveWorkflowDescription).toHaveBeenCalledWith(
'test-workflow-id',
'New description',
);
});
it('should allow new lines with Shift+Enter', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: '',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, 'Line 1');
await userEvent.keyboard('{Shift>}{Enter}{/Shift}');
await userEvent.type(textarea, 'Line 2');
expect(textarea).toHaveValue('Line 1\nLine 2');
expect(workflowsStore.saveWorkflowDescription).not.toHaveBeenCalled();
});
it('should cancel when Escape key is pressed', async () => {
const { getByTestId, queryByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
await userEvent.type(textarea, 'Changed description');
await userEvent.keyboard('{Escape}');
// Check that popover is closed
expect(queryByTestId('workflow-description-edit-content')).not.toBeInTheDocument();
// Re-open to verify changes were reverted
await userEvent.click(getByTestId('workflow-description-button'));
const textareaAfterEscape = getByTestId('workflow-description-input');
expect(textareaAfterEscape).toHaveValue('Initial description');
});
});
describe('Error handling', () => {
it('should show error toast when save fails', async () => {
const error = new Error('Save failed');
workflowsStore.saveWorkflowDescription = vi.fn().mockRejectedValue(error);
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, ' updated');
const saveButton = getByTestId('workflow-description-save-button');
await userEvent.click(saveButton);
await vi.waitFor(() => {
expect(toast.showError).toHaveBeenCalledWith(
error,
'Problem updating workflow description',
);
});
});
it('should revert to last saved value on error', async () => {
const error = new Error('Save failed');
workflowsStore.saveWorkflowDescription = vi.fn().mockRejectedValue(error);
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
await userEvent.type(textarea, 'Failed update');
const saveButton = getByTestId('workflow-description-save-button');
await userEvent.click(saveButton);
await vi.waitFor(() => {
expect(textarea).toHaveValue('Initial description');
});
});
});
describe('Dirty state management', () => {
it('should set dirty flag when description changes', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
expect(uiStore.stateIsDirty).toBe(false);
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, ' updated');
expect(uiStore.stateIsDirty).toBe(true);
});
it('should clear dirty flag when saving', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, ' updated');
expect(uiStore.stateIsDirty).toBe(true);
const saveButton = getByTestId('workflow-description-save-button');
await userEvent.click(saveButton);
await vi.waitFor(() => {
expect(uiStore.stateIsDirty).toBe(false);
});
});
it('should clear dirty flag when canceling', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, ' updated');
expect(uiStore.stateIsDirty).toBe(true);
const cancelButton = getByTestId('workflow-description-cancel-button');
await userEvent.click(cancelButton);
expect(uiStore.stateIsDirty).toBe(false);
});
it('should handle whitespace-only changes correctly', async () => {
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: ' Initial ',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
await userEvent.type(textarea, 'Initial');
// Should not be dirty since trimmed values are the same
expect(uiStore.stateIsDirty).toBe(false);
});
});
describe('MCP tooltips', () => {
it('should show base tooltip when MCP is disabled', async () => {
// Ensure MCP is disabled
settingsStore.isModuleActive = vi.fn().mockReturnValue(false);
settingsStore.moduleSettings.mcp = { mcpAccessEnabled: false };
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: '',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
// The tooltip text appears as placeholder in the textarea
const textarea = getByTestId('workflow-description-input');
const placeholder = textarea.getAttribute('placeholder');
expect(placeholder).toContain('Edit workflow description');
expect(placeholder).not.toContain('MCP clients');
});
it('should show MCP tooltip when MCP is enabled', async () => {
// Enable MCP module
settingsStore.isModuleActive = vi.fn().mockReturnValue(true);
settingsStore.moduleSettings.mcp = { mcpAccessEnabled: true };
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: '',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
const placeholder = textarea.getAttribute('placeholder');
expect(placeholder).toContain('MCP clients');
expect(placeholder).not.toContain('Edit workflow description');
});
});
describe('UI state tracking', () => {
it('should track active actions during save', async () => {
const addActiveActionSpy = vi.spyOn(uiStore, 'addActiveAction');
const removeActiveActionSpy = vi.spyOn(uiStore, 'removeActiveAction');
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, ' updated');
const saveButton = getByTestId('workflow-description-save-button');
await userEvent.click(saveButton);
expect(addActiveActionSpy).toHaveBeenCalledWith('workflowSaving');
await vi.waitFor(() => {
expect(removeActiveActionSpy).toHaveBeenCalledWith('workflowSaving');
});
});
it('should remove active action even on error', async () => {
const removeActiveActionSpy = vi.spyOn(uiStore, 'removeActiveAction');
workflowsStore.saveWorkflowDescription = vi.fn().mockRejectedValue(new Error('Failed'));
const { getByTestId } = renderComponent({
props: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
});
await userEvent.click(getByTestId('workflow-description-button'));
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, ' updated');
const saveButton = getByTestId('workflow-description-save-button');
await userEvent.click(saveButton);
await vi.waitFor(() => {
expect(removeActiveActionSpy).toHaveBeenCalledWith('workflowSaving');
});
});
});
});

View File

@@ -1,239 +0,0 @@
<script setup lang="ts">
import { computed, nextTick, ref, useTemplateRef, watch } from 'vue';
import {
N8nButton,
N8nIconButton,
N8nInput,
N8nInputLabel,
N8nPopoverReka,
N8nTooltip,
} from '@n8n/design-system';
import { useI18n } from '@n8n/i18n';
import { useSettingsStore } from '@/app/stores/settings.store';
import { useWorkflowsStore } from '@/app/stores/workflows.store';
import { useUIStore } from '@/app/stores/ui.store';
import { useToast } from '@/app/composables/useToast';
import { useTelemetry } from '@/app/composables/useTelemetry';
type Props = {
workflowId: string;
workflowDescription?: string | null;
};
const props = withDefaults(defineProps<Props>(), {
workflowDescription: '',
});
const i18n = useI18n();
const toast = useToast();
const telemetry = useTelemetry();
const settingsStore = useSettingsStore();
const workflowStore = useWorkflowsStore();
const uiStore = useUIStore();
const descriptionValue = ref(props.workflowDescription);
const popoverOpen = ref(false);
const descriptionInput = useTemplateRef<HTMLInputElement>('descriptionInput');
const isSaving = ref(false);
const lastSavedDescription = ref(props.workflowDescription);
const normalizedCurrentValue = computed(() => (descriptionValue.value ?? '').trim());
const normalizedLastSaved = computed(() => (lastSavedDescription.value ?? '').trim());
const canSave = computed(() => normalizedCurrentValue.value !== normalizedLastSaved.value);
const isMcpEnabled = computed(
() => settingsStore.isModuleActive('mcp') && settingsStore.moduleSettings.mcp?.mcpAccessEnabled,
);
const textareaTip = computed(() => {
if (!isMcpEnabled.value) {
return i18n.baseText('workflow.description.tooltip');
}
return i18n.baseText('workflow.description.placeholder.mcp');
});
const saveDescription = async () => {
isSaving.value = true;
uiStore.addActiveAction('workflowSaving');
try {
await workflowStore.saveWorkflowDescription(
props.workflowId,
normalizedCurrentValue.value ?? null,
);
lastSavedDescription.value = descriptionValue.value;
uiStore.stateIsDirty = false;
telemetry.track('User set workflow description', {
workflow_id: props.workflowId,
description: normalizedCurrentValue.value ?? null,
});
} catch (error) {
toast.showError(error, i18n.baseText('workflow.description.error.title'));
descriptionValue.value = lastSavedDescription.value;
} finally {
isSaving.value = false;
uiStore.removeActiveAction('workflowSaving');
}
};
const handlePopoverOpenChange = async (open: boolean) => {
popoverOpen.value = open;
if (open) {
await nextTick();
descriptionInput.value?.focus();
} else {
await saveDescription();
}
};
const handleKeyDown = async (event: KeyboardEvent) => {
// Escape - cancel editing
if (event.key === 'Escape') {
event.preventDefault();
event.stopPropagation();
await cancel();
}
// Enter (without Shift) - save and close
if (event.key === 'Enter' && !event.shiftKey) {
if (!canSave.value) {
return;
}
event.preventDefault();
event.stopPropagation();
await save();
}
};
const cancel = async () => {
descriptionValue.value = lastSavedDescription.value;
uiStore.stateIsDirty = false;
popoverOpen.value = false;
};
const save = async () => {
await saveDescription();
popoverOpen.value = false;
};
// Sync with external prop changes
watch(
() => props.workflowDescription,
(newValue) => {
descriptionValue.value = newValue;
lastSavedDescription.value = newValue;
},
);
// Set dirty flag when text changes
watch(descriptionValue, (newValue) => {
const normalizedNewValue = (newValue ?? '').trim();
if (normalizedNewValue !== normalizedLastSaved.value) {
uiStore.stateIsDirty = true;
} else {
uiStore.stateIsDirty = false;
}
});
</script>
<template>
<N8nTooltip :disabled="popoverOpen" :content="i18n.baseText('workflow.description.tooltip')">
<div :class="$style['description-popover-wrapper']" data-test-id="workflow-description-popover">
<N8nPopoverReka
id="workflow-description-popover"
:open="popoverOpen"
@update:open="handlePopoverOpenChange"
>
<template #trigger>
<N8nIconButton
:class="{ [$style['description-button']]: true, [$style.active]: popoverOpen }"
:square="true"
data-test-id="workflow-description-button"
icon="notebook-pen"
type="tertiary"
size="small"
:aria-label="i18n.baseText('workflow.description.tooltip')"
/>
</template>
<template #content>
<div
:class="$style['description-edit-content']"
data-test-id="workflow-description-edit-content"
>
<N8nInputLabel
:label="i18n.baseText('generic.description')"
:tooltip-text="textareaTip"
>
<N8nInput
ref="descriptionInput"
v-model="descriptionValue"
:placeholder="textareaTip"
:rows="6"
data-test-id="workflow-description-input"
type="textarea"
@keydown="handleKeyDown"
/>
</N8nInputLabel>
</div>
<footer :class="$style['popover-footer']">
<N8nButton
:label="i18n.baseText('generic.cancel')"
:size="'small'"
:disabled="isSaving"
type="tertiary"
data-test-id="workflow-description-cancel-button"
@click="cancel"
/>
<N8nButton
:label="i18n.baseText('generic.unsavedWork.confirmMessage.confirmButtonText')"
:size="'small'"
:loading="isSaving"
:disabled="!canSave || isSaving"
type="primary"
data-test-id="workflow-description-save-button"
@click="save"
/>
</footer>
</template>
</N8nPopoverReka>
</div>
</N8nTooltip>
</template>
<style module lang="scss">
.description-button {
border: none;
position: relative;
&.active {
color: var(--color--background--shade-2);
}
&:hover,
&:focus,
&:focus-visible,
&:active {
background: none;
background-color: transparent !important;
outline: none !important;
color: var(--color--background--shade-2) !important;
}
}
.description-edit-content {
display: flex;
flex-direction: column;
padding: var(--spacing--xs);
width: 400px;
}
.popover-footer {
display: flex;
justify-content: flex-end;
gap: var(--spacing--2xs);
padding: 0 var(--spacing--xs) var(--spacing--xs);
}
</style>

View File

@@ -42,8 +42,6 @@ import {
} from 'vue';
import { useRoute, useRouter } from 'vue-router';
import WorkflowDescriptionPopover from './WorkflowDescriptionPopover.vue';
import { N8nBadge, N8nInlineTextEdit } from '@n8n/design-system';
import { useSettingsStore } from '@/app/stores/settings.store';
import { useUIStore } from '@/app/stores/ui.store';
@@ -549,11 +547,6 @@ onBeforeUnmount(() => {
>
{{ locale.baseText('workflows.item.archived') }}
</N8nBadge>
<WorkflowDescriptionPopover
v-else-if="!props.readOnly && workflowPermissions.update"
:workflow-id="props.id"
:workflow-description="props.description"
/>
</span>
</span>

View File

@@ -28,7 +28,6 @@ import {
EXPERIMENT_TEMPLATES_DATA_QUALITY_KEY,
} from '@/app/constants';
import { EXTERNAL_LINKS } from '@/app/constants/externalLinks';
import { CHAT_VIEW } from '@/features/ai/chatHub/constants';
import { hasPermission } from '@/app/utils/rbac/permissions';
import { useCloudPlanStore } from '@/app/stores/cloudPlan.store';
import { useRootStore } from '@n8n/stores/useRootStore';
@@ -117,16 +116,6 @@ const mainMenuItems = computed<IMenuItem[]>(() => [
icon: 'cloud',
available: settingsStore.isCloudDeployment && hasPermission(['instanceOwner']),
},
{
id: 'chat',
icon: 'message-circle',
label: 'Chat',
position: 'bottom',
route: { to: { name: CHAT_VIEW } },
available:
settingsStore.isChatFeatureEnabled &&
hasPermission(['rbac'], { rbac: { scope: 'chatHub:message' } }),
},
{
// Link to in-app pre-built agent templates, available when the experiment is enabled
id: 'templates',

View File

@@ -30,6 +30,7 @@ import {
EXPERIMENT_TEMPLATE_RECO_V3_KEY,
EXPERIMENT_TEMPLATES_DATA_QUALITY_KEY,
CONFIRM_PASSWORD_MODAL_KEY,
WORKFLOW_DESCRIPTION_MODAL_KEY,
WORKFLOW_PUBLISH_MODAL_KEY,
WORKFLOW_HISTORY_PUBLISH_MODAL_KEY,
} from '@/app/constants';
@@ -114,6 +115,7 @@ import NodeRecommendationModalV2 from '@/experiments/templateRecoV2/components/N
import NodeRecommendationModalV3 from '@/experiments/personalizedTemplatesV3/components/NodeRecommendationModal.vue';
import NodeRecommendationModalTDQ from '@/experiments/templatesDataQuality/components/NodeRecommendationModal.vue';
import VariableModal from '@/features/settings/environments.ee/components/VariableModal.vue';
import WorkflowDescriptionModal from '@/app/components/WorkflowDescriptionModal.vue';
import WorkflowPublishModal from '@/app/components/MainHeader/WorkflowPublishModal.vue';
import WorkflowHistoryPublishModal from '@/features/workflows/workflowHistory/components/WorkflowHistoryPublishModal.vue';
</script>
@@ -419,6 +421,12 @@ import WorkflowHistoryPublishModal from '@/features/workflows/workflowHistory/co
</template>
</ModalRoot>
<ModalRoot :name="WORKFLOW_DESCRIPTION_MODAL_KEY">
<template #default="{ modalName, data }">
<WorkflowDescriptionModal :modal-name="modalName" :data="data" />
</template>
</ModalRoot>
<ModalRoot :name="WORKFLOW_PUBLISH_MODAL_KEY">
<template #default="{ modalName, data }">
<WorkflowPublishModal :modal-name="modalName" :data="data" />

View File

@@ -0,0 +1,490 @@
import { createComponentRenderer } from '@/__tests__/render';
import { type MockedStore, mockedStore } from '@/__tests__/utils';
import { createTestingPinia } from '@pinia/testing';
import userEvent from '@testing-library/user-event';
import WorkflowDescriptionModal from '@/app/components/WorkflowDescriptionModal.vue';
import { useWorkflowsStore } from '@/app/stores/workflows.store';
import { useUIStore } from '@/app/stores/ui.store';
import { useSettingsStore } from '@/app/stores/settings.store';
import { useToast } from '@/app/composables/useToast';
import { useTelemetry } from '@/app/composables/useTelemetry';
import { STORES } from '@n8n/stores';
import { WORKFLOW_DESCRIPTION_MODAL_KEY } from '../constants';
vi.mock('@/app/composables/useToast', () => {
const showError = vi.fn();
return {
useToast: () => ({
showError,
}),
};
});
vi.mock('@/app/composables/useTelemetry', () => {
const track = vi.fn();
return {
useTelemetry: () => ({
track,
}),
};
});
const initialState = {
[STORES.SETTINGS]: {
settings: {
modules: {
mcp: {
enabled: false,
},
},
},
},
};
const ModalStub = {
template: `
<div>
<slot name="header" />
<slot name="title" />
<slot name="content" />
<slot name="footer" />
</div>
`,
};
const global = {
stubs: {
Modal: ModalStub,
},
};
const renderModal = createComponentRenderer(WorkflowDescriptionModal);
let pinia: ReturnType<typeof createTestingPinia>;
describe('WorkflowDescriptionModal', () => {
let workflowsStore: MockedStore<typeof useWorkflowsStore>;
let uiStore: MockedStore<typeof useUIStore>;
let settingsStore: MockedStore<typeof useSettingsStore>;
let telemetry: ReturnType<typeof useTelemetry>;
let toast: ReturnType<typeof useToast>;
beforeEach(() => {
pinia = createTestingPinia({ initialState });
workflowsStore = mockedStore(useWorkflowsStore);
uiStore = mockedStore(useUIStore);
settingsStore = mockedStore(useSettingsStore);
telemetry = useTelemetry();
toast = useToast();
// Reset mocks
workflowsStore.saveWorkflowDescription = vi.fn().mockResolvedValue(undefined);
workflowsStore.workflow = {
id: 'test-workflow-id',
name: 'Test Workflow',
active: false,
activeVersionId: null,
isArchived: false,
createdAt: Date.now(),
updatedAt: Date.now(),
versionId: '1',
nodes: [],
connections: {},
};
uiStore.stateIsDirty = false;
});
afterEach(() => {
vi.clearAllMocks();
});
describe('Component rendering', () => {
it('should render empty string if there is no description', async () => {
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
},
},
pinia,
global,
});
const textarea = getByTestId('workflow-description-input');
expect(textarea).toHaveValue('');
});
});
describe('Popover interaction', () => {
it('should focus textarea when modal opens', async () => {
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
},
},
pinia,
global,
});
// Wait out the modal's 150ms focus delay (see WorkflowDescriptionModal onMounted)
await new Promise((resolve) => setTimeout(resolve, 200));
const textarea = getByTestId('workflow-description-input');
expect(textarea).toHaveFocus();
});
it('should not save description when modal closes by esc', async () => {
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
},
pinia,
global,
});
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
await userEvent.type(textarea, 'Updated description');
await userEvent.type(textarea, '{Escape}');
expect(workflowsStore.saveWorkflowDescription).not.toHaveBeenCalled();
});
});
describe('Save and Cancel functionality', () => {
it('should save description when save button is clicked', async () => {
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
},
pinia,
global,
});
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
await userEvent.type(textarea, 'New description');
const saveButton = getByTestId('workflow-description-save-button');
await userEvent.click(saveButton);
expect(workflowsStore.saveWorkflowDescription).toHaveBeenCalledWith(
'test-workflow-id',
'New description',
);
expect(telemetry.track).toHaveBeenCalledWith('User set workflow description', {
workflow_id: 'test-workflow-id',
description: 'New description',
});
});
it('should save empty string when description is cleared', async () => {
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
},
pinia,
global,
});
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
const saveButton = getByTestId('workflow-description-save-button');
await userEvent.click(saveButton);
expect(workflowsStore.saveWorkflowDescription).toHaveBeenCalledWith('test-workflow-id', '');
expect(telemetry.track).toHaveBeenCalledWith('User set workflow description', {
workflow_id: 'test-workflow-id',
description: '',
});
});
it('should disable save button when description has not changed', async () => {
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
},
pinia,
global,
});
const saveButton = getByTestId('workflow-description-save-button');
expect(saveButton).toBeDisabled();
});
it('should disable save button when whitespace-only changes result in same trimmed value', async () => {
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: '',
},
},
pinia,
global,
});
const textarea = getByTestId('workflow-description-input');
// Type only whitespace
await userEvent.type(textarea, ' ');
const saveButton = getByTestId('workflow-description-save-button');
// Should be disabled since trimmed value is still empty
expect(saveButton).toBeDisabled();
});
it('should not save on Enter key when only whitespace is entered', async () => {
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: '',
},
},
pinia,
global,
});
const textarea = getByTestId('workflow-description-input');
// Type only whitespace
await userEvent.type(textarea, ' ');
await userEvent.keyboard('{Enter}');
// Should not save since canSave is false
expect(workflowsStore.saveWorkflowDescription).not.toHaveBeenCalled();
});
it('should enable save button when description changes', async () => {
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
},
pinia,
global,
});
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, ' updated');
const saveButton = getByTestId('workflow-description-save-button');
expect(saveButton).not.toBeDisabled();
});
it('should disable cancel button during save', async () => {
workflowsStore.saveWorkflowDescription = vi.fn(
async () => await new Promise((resolve) => setTimeout(resolve, 100)),
);
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
},
pinia,
global,
});
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, ' updated');
const saveButton = getByTestId('workflow-description-save-button');
const cancelButton = getByTestId('workflow-description-cancel-button');
await userEvent.click(saveButton);
// During save, cancel should be disabled
expect(cancelButton).toBeDisabled();
});
});
describe('Keyboard shortcuts', () => {
it('should save when Enter key is pressed', async () => {
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
},
pinia,
global,
});
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
await userEvent.type(textarea, 'New description');
await userEvent.keyboard('{Enter}');
expect(workflowsStore.saveWorkflowDescription).toHaveBeenCalledWith(
'test-workflow-id',
'New description',
);
});
it('should allow new lines with Shift+Enter', async () => {
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: '',
},
},
pinia,
global,
});
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, 'Line 1');
await userEvent.keyboard('{Shift>}{Enter}{/Shift}');
await userEvent.type(textarea, 'Line 2');
expect(textarea).toHaveValue('Line 1\nLine 2');
expect(workflowsStore.saveWorkflowDescription).not.toHaveBeenCalled();
});
});
describe('Error handling', () => {
it('should show error toast when save fails', async () => {
const error = new Error('Save failed');
workflowsStore.saveWorkflowDescription = vi.fn().mockRejectedValue(error);
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
},
pinia,
global,
});
const textarea = getByTestId('workflow-description-input');
await userEvent.type(textarea, ' updated');
const saveButton = getByTestId('workflow-description-save-button');
await userEvent.click(saveButton);
await vi.waitFor(() => {
expect(toast.showError).toHaveBeenCalledWith(
error,
'Problem updating workflow description',
);
});
});
it('should keep text on error', async () => {
const error = new Error('Save failed');
workflowsStore.saveWorkflowDescription = vi.fn().mockRejectedValue(error);
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: 'Initial description',
},
},
pinia,
global,
});
const textarea = getByTestId('workflow-description-input');
await userEvent.clear(textarea);
await userEvent.type(textarea, 'Failed update');
const saveButton = getByTestId('workflow-description-save-button');
await userEvent.click(saveButton);
await vi.waitFor(() => {
expect(textarea).toHaveValue('Failed update');
});
});
});
describe('MCP tooltips', () => {
it('should show base tooltip when MCP is disabled', async () => {
// Ensure MCP is disabled
settingsStore.isModuleActive = vi.fn().mockReturnValue(false);
settingsStore.moduleSettings.mcp = { mcpAccessEnabled: false };
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: '',
},
},
pinia,
global,
});
// The tip is rendered as helper text above the textarea
const tooltip = getByTestId('descriptionTooltip');
const tooltipText = tooltip.textContent;
expect(tooltipText).toContain(
'Clear descriptions help other users understand the purpose of your workflow',
);
expect(tooltipText).not.toContain('MCP clients');
});
it('should show MCP tooltip when MCP is enabled', async () => {
// Enable MCP module
settingsStore.isModuleActive = vi.fn().mockReturnValue(true);
settingsStore.moduleSettings.mcp = { mcpAccessEnabled: true };
const { getByTestId } = renderModal({
props: {
modalName: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: {
workflowId: 'test-workflow-id',
workflowDescription: '',
},
},
pinia,
global,
});
const tooltip = getByTestId('descriptionTooltip');
const tooltipText = tooltip.textContent;
// When MCP is enabled, the helper text includes both the base tip and MCP-specific text
expect(tooltipText).toContain('MCP clients');
});
});
});

View File

@@ -0,0 +1,168 @@
<script setup lang="ts">
import { computed, ref, useTemplateRef } from 'vue';
import { N8nButton, N8nInput, N8nText } from '@n8n/design-system';
import { useI18n } from '@n8n/i18n';
import { useSettingsStore } from '@/app/stores/settings.store';
import { useWorkflowsStore } from '@/app/stores/workflows.store';
import { useToast } from '@/app/composables/useToast';
import { useTelemetry } from '@/app/composables/useTelemetry';
import { WORKFLOW_DESCRIPTION_MODAL_KEY } from '../constants';
import { createEventBus } from '@n8n/utils/event-bus';
import Modal from './Modal.vue';
import { onMounted } from 'vue';
const props = defineProps<{
modalName: string;
data: {
workflowId: string;
workflowDescription?: string | null;
};
}>();
const modalBus = createEventBus();
const i18n = useI18n();
const toast = useToast();
const telemetry = useTelemetry();
const settingsStore = useSettingsStore();
const workflowStore = useWorkflowsStore();
const descriptionValue = ref(props.data.workflowDescription ?? '');
const descriptionInput = useTemplateRef<HTMLInputElement>('descriptionInput');
const isSaving = ref(false);
const normalizedCurrentValue = computed(() => (descriptionValue.value ?? '').trim());
const normalizedLastSaved = computed(() => (props.data.workflowDescription ?? '').trim());
const canSave = computed(() => normalizedCurrentValue.value !== normalizedLastSaved.value);
const isMcpEnabled = computed(
() => settingsStore.isModuleActive('mcp') && settingsStore.moduleSettings.mcp?.mcpAccessEnabled,
);
// Helper text shown above the textarea; mentions MCP clients when MCP access is enabled
const textareaTip = computed(() =>
isMcpEnabled.value
? i18n.baseText('workflow.description.mcp')
: i18n.baseText('workflow.description.nomcp'),
);
const saveDescription = async () => {
isSaving.value = true;
try {
await workflowStore.saveWorkflowDescription(
props.data.workflowId,
normalizedCurrentValue.value ?? null,
);
telemetry.track('User set workflow description', {
workflow_id: props.data.workflowId,
description: normalizedCurrentValue.value ?? null,
});
} catch (error) {
toast.showError(error, i18n.baseText('workflow.description.error.title'));
} finally {
isSaving.value = false;
}
};
const cancel = () => {
modalBus.emit('close');
};
const save = async () => {
await saveDescription();
modalBus.emit('close');
};
const handleKeyDown = async (event: KeyboardEvent) => {
// Escape - cancel editing
if (event.key === 'Escape') {
event.preventDefault();
event.stopPropagation();
cancel();
}
// Enter (without Shift) - save and close
if (event.key === 'Enter' && !event.shiftKey) {
if (!canSave.value) {
return;
}
event.preventDefault();
event.stopPropagation();
await save();
}
};
onMounted(() => {
// Delay focus until the modal's open transition has finished
setTimeout(() => {
descriptionInput.value?.focus();
}, 150);
});
</script>
<template>
<Modal
:name="WORKFLOW_DESCRIPTION_MODAL_KEY"
:title="i18n.baseText('generic.description')"
width="500"
:class="$style.container"
:event-bus="modalBus"
:close-on-click-modal="false"
>
<template #content>
<div
:class="$style['description-edit-content']"
data-test-id="workflow-description-edit-content"
>
<N8nText color="text-base" data-test-id="descriptionTooltip">{{ textareaTip }}</N8nText>
<N8nInput
ref="descriptionInput"
v-model="descriptionValue"
:rows="6"
data-test-id="workflow-description-input"
type="textarea"
@keydown="handleKeyDown"
/>
</div>
</template>
<template #footer>
<div :class="$style['popover-footer']">
<N8nButton
:label="i18n.baseText('generic.cancel')"
:size="'small'"
:disabled="isSaving"
type="tertiary"
data-test-id="workflow-description-cancel-button"
@click="cancel"
/>
<N8nButton
:label="i18n.baseText('generic.unsavedWork.confirmMessage.confirmButtonText')"
:loading="isSaving"
:disabled="!canSave || isSaving"
type="primary"
data-test-id="workflow-description-save-button"
@click="save"
/>
</div>
</template>
</Modal>
</template>
<style module lang="scss">
.description-edit-content {
display: flex;
flex-direction: column;
gap: var(--spacing--xs);
padding: var(--spacing--s);
}
.popover-footer {
display: flex;
justify-content: flex-end;
gap: var(--spacing--2xs);
}
</style>

View File

@@ -4,6 +4,7 @@ export const enum WORKFLOW_MENU_ACTIONS {
IMPORT_FROM_URL = 'import-from-url',
IMPORT_FROM_FILE = 'import-from-file',
PUSH = 'push',
EDIT_DESCRIPTION = 'edit-description',
SETTINGS = 'settings',
DELETE = 'delete',
ARCHIVE = 'archive',
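The wiring from this new menu action to the description modal is not shown in this diff; a hedged sketch of how a handler would presumably dispatch it, assuming the UI store's existing openModalWithData helper (the surrounding switch and prop names are illustrative):

case WORKFLOW_MENU_ACTIONS.EDIT_DESCRIPTION:
uiStore.openModalWithData({
name: WORKFLOW_DESCRIPTION_MODAL_KEY,
data: { workflowId: props.id, workflowDescription: props.description },
});
break;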

View File

@@ -35,5 +35,6 @@ export const CHAT_HUB_SIDE_MENU_DRAWER_MODAL_KEY = 'chatHubSideMenuDrawer';
export const EXPERIMENT_TEMPLATE_RECO_V2_KEY = 'templateRecoV2';
export const EXPERIMENT_TEMPLATE_RECO_V3_KEY = 'templateRecoV3';
export const EXPERIMENT_TEMPLATES_DATA_QUALITY_KEY = 'templatesDataQuality';
export const WORKFLOW_DESCRIPTION_MODAL_KEY = 'workflowDescription';
export const WORKFLOW_PUBLISH_MODAL_KEY = 'workflowPublish';
export const WORKFLOW_HISTORY_PUBLISH_MODAL_KEY = 'workflowHistoryPublish';

View File

@@ -32,6 +32,7 @@ import {
EXPERIMENT_TEMPLATE_RECO_V3_KEY,
WORKFLOW_PUBLISH_MODAL_KEY,
EXPERIMENT_TEMPLATES_DATA_QUALITY_KEY,
WORKFLOW_DESCRIPTION_MODAL_KEY,
WORKFLOW_HISTORY_PUBLISH_MODAL_KEY,
WORKFLOW_HISTORY_VERSION_UNPUBLISH,
} from '@/app/constants';
@@ -151,6 +152,7 @@ export const useUIStore = defineStore(STORES.UI, () => {
WORKFLOW_DIFF_MODAL_KEY,
EXPERIMENT_TEMPLATE_RECO_V3_KEY,
VARIABLE_MODAL_KEY,
WORKFLOW_DESCRIPTION_MODAL_KEY,
WORKFLOW_PUBLISH_MODAL_KEY,
WORKFLOW_HISTORY_PUBLISH_MODAL_KEY,
WORKFLOW_HISTORY_VERSION_UNPUBLISH,

View File

@@ -1713,18 +1713,20 @@ export const useWorkflowsStore = defineStore(STORES.WORKFLOWS, () => {
description,
});
if (workflowsById.value[id]) {
workflowsById.value[id] = {
...workflowsById.value[id],
description: updated.description,
versionId: updated.versionId,
};
}
// Update local store state
if (isCurrentWorkflow) {
setDescription(updated.description ?? '');
if (updated.versionId !== currentVersionId) {
setWorkflowVersionId(updated.versionId);
}
} else if (workflowsById.value[id]) {
workflowsById.value[id] = {
...workflowsById.value[id],
description: updated.description,
versionId: updated.versionId,
};
}
return updated;

View File

@@ -768,11 +768,12 @@ export const useBuilderStore = defineStore(STORES.BUILDER, () => {
watch(
() => workflowsStore.workflowId,
(newWorkflowId) => {
// Only fetch if we have a valid workflow ID, and we're in a builder-enabled view
// Only fetch if we have a valid workflow ID, AI builder is enabled, and we're in a builder-enabled view
if (
newWorkflowId &&
newWorkflowId !== PLACEHOLDER_EMPTY_WORKFLOW_ID &&
BUILDER_ENABLED_VIEWS.includes(route.name as VIEWS)
BUILDER_ENABLED_VIEWS.includes(route.name as VIEWS) &&
isAIBuilderEnabled.value
) {
void fetchSessionsMetadata();
} else {

View File

@@ -32,7 +32,7 @@ import {
type ChatModelDto,
} from '@n8n/api-types';
import { N8nIconButton, N8nScrollArea, N8nText } from '@n8n/design-system';
import { useLocalStorage, useMediaQuery, useScroll } from '@vueuse/core';
import { useElementSize, useLocalStorage, useMediaQuery, useScroll } from '@vueuse/core';
import { v4 as uuidv4 } from 'uuid';
import { computed, nextTick, ref, useTemplateRef, watch } from 'vue';
import { useRoute, useRouter } from 'vue-router';
@@ -62,12 +62,15 @@ const i18n = useI18n();
const headerRef = useTemplateRef('headerRef');
const inputRef = useTemplateRef('inputRef');
const scrollableRef = useTemplateRef('scrollable');
const scrollableSize = useElementSize(scrollableRef);
const sessionId = computed<string>(() =>
typeof route.params.id === 'string' ? route.params.id : uuidv4(),
);
const isResponding = computed(() => chatStore.isResponding(sessionId.value));
const isNewSession = computed(() => sessionId.value !== route.params.id);
const scrollableRef = useTemplateRef('scrollable');
const scrollContainerRef = computed(() => scrollableRef.value?.parentElement ?? null);
const currentConversation = computed(() =>
sessionId.value ? chatStore.sessions.byId[sessionId.value] : undefined,
@@ -317,6 +320,7 @@ watch(
[sessionId, isNewSession],
async ([id, isNew]) => {
didSubmitInCurrentSession.value = false;
editingMessageId.value = undefined;
if (!isNew && !chatStore.getConversation(id)) {
try {
@@ -362,7 +366,7 @@ watch(
watch(
defaultAgent,
(agent, prevAgent) => {
if (defaultModel.value && agent && agent.name !== prevAgent?.name) {
if (defaultModel.value && agent?.name && agent.name !== prevAgent?.name) {
defaultModel.value = { ...defaultModel.value, cachedDisplayName: agent.name };
}
@@ -605,6 +609,7 @@ function onFilesDropped(files: File[]) {
? scrollContainerRef.offsetHeight - 30 /* padding-top */ - 200 /* padding-bottom */
: undefined
"
:container-width="scrollableSize.width.value ?? 0"
@start-edit="handleStartEditMessage(message.id)"
@cancel-edit="handleCancelEditMessage"
@regenerate="handleRegenerateMessage"
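For reference, useElementSize from @vueuse/core returns reactive dimension refs backed by a ResizeObserver; a minimal sketch of the pattern used above (the template ref name is illustrative):

import { useElementSize } from '@vueuse/core';
import { useTemplateRef } from 'vue';

const scrollable = useTemplateRef<HTMLElement>('scrollable');
const { width } = useElementSize(scrollable); // Ref<number>, re-evaluated on resize

// width.value can then be passed down as a prop and exposed to CSS, e.g.
// :style="{ '--container--width': `${width.value}px` }"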

View File

@@ -18,18 +18,26 @@ import { useDeviceSupport } from '@n8n/composables/useDeviceSupport';
import { useI18n } from '@n8n/i18n';
import CopyButton from '@/features/ai/chatHub/components/CopyButton.vue';
const { message, compact, isEditing, isStreaming, minHeight, cachedAgentDisplayName } =
defineProps<{
message: ChatMessage;
compact: boolean;
isEditing: boolean;
isStreaming: boolean;
cachedAgentDisplayName: string | null;
/**
* minHeight allows scrolling agent's response to the top while it is being generated
*/
minHeight?: number;
}>();
const {
message,
compact,
isEditing,
isStreaming,
minHeight,
cachedAgentDisplayName,
containerWidth,
} = defineProps<{
message: ChatMessage;
compact: boolean;
isEditing: boolean;
isStreaming: boolean;
cachedAgentDisplayName: string | null;
/**
* minHeight allows scrolling agent's response to the top while it is being generated
*/
minHeight?: number;
containerWidth: number;
}>();
const emit = defineEmits<{
startEdit: [];
@@ -48,7 +56,7 @@ const styles = useCssModule();
const editedText = ref('');
const hoveredCodeBlockActions = ref<HTMLElement | null>(null);
const textareaRef = useTemplateRef('textarea');
const markdown = useChatHubMarkdownOptions(styles.codeBlockActions);
const markdown = useChatHubMarkdownOptions(styles.codeBlockActions, styles.tableContainer);
const messageContent = computed(() => message.content);
const speech = useSpeechSynthesis(messageContent, {
@@ -106,6 +114,11 @@ function handleConfirmEdit() {
}
function handleKeydownTextarea(e: KeyboardEvent) {
if (e.key === 'Escape') {
emit('cancelEdit');
return;
}
const trimmed = editedText.value.trim();
if (e.key === 'Enter' && isCtrlKeyPressed(e) && !e.isComposing && trimmed) {
@@ -181,7 +194,10 @@ onBeforeMount(() => {
[$style.compact]: compact,
},
]"
:style="minHeight ? { minHeight: `${minHeight}px` } : undefined"
:style="{
minHeight: minHeight ? `${minHeight}px` : undefined,
'--container--width': `${containerWidth}px`,
}"
:data-message-id="message.id"
>
<div :class="$style.avatar">
@@ -404,6 +420,7 @@ onBeforeMount(() => {
}
pre {
width: 100%;
font-family: inherit;
font-size: inherit;
margin: 0;
@@ -430,18 +447,27 @@ onBeforeMount(() => {
}
}
.tableContainer {
/* Full-bleed horizontal scroll: stretch to the chat container width, then
offset the centered message column with matching padding and negative margin */
width: var(--container--width);
padding-bottom: 1em;
padding-left: calc((var(--container--width) - 100%) / 2);
padding-right: var(--spacing--lg);
margin-left: calc(-1 * (var(--container--width) - 100%) / 2);
overflow-x: auto;
}
table {
width: 100%;
width: fit-content;
border-bottom: var(--border);
border-top: var(--border);
border-width: 2px;
margin-bottom: 1em;
border-color: var(--color--text--shade-1);
}
th,
td {
padding: 0.25em 1em 0.25em 0;
min-width: 12em;
}
th {
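To make the calc() pairs in .tableContainer concrete: with --container--width at 800px and a 600px message column (the 100%), both padding-left and the negated margin-left resolve to (800px - 600px) / 2 = 100px, so the table's left edge stays aligned with the column while the scroll area spans the full 800px container.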

View File

@@ -125,6 +125,8 @@ function handleSubmitForm() {
function handleKeydownTextarea(e: KeyboardEvent) {
const trimmed = message.value.trim();
speechInput.stop();
if (e.key === 'Enter' && !e.shiftKey && !e.isComposing && trimmed) {
e.preventDefault();
speechInput.stop();

View File

@@ -108,9 +108,9 @@ defineSlots<{
.actionDropdown {
opacity: 0;
transition: opacity 0.2s;
flex-shrink: 0;
width: 0;
overflow: hidden;
.menuItem:has([aria-expanded='true']) &,
.menuItem:has(:focus) &,

View File

@@ -13,7 +13,10 @@ let asyncImport:
| { status: 'uninitialized' }
| { status: 'done' } = { status: 'uninitialized' };
export function useChatHubMarkdownOptions(codeBlockActionsClassName: string) {
export function useChatHubMarkdownOptions(
codeBlockActionsClassName: string,
tableContainerClassName: string,
) {
const forceReRenderKey = ref(0);
const codeBlockContents = ref<Map<string, string>>();
@@ -100,7 +103,28 @@ export function useChatHubMarkdownOptions(codeBlockActionsClassName: string) {
);
};
};
return [linksNewTabPlugin, codeBlockPlugin];
const tablePlugin = (vueMarkdownItInstance: MarkdownIt) => {
const defaultTableOpenRenderer = vueMarkdownItInstance.renderer.rules.table_open;
const defaultTableCloseRenderer = vueMarkdownItInstance.renderer.rules.table_close;
vueMarkdownItInstance.renderer.rules.table_open = (tokens, idx, options, env, self) => {
const defaultRendered =
defaultTableOpenRenderer?.(tokens, idx, options, env, self) ??
self.renderToken(tokens, idx, options);
return defaultRendered.replace('<table', `<div class="${tableContainerClassName}"><table`);
};
vueMarkdownItInstance.renderer.rules.table_close = (tokens, idx, options, env, self) => {
const defaultRendered =
defaultTableCloseRenderer?.(tokens, idx, options, env, self) ??
self.renderToken(tokens, idx, options);
return defaultRendered.replace('</table>', '</table></div>');
};
};
return [linksNewTabPlugin, codeBlockPlugin, tablePlugin];
});
return { options, forceReRenderKey, plugins, codeBlockContents };
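The table plugin above relies on markdown-it's renderer-rule chaining; a minimal standalone sketch of the same wrapping pattern (the class name and sample input are illustrative):

import MarkdownIt from 'markdown-it';

const md = new MarkdownIt();
const defaultOpen = md.renderer.rules.table_open;
// Wrap every rendered <table> in a horizontally scrollable container div
md.renderer.rules.table_open = (tokens, idx, options, env, self) => {
const rendered = defaultOpen?.(tokens, idx, options, env, self) ?? self.renderToken(tokens, idx, options);
return rendered.replace('<table', '<div class="table-container"><table');
};
md.renderer.rules.table_close = (tokens, idx, options, env, self) => {
return self.renderToken(tokens, idx, options).replace('</table>', '</table></div>');
};
// md.render('| a |\n| - |\n| 1 |') now yields <div class="table-container"><table>…</table></div>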

View File

@@ -9,8 +9,10 @@ import { useI18n } from '@n8n/i18n';
import { computed, onBeforeMount, onBeforeUnmount } from 'vue';
import { useProjectsStore } from '../projects.store';
import type { ProjectListItem } from '../projects.types';
import { CHAT_VIEW } from '@/features/ai/chatHub/constants';
import { N8nButton, N8nHeading, N8nMenuItem, N8nTooltip } from '@n8n/design-system';
import { hasPermission } from '@/app/utils/rbac/permissions';
type Props = {
collapsed: boolean;
@@ -29,6 +31,11 @@ const usersStore = useUsersStore();
const isCreatingProject = computed(() => globalEntityCreation.isCreatingProject.value);
const displayProjects = computed(() => globalEntityCreation.displayProjects.value);
const isFoldersFeatureEnabled = computed(() => settingsStore.isFoldersFeatureEnabled);
const isChatLinkAvailable = computed(
() =>
settingsStore.isChatFeatureEnabled &&
hasPermission(['rbac'], { rbac: { scope: 'chatHub:message' } }),
);
const hasMultipleVerifiedUsers = computed(
() => usersStore.allUsers.filter((user) => !user.isPendingUser).length > 1,
);
@@ -87,6 +94,14 @@ const activeTabId = computed(() => {
);
});
const chat = computed<IMenuItem>(() => ({
id: 'chat',
icon: 'message-circle',
label: 'Chat',
position: 'bottom',
route: { to: { name: CHAT_VIEW } },
}));
async function onSourceControlPull() {
// Update myProjects for the sidebar display
await projectsStore.getMyProjects();
@@ -128,6 +143,13 @@ onBeforeUnmount(() => {
:active="activeTabId === 'shared'"
data-test-id="project-shared-menu-item"
/>
<N8nMenuItem
v-if="isChatLinkAvailable"
:item="chat"
:compact="props.collapsed"
:active="activeTabId === 'chat'"
data-test-id="project-chat-menu-item"
/>
</div>
<N8nHeading
v-if="!props.collapsed && projectsStore.isTeamProjectFeatureEnabled"

View File

@@ -13,6 +13,7 @@ export const COMPILER_OPTIONS: ts.CompilerOptions = {
importHelpers: false,
skipDefaultLibCheck: true,
noEmit: true,
noImplicitAny: false,
};
export const TYPESCRIPT_AUTOCOMPLETE_THRESHOLD = '15';
export const TYPESCRIPT_FILES = {
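For illustration, disabling noImplicitAny means un-annotated parameters in user snippets fall back to any instead of raising TS7006, so checking keeps working on loosely typed code:

// With noImplicitAny: true this is error TS7006 ("Parameter 'x' implicitly has an 'any' type")
// With noImplicitAny: false it type-checks, with x inferred as any
const double = (x) => x * 2;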

View File

@@ -1,804 +0,0 @@
import {
CODE_NODE_NAME,
CODE_NODE_DISPLAY_NAME,
MANUAL_TRIGGER_NODE_DISPLAY_NAME,
} from '../../config/constants';
import { test, expect } from '../../fixtures/base';
import type { n8nPage } from '../../pages/n8nPage';
test.describe('NDV', () => {
test.beforeEach(async ({ n8n }) => {
await n8n.start.fromBlankCanvas();
});
test('should show up when double clicked on a node and close when Back to canvas clicked', async ({
n8n,
}) => {
await n8n.canvas.addNode('Manual Trigger');
const canvasNodes = n8n.canvas.getCanvasNodes();
await canvasNodes.first().dblclick();
await expect(n8n.ndv.getContainer()).toBeVisible();
await n8n.ndv.clickBackToCanvasButton();
await expect(n8n.ndv.getContainer()).toBeHidden();
});
test('should show input panel when node is not connected', async ({ n8n }) => {
await n8n.canvas.addNode('Manual Trigger');
await n8n.canvas.deselectAll();
await n8n.canvas.addNode('Edit Fields (Set)', { closeNDV: true });
const canvasNodes = n8n.canvas.getCanvasNodes();
await canvasNodes.last().dblclick();
await expect(n8n.ndv.getContainer()).toBeVisible();
await expect(n8n.ndv.inputPanel.get()).toContainText('Wire me up');
});
test('should test webhook node', async ({ n8n }) => {
await n8n.canvas.addNode('Webhook', { closeNDV: false });
await n8n.ndv.execute();
const webhookUrl = await n8n.ndv.getWebhookUrl();
await expect(n8n.ndv.getWebhookTriggerListening()).toBeVisible();
const response = await n8n.ndv.makeWebhookRequest(webhookUrl as string);
expect(response.status()).toBe(200);
await expect(n8n.ndv.outputPanel.get()).toBeVisible();
await expect(n8n.ndv.outputPanel.getDataContainer()).toBeVisible();
});
test('should change input and go back to canvas', async ({ n8n }) => {
await n8n.start.fromImportedWorkflow('NDV-test-select-input.json');
await n8n.canvas.clickZoomToFitButton();
await n8n.canvas.getCanvasNodes().last().dblclick();
await n8n.ndv.execute();
await n8n.ndv.inputPanel.switchDisplayMode('table');
await n8n.ndv.inputPanel.getNodeInputOptions().last().click();
await expect(n8n.ndv.inputPanel.get()).toContainText('start');
await n8n.ndv.clickBackToCanvasButton();
await expect(n8n.ndv.getContainer()).toBeHidden();
});
test('should show correct validation state for resource locator params', async ({ n8n }) => {
await n8n.canvas.addNode('Typeform Trigger', { closeNDV: false });
await expect(n8n.ndv.getContainer()).toBeVisible();
await n8n.ndv.clickBackToCanvasButton();
await n8n.canvas.openNode('Typeform Trigger');
await expect(n8n.canvas.getNodeIssuesByName('Typeform Trigger')).toBeVisible();
});
test('should show validation errors only after blur or re-opening of NDV', async ({ n8n }) => {
await n8n.canvas.addNode('Manual Trigger');
await n8n.canvas.addNode('Airtable', { closeNDV: false, action: 'Search records' });
await expect(n8n.ndv.getContainer()).toBeVisible();
await expect(n8n.canvas.getNodeIssuesByName('Airtable')).toBeHidden();
await n8n.ndv.getParameterInputField('table').nth(1).focus();
await n8n.ndv.getParameterInputField('table').nth(1).blur();
await n8n.ndv.getParameterInputField('base').nth(1).focus();
await n8n.ndv.getParameterInputField('base').nth(1).blur();
await expect(n8n.ndv.getParameterInput('base')).toHaveClass(/has-issues|error|invalid/);
await expect(n8n.ndv.getParameterInput('table')).toHaveClass(/has-issues|error|invalid/);
await n8n.ndv.clickBackToCanvasButton();
await n8n.canvas.openNode('Search records');
await expect(n8n.canvas.getNodeIssuesByName('Search records')).toBeVisible();
});
test('should show all validation errors when opening pasted node', async ({ n8n }) => {
await n8n.start.fromImportedWorkflow('Test_workflow_ndv_errors.json');
const canvasNodes = n8n.canvas.getCanvasNodes();
await expect(canvasNodes).toHaveCount(1);
await n8n.canvas.openNode('Airtable');
await expect(n8n.canvas.getNodeIssuesByName('Airtable')).toBeVisible();
});
test('should render run errors correctly', async ({ n8n }) => {
await n8n.start.fromImportedWorkflow('Test_workflow_ndv_run_error.json');
await n8n.canvas.openNode('Error');
await n8n.ndv.execute();
await expect(n8n.ndv.getNodeRunErrorMessage()).toHaveText(
"Paired item data for item from node 'Break pairedItem chain' is unavailable. Ensure 'Break pairedItem chain' is providing the required output.",
);
await expect(n8n.ndv.getNodeRunErrorDescription()).toContainText(
"An expression here won't work because it uses .item and n8n can't figure out the matching item.",
);
await expect(n8n.ndv.getNodeRunErrorMessage()).toBeVisible();
await expect(n8n.ndv.getNodeRunErrorDescription()).toBeVisible();
});
test('should save workflow using keyboard shortcut from NDV', async ({ n8n }) => {
await n8n.canvas.addNode('Manual Trigger');
await n8n.canvas.addNode('Edit Fields (Set)', { closeNDV: false });
await expect(n8n.ndv.getContainer()).toBeVisible();
await n8n.page.keyboard.press('ControlOrMeta+s');
await expect(n8n.canvas.getWorkflowSaveButton()).toBeHidden();
});
test('webhook should fallback to webhookId if path is empty', async ({ n8n }) => {
await n8n.canvas.addNode('Webhook', { closeNDV: false });
await expect(n8n.canvas.getNodeIssuesByName('Webhook')).toBeHidden();
await expect(n8n.ndv.getExecuteNodeButton()).toBeEnabled();
await expect(n8n.ndv.getTriggerPanelExecuteButton()).toBeVisible();
await n8n.ndv.getParameterInputField('path').clear();
const webhookUrlsContainer = n8n.ndv.getContainer().getByText('Webhook URLs').locator('..');
const urlText = await webhookUrlsContainer.textContent();
const uuidRegex = /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i;
expect(urlText).toMatch(uuidRegex);
await n8n.ndv.close();
await n8n.canvas.openNode('Webhook');
await n8n.ndv.fillParameterInput('path', 'test-path');
const updatedUrlText = await webhookUrlsContainer.textContent();
expect(updatedUrlText).toContain('test-path');
expect(updatedUrlText).not.toMatch(uuidRegex);
});
test.describe('test output schema view', () => {
const schemaKeys = [
'id',
'name',
'email',
'notes',
'country',
'created',
'objectValue',
'prop1',
'prop2',
];
const setupSchemaWorkflow = async (n8n: n8nPage) => {
await n8n.start.fromImportedWorkflow('Test_workflow_schema_test.json');
await n8n.canvas.clickZoomToFitButton();
await n8n.canvas.openNode('Set');
await n8n.ndv.execute();
};
test('should switch to output schema view and validate it', async ({ n8n }) => {
await setupSchemaWorkflow(n8n);
await n8n.ndv.outputPanel.switchDisplayMode('schema');
for (const key of schemaKeys) {
await expect(n8n.ndv.outputPanel.getSchemaItem(key)).toBeVisible();
}
});
test('should preserve schema view after execution', async ({ n8n }) => {
await setupSchemaWorkflow(n8n);
await n8n.ndv.outputPanel.switchDisplayMode('schema');
await n8n.ndv.execute();
for (const key of schemaKeys) {
await expect(n8n.ndv.outputPanel.getSchemaItem(key)).toBeVisible();
}
});
test('should collapse and expand nested schema object', async ({ n8n }) => {
await setupSchemaWorkflow(n8n);
const expandedObjectProps = ['prop1', 'prop2'];
await n8n.ndv.outputPanel.switchDisplayMode('schema');
for (const key of expandedObjectProps) {
await expect(n8n.ndv.outputPanel.getSchemaItem(key)).toBeVisible();
}
const objectValueItem = n8n.ndv.outputPanel.getSchemaItem('objectValue');
await objectValueItem.locator('.toggle').click();
for (const key of expandedObjectProps) {
await expect(n8n.ndv.outputPanel.getSchemaItem(key)).not.toBeInViewport();
}
});
test('should not display pagination for schema', async ({ n8n }) => {
await setupSchemaWorkflow(n8n);
await n8n.ndv.clickBackToCanvasButton();
await n8n.canvas.deselectAll();
await n8n.canvas.nodeByName('Set').click();
await n8n.canvas.addNode('Customer Datastore (n8n training)');
await n8n.canvas.openNode('Customer Datastore (n8n training)');
await n8n.ndv.execute();
await expect(n8n.ndv.outputPanel.get().getByText('5 items')).toBeVisible();
await n8n.ndv.outputPanel.switchDisplayMode('schema');
const schemaItemsCount = await n8n.ndv.outputPanel.getSchemaItems().count();
expect(schemaItemsCount).toBeGreaterThan(0);
await n8n.ndv.outputPanel.switchDisplayMode('json');
});
test('should display large schema', async ({ n8n }) => {
await n8n.start.fromImportedWorkflow('Test_workflow_schema_test_pinned_data.json');
await n8n.canvas.clickZoomToFitButton();
await n8n.canvas.openNode('Set');
await expect(n8n.ndv.outputPanel.get().getByText('20 items')).toBeVisible();
await expect(n8n.ndv.outputPanel.get().locator('[class*="_pagination"]')).toBeVisible();
await n8n.ndv.outputPanel.switchDisplayMode('schema');
await expect(n8n.ndv.outputPanel.get().locator('[class*="_pagination"]')).toBeHidden();
});
});
test('should display parameter hints correctly', async ({ n8n }) => {
await n8n.start.fromImportedWorkflow('Test_workflow_3.json');
await n8n.canvas.openNode('Set1');
await n8n.ndv.getParameterInputField('value').clear();
await n8n.ndv.getParameterInputField('value').fill('=');
await n8n.ndv.getInlineExpressionEditorContent().fill('hello');
await n8n.ndv.getParameterInputField('name').click();
await expect(n8n.ndv.getParameterExpressionPreviewValue()).toContainText('hello');
await n8n.ndv.getInlineExpressionEditorContent().fill('');
await n8n.ndv.getParameterInputField('name').click();
await expect(n8n.ndv.getParameterExpressionPreviewValue()).toContainText('[empty]');
await n8n.ndv.getInlineExpressionEditorContent().fill(' test');
await n8n.ndv.getParameterInputField('name').click();
await expect(n8n.ndv.getParameterExpressionPreviewValue()).toContainText(' test');
await n8n.ndv.getInlineExpressionEditorContent().fill(' ');
await n8n.ndv.getParameterInputField('name').click();
await expect(n8n.ndv.getParameterExpressionPreviewValue()).toContainText(' ');
await n8n.ndv.getInlineExpressionEditorContent().fill('<div></div>');
await n8n.ndv.getParameterInputField('name').click();
await expect(n8n.ndv.getParameterExpressionPreviewValue()).toContainText('<div></div>');
});
test('should properly show node execution indicator', async ({ n8n }) => {
await n8n.canvas.addNode('Manual Trigger');
await n8n.canvas.addNode('Code', { action: 'Code in JavaScript', closeNDV: false });
await expect(n8n.ndv.getNodeRunSuccessIndicator()).toBeHidden();
await expect(n8n.ndv.getNodeRunErrorIndicator()).toBeHidden();
await expect(n8n.ndv.getNodeRunTooltipIndicator()).toBeHidden();
await n8n.ndv.execute();
await expect(n8n.ndv.getNodeRunSuccessIndicator()).toBeVisible();
await expect(n8n.ndv.getNodeRunTooltipIndicator()).toBeVisible();
});
test('should show node name and version in settings', async ({ n8n }) => {
await n8n.start.fromImportedWorkflow('Test_workflow_ndv_version.json');
await n8n.canvas.openNode('Edit Fields (old)');
await n8n.ndv.openSettings();
await expect(n8n.ndv.getNodeVersion()).toContainText('Set node version 2');
await expect(n8n.ndv.getNodeVersion()).toContainText('Latest version: 3.4');
await n8n.ndv.close();
await n8n.canvas.openNode('Edit Fields (latest)');
await n8n.ndv.openSettings();
await expect(n8n.ndv.getNodeVersion()).toContainText('Edit Fields (Set) node version 3.4');
await expect(n8n.ndv.getNodeVersion()).toContainText('Latest');
await n8n.ndv.close();
await n8n.canvas.openNode('Function');
await n8n.ndv.openSettings();
await expect(n8n.ndv.getNodeVersion()).toContainText('Function node version 1');
await expect(n8n.ndv.getNodeVersion()).toContainText('Deprecated');
await n8n.ndv.close();
});
test('should not push NDV header out with a lot of code in Code node editor', async ({ n8n }) => {
await n8n.canvas.addNode('Manual Trigger');
await n8n.canvas.addNode('Code', { action: 'Code in JavaScript', closeNDV: false });
const codeEditor = n8n.ndv.getParameterInput('jsCode').locator('.cm-content');
await codeEditor.click();
await n8n.page.keyboard.press('ControlOrMeta+a');
await n8n.page.keyboard.press('Delete');
const dummyCode = Array(50)
.fill(
'console.log("This is a very long line of dummy JavaScript code that should not push the NDV header out of view");',
)
.join('\n');
await codeEditor.fill(dummyCode);
await expect(n8n.ndv.getExecuteNodeButton()).toBeVisible();
});
test('should allow editing code in fullscreen in the code editors', async ({ n8n }) => {
await n8n.canvas.addNode('Manual Trigger');
await n8n.canvas.addNode('Code', { action: 'Code in JavaScript', closeNDV: false });
await n8n.ndv.openCodeEditorFullscreen();
const fullscreenEditor = n8n.ndv.getCodeEditorFullscreen();
await fullscreenEditor.click();
await n8n.page.keyboard.press('ControlOrMeta+a');
await fullscreenEditor.fill('foo()');
await expect(fullscreenEditor).toContainText('foo()');
await n8n.ndv.closeCodeEditorDialog();
await expect(n8n.ndv.getParameterInput('jsCode').locator('.cm-content')).toContainText('foo()');
});
test('should keep search expanded after Execute step node run', async ({ n8n }) => {
await n8n.start.fromImportedWorkflow('Test_ndv_search.json');
await n8n.canvas.clickZoomToFitButton();
await n8n.workflowComposer.executeWorkflowAndWaitForNotification(
'Workflow executed successfully',
);
await n8n.canvas.openNode('Edit Fields');
await expect(n8n.ndv.outputPanel.get()).toBeVisible();
await n8n.ndv.searchOutputData('US');
await expect(n8n.ndv.outputPanel.getTableRow(1).locator('mark')).toContainText('US');
await n8n.ndv.execute();
await expect(n8n.ndv.outputPanel.getSearchInput()).toBeVisible();
await expect(n8n.ndv.outputPanel.getSearchInput()).toHaveValue('US');
});
test('Should render xml and html tags as strings and can search', async ({ n8n }) => {
await n8n.start.fromImportedWorkflow('Test_workflow_xml_output.json');
await n8n.workflowComposer.executeWorkflowAndWaitForNotification(
'Workflow executed successfully',
);
await n8n.canvas.openNode('Edit Fields');
await expect(n8n.ndv.outputPanel.get().locator('[class*="active"]')).toContainText('Table');
await expect(n8n.ndv.outputPanel.getTableRow(1)).toContainText(
'<?xml version="1.0" encoding="UTF-8"?> <library>',
);
await n8n.page.keyboard.press('/');
const searchInput = n8n.ndv.outputPanel.getSearchInput();
await expect(searchInput).toBeFocused();
await searchInput.fill('<lib');
await expect(n8n.ndv.outputPanel.getTableRow(1).locator('mark')).toContainText('<lib');
await n8n.ndv.outputPanel.switchDisplayMode('json');
await expect(n8n.ndv.outputPanel.getDataContainer().locator('.json-data')).toBeVisible();
});
test.describe('Run Data & Selectors - Advanced', () => {
test('can link and unlink run selectors between input and output', async ({ n8n }) => {
await n8n.start.fromImportedWorkflow('Test_workflow_5.json');
await n8n.canvas.clickZoomToFitButton();
await n8n.workflowComposer.executeWorkflowAndWaitForNotification(
'Workflow executed successfully',
);
await n8n.canvas.openNode('Set3');
await n8n.ndv.inputPanel.switchDisplayMode('table');
await n8n.ndv.outputPanel.switchDisplayMode('table');
await n8n.ndv.ensureOutputRunLinking(true);
await n8n.ndv.inputPanel.getTbodyCell(0, 0).click();
expect(await n8n.ndv.getInputRunSelectorValue()).toContain('2 of 2 (6 items)');
expect(await n8n.ndv.getOutputRunSelectorValue()).toContain('2 of 2 (6 items)');
await n8n.ndv.changeOutputRunSelector('1 of 2 (6 items)');
expect(await n8n.ndv.getInputRunSelectorValue()).toContain('1 of 2 (6 items)');
await expect(n8n.ndv.inputPanel.getTbodyCell(0, 0)).toHaveText('1111');
await expect(n8n.ndv.outputPanel.getTbodyCell(0, 0)).toHaveText('1111');
await n8n.ndv.inputPanel.getTbodyCell(0, 0).click();
await n8n.ndv.changeInputRunSelector('2 of 2 (6 items)');
expect(await n8n.ndv.getOutputRunSelectorValue()).toContain('2 of 2 (6 items)');
await n8n.ndv.outputPanel.getLinkRun().click();
await n8n.ndv.inputPanel.getTbodyCell(0, 0).click();
await n8n.ndv.changeOutputRunSelector('1 of 2 (6 items)');
expect(await n8n.ndv.getInputRunSelectorValue()).toContain('2 of 2 (6 items)');
await n8n.ndv.outputPanel.getLinkRun().click();
await n8n.ndv.inputPanel.getTbodyCell(0, 0).click();
expect(await n8n.ndv.getInputRunSelectorValue()).toContain('1 of 2 (6 items)');
await n8n.ndv.inputPanel.toggleInputRunLinking();
await n8n.ndv.inputPanel.getTbodyCell(0, 0).click();
await n8n.ndv.changeInputRunSelector('2 of 2 (6 items)');
expect(await n8n.ndv.getOutputRunSelectorValue()).toContain('1 of 2 (6 items)');
await n8n.ndv.inputPanel.toggleInputRunLinking();
await n8n.ndv.inputPanel.getTbodyCell(0, 0).click();
expect(await n8n.ndv.getOutputRunSelectorValue()).toContain('2 of 2 (6 items)');
});
});
test.describe('Remote Options & Network', () => {
test('should not retrieve remote options when a parameter value changes', async ({ n8n }) => {
let fetchParameterOptionsCallCount = 0;
await n8n.page.route('**/rest/dynamic-node-parameters/options', async (route) => {
fetchParameterOptionsCallCount++;
await route.fulfill({
status: 200,
contentType: 'application/json',
body: JSON.stringify({ data: [] }),
});
});
await n8n.canvas.addNode('E2E Test', { action: 'Remote Options' });
await expect(n8n.ndv.getContainer()).toBeVisible();
await n8n.ndv.fillFirstAvailableTextParameterMultipleTimes(['test1', 'test2', 'test3']);
expect(fetchParameterOptionsCallCount).toBe(1);
});
test('Should show a notice when remote options cannot be fetched because of missing credentials', async ({
n8n,
}) => {
await n8n.page.route('**/rest/dynamic-node-parameters/options', async (route) => {
await route.fulfill({ status: 403 });
});
await n8n.canvas.addNode('Manual Trigger');
await n8n.canvas.addNode('Notion', { action: 'Update a database page', closeNDV: false });
await expect(n8n.ndv.getContainer()).toBeVisible();
await n8n.ndv.addItemToFixedCollection('propertiesUi');
await expect(
n8n.ndv.getParameterInputWithIssues('propertiesUi.propertyValues[0].key'),
).toBeVisible();
});
test('Should show error state when remote options cannot be fetched', async ({ n8n }) => {
await n8n.page.route('**/rest/dynamic-node-parameters/options', async (route) => {
await route.fulfill({ status: 500 });
});
await n8n.canvas.addNode('Manual Trigger');
await n8n.canvas.addNode('Notion', { action: 'Update a database page', closeNDV: false });
await expect(n8n.ndv.getContainer()).toBeVisible();
await n8n.credentialsComposer.createFromNdv({
apiKey: 'sk_test_123',
});
await n8n.ndv.addItemToFixedCollection('propertiesUi');
await expect(
n8n.ndv.getParameterInputWithIssues('propertiesUi.propertyValues[0].key'),
).toBeVisible();
});
});
test.describe('Floating Nodes Navigation', () => {
test('should traverse floating nodes with mouse', async ({ n8n }) => {
await n8n.start.fromImportedWorkflow('Floating_Nodes.json');
await n8n.canvas.getCanvasNodes().first().dblclick();
await expect(n8n.ndv.getContainer()).toBeVisible();
await expect(n8n.ndv.getFloatingNodeByPosition('inputMain')).toBeHidden();
await expect(n8n.ndv.getFloatingNodeByPosition('outputMain')).toBeVisible();
for (let i = 0; i < 4; i++) {
await n8n.ndv.clickFloatingNodeByPosition('outputMain');
await expect(n8n.ndv.getFloatingNodeByPosition('inputMain')).toBeVisible();
await expect(n8n.ndv.getFloatingNodeByPosition('outputMain')).toBeVisible();
await n8n.ndv.close();
await expect(n8n.canvas.getSelectedNodes()).toHaveCount(1);
await n8n.canvas.getSelectedNodes().first().dblclick();
await expect(n8n.ndv.getContainer()).toBeVisible();
}
await n8n.ndv.clickFloatingNodeByPosition('outputMain');
await expect(n8n.ndv.getFloatingNodeByPosition('inputMain')).toBeVisible();
for (let i = 0; i < 4; i++) {
await n8n.ndv.clickFloatingNodeByPosition('inputMain');
await expect(n8n.ndv.getFloatingNodeByPosition('outputMain')).toBeVisible();
await expect(n8n.ndv.getFloatingNodeByPosition('inputMain')).toBeVisible();
}
await n8n.ndv.clickFloatingNodeByPosition('inputMain');
await expect(n8n.ndv.getFloatingNodeByPosition('inputMain')).toBeHidden();
await expect(n8n.ndv.getFloatingNodeByPosition('inputSub')).toBeHidden();
await expect(n8n.ndv.getFloatingNodeByPosition('outputSub')).toBeHidden();
await n8n.ndv.close();
await expect(n8n.canvas.getSelectedNodes()).toHaveCount(1);
});
test('should traverse floating nodes with keyboard', async ({ n8n }) => {
await n8n.start.fromImportedWorkflow('Floating_Nodes.json');
await n8n.canvas.getCanvasNodes().first().dblclick();
await expect(n8n.ndv.getContainer()).toBeVisible();
await expect(n8n.ndv.getFloatingNodeByPosition('inputMain')).toBeHidden();
await expect(n8n.ndv.getFloatingNodeByPosition('outputMain')).toBeVisible();
for (let i = 0; i < 4; i++) {
await n8n.ndv.navigateToNextFloatingNodeWithKeyboard();
await expect(n8n.ndv.getFloatingNodeByPosition('inputMain')).toBeVisible();
await expect(n8n.ndv.getFloatingNodeByPosition('outputMain')).toBeVisible();
await n8n.ndv.close();
await expect(n8n.canvas.getSelectedNodes()).toHaveCount(1);
await n8n.canvas.getSelectedNodes().first().dblclick();
await expect(n8n.ndv.getContainer()).toBeVisible();
}
await n8n.ndv.navigateToNextFloatingNodeWithKeyboard();
await expect(n8n.ndv.getFloatingNodeByPosition('inputMain')).toBeVisible();
for (let i = 0; i < 4; i++) {
await n8n.ndv.navigateToPreviousFloatingNodeWithKeyboard();
await expect(n8n.ndv.getFloatingNodeByPosition('outputMain')).toBeVisible();
await expect(n8n.ndv.getFloatingNodeByPosition('inputMain')).toBeVisible();
}
await n8n.ndv.navigateToPreviousFloatingNodeWithKeyboard();
await expect(n8n.ndv.getFloatingNodeByPosition('inputMain')).toBeHidden();
await expect(n8n.ndv.getFloatingNodeByPosition('inputSub')).toBeHidden();
await expect(n8n.ndv.getFloatingNodeByPosition('outputSub')).toBeHidden();
await n8n.ndv.close();
await expect(n8n.canvas.getSelectedNodes()).toHaveCount(1);
});
test('should connect floating sub-nodes', async ({ n8n }) => {
await n8n.canvas.addNode('AI Agent', { closeNDV: false });
await expect(n8n.ndv.getContainer()).toBeVisible();
await n8n.ndv.connectAISubNode('ai_languageModel', 'Anthropic Chat Model');
await n8n.ndv.connectAISubNode('ai_memory', 'Simple Memory');
await n8n.ndv.connectAISubNode('ai_tool', 'HTTP Request Tool');
expect(await n8n.ndv.getNodesWithIssuesCount()).toBeGreaterThanOrEqual(2);
});
test('should have the floating nodes in correct order', async ({ n8n }) => {
await n8n.start.fromImportedWorkflow('Floating_Nodes.json');
await n8n.canvas.openNode('Merge');
await expect(n8n.ndv.getContainer()).toBeVisible();
expect(await n8n.ndv.getFloatingNodeCount('inputMain')).toBe(2);
await n8n.ndv.verifyFloatingNodeName('inputMain', 'Edit Fields1', 0);
await n8n.ndv.verifyFloatingNodeName('inputMain', 'Edit Fields0', 1);
await n8n.ndv.close();
await n8n.canvas.openNode('Merge1');
await expect(n8n.ndv.getContainer()).toBeVisible();
expect(await n8n.ndv.getFloatingNodeCount('inputMain')).toBe(2);
await n8n.ndv.verifyFloatingNodeName('inputMain', 'Edit Fields0', 0);
await n8n.ndv.verifyFloatingNodeName('inputMain', 'Edit Fields1', 1);
});
});
test.describe('Parameter Management - Advanced', () => {
test('Should clear mismatched collection parameters', async ({ n8n }) => {
await n8n.canvas.addNode('Manual Trigger');
await n8n.canvas.addNode('Notion', { action: 'Create a database page', closeNDV: false });
await expect(n8n.ndv.getContainer()).toBeVisible();
await n8n.ndv.addItemToFixedCollection('propertiesUi');
await n8n.ndv.changeNodeOperation('Update');
await expect(n8n.ndv.getParameterItemWithText('Currently no items exist')).toBeVisible();
});
test('Should keep RLC values after operation change', async ({ n8n }) => {
const TEST_DOC_ID = '1111';
await n8n.canvas.addNode('Manual Trigger');
await n8n.canvas.addNode('Google Sheets', { closeNDV: false, action: 'Append row in sheet' });
await expect(n8n.ndv.getContainer()).toBeVisible();
await n8n.ndv.setRLCValue('documentId', TEST_DOC_ID);
await n8n.ndv.changeNodeOperation('Append or Update Row');
const input = n8n.ndv.getResourceLocatorInput('documentId').locator('input');
await expect(input).toHaveValue(TEST_DOC_ID);
});
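// Context: resource locator (RLC) parameters survive an operation change because they
// are stored as structured values rather than plain strings. A sketch of the persisted
// shape, assuming n8n's usual __rl format:
const persistedDocumentId = {
__rl: true,
value: '1111', // the TEST_DOC_ID entered above
mode: 'id', // could also be 'url' or 'list', depending on how the value was picked
};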
test('Should not clear resource/operation after credential change', async ({ n8n }) => {
await n8n.canvas.addNode('Manual Trigger');
await n8n.canvas.addNode('Discord', { closeNDV: false, action: 'Delete a message' });
await expect(n8n.ndv.getContainer()).toBeVisible();
await n8n.credentialsComposer.createFromNdv({
botToken: 'sk_test_123',
});
const resourceInput = n8n.ndv.getParameterInputField('resource');
const operationInput = n8n.ndv.getParameterInputField('operation');
await expect(resourceInput).toHaveValue('Message');
await expect(operationInput).toHaveValue('Delete');
});
});
test.describe('Node Creator Integration', () => {
test('Should open appropriate node creator after clicking on connection hint link', async ({
n8n,
}) => {
const hintMapper = {
Memory: 'AI Nodes',
'Output Parser': 'AI Nodes',
'Token Splitter': 'Document Loaders',
Tool: 'AI Nodes',
Embeddings: 'Vector Stores',
'Vector Store': 'Retrievers',
};
await n8n.canvas.importWorkflow(
'open_node_creator_for_connection.json',
'open_node_creator_for_connection',
);
for (const [node, group] of Object.entries(hintMapper)) {
await n8n.canvas.openNode(node);
await n8n.ndv.clickNodeCreatorInsertOneButton();
await expect(n8n.canvas.getNodeCreatorHeader(group)).toBeVisible();
await n8n.page.keyboard.press('Escape');
}
});
});
test.describe('Expression Editor Features', () => {
test('should allow selecting item for expressions', async ({ n8n }) => {
await n8n.canvas.importWorkflow('Test_workflow_3.json', 'My test workflow 2');
await n8n.workflowComposer.executeWorkflowAndWaitForNotification(
'Workflow executed successfully',
);
await n8n.canvas.openNode('Set');
await n8n.ndv.getAssignmentValue('assignments').getByText('Expression').click();
const expressionInput = n8n.ndv.getInlineExpressionEditorInput();
await expressionInput.click();
await n8n.ndv.clearExpressionEditor();
await n8n.ndv.typeInExpressionEditor('{{ $json.input[0].count');
await expect(n8n.ndv.getInlineExpressionEditorOutput()).toHaveText('0');
await n8n.ndv.expressionSelectNextItem();
await expect(n8n.ndv.getInlineExpressionEditorOutput()).toHaveText('1');
await expect(n8n.ndv.getInlineExpressionEditorItemInput()).toHaveValue('1');
await expect(n8n.ndv.getInlineExpressionEditorItemNextButton()).toBeDisabled();
await n8n.ndv.expressionSelectPrevItem();
await expect(n8n.ndv.getInlineExpressionEditorOutput()).toHaveText('0');
await expect(n8n.ndv.getInlineExpressionEditorItemInput()).toHaveValue('0');
});
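// The inline editor resolves the expression against whichever input item is selected,
// which is why stepping between items flips the preview between 0 and 1. Conceptually
// (data shape inferred from the assertions above):
const inputItems = [{ input: [{ count: 0 }] }, { input: [{ count: 1 }] }];
const previewFor = (selectedItem: number): number => inputItems[selectedItem].input[0].count;
// previewFor(0) === 0 and previewFor(1) === 1, matching the assertions above.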
});
test.describe('Schema & Data Views', () => {
test('should show data from the correct output in schema view', async ({ n8n }) => {
await n8n.canvas.importWorkflow('Test_workflow_multiple_outputs.json', 'Multiple outputs');
await n8n.workflowComposer.executeWorkflowAndWaitForNotification(
'Workflow executed successfully',
);
await n8n.canvas.openNode('Only Item 1');
await expect(n8n.ndv.inputPanel.get()).toBeVisible();
await n8n.ndv.inputPanel.switchDisplayMode('schema');
await expect(n8n.ndv.inputPanel.getSchemaItem('onlyOnItem1')).toBeVisible();
await n8n.ndv.close();
await n8n.canvas.openNode('Only Item 2');
await expect(n8n.ndv.inputPanel.get()).toBeVisible();
await n8n.ndv.inputPanel.switchDisplayMode('schema');
await expect(n8n.ndv.inputPanel.getSchemaItem('onlyOnItem2')).toBeVisible();
await n8n.ndv.close();
await n8n.canvas.openNode('Only Item 3');
await expect(n8n.ndv.inputPanel.get()).toBeVisible();
await n8n.ndv.inputPanel.switchDisplayMode('schema');
await expect(n8n.ndv.inputPanel.getSchemaItem('onlyOnItem3')).toBeVisible();
await n8n.ndv.close();
});
});
test.describe('Search Functionality - Advanced', () => {
test('should not show items count when searching in schema view', async ({ n8n }) => {
await n8n.canvas.importWorkflow('Test_ndv_search.json', 'NDV Search Test');
await n8n.canvas.openNode('Edit Fields');
await expect(n8n.ndv.outputPanel.get()).toBeVisible();
await n8n.ndv.execute();
await n8n.ndv.outputPanel.switchDisplayMode('schema');
await n8n.ndv.searchOutputData('US');
await expect(n8n.ndv.outputPanel.getItemsCount()).toBeHidden();
});
test('should show additional tooltip when searching in schema view if no matches', async ({
n8n,
}) => {
await n8n.canvas.importWorkflow('Test_ndv_search.json', 'NDV Search Test');
await n8n.canvas.openNode('Edit Fields');
await expect(n8n.ndv.outputPanel.get()).toBeVisible();
await n8n.ndv.execute();
await n8n.ndv.outputPanel.switchDisplayMode('schema');
await n8n.ndv.searchOutputData('foo');
await expect(
n8n.ndv.outputPanel
.get()
.getByText('To search field values, switch to table or JSON view.'),
).toBeVisible();
});
});
test.describe('Complex Edge Cases', () => {
test('ADO-2931 - should handle multiple branches of the same input with the first branch empty correctly', async ({
n8n,
}) => {
await n8n.canvas.importWorkflow(
'Test_ndv_two_branches_of_same_parent_false_populated.json',
'Multiple Branches Test',
);
await n8n.canvas.openNode('DebugHelper');
await expect(n8n.ndv.inputPanel.get()).toBeVisible();
await expect(n8n.ndv.outputPanel.get()).toBeVisible();
await n8n.ndv.execute();
await expect(n8n.ndv.inputPanel.getSchemaItem('a1')).toBeVisible();
});
});
test.describe('Execution Indicators - Multi-Node', () => {
test('should properly show node execution indicator for multiple nodes', async ({ n8n }) => {
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript' });
await n8n.ndv.clickBackToCanvasButton();
await n8n.workflowComposer.executeWorkflowAndWaitForNotification(
'Workflow executed successfully',
);
await n8n.canvas.openNode(MANUAL_TRIGGER_NODE_DISPLAY_NAME);
await expect(n8n.ndv.getNodeRunSuccessIndicator()).toBeVisible();
await expect(n8n.ndv.getNodeRunTooltipIndicator()).toBeVisible();
await n8n.ndv.clickBackToCanvasButton();
await n8n.canvas.openNode(CODE_NODE_DISPLAY_NAME);
await expect(n8n.ndv.getNodeRunSuccessIndicator()).toBeVisible();
});
});
});

View File

@@ -1,4 +1,4 @@
import { test, expect } from '../../fixtures/base';
import { test, expect } from '../../../fixtures/base';
test.describe('Folders - Advanced Operations', () => {
test.describe('Duplicate workflows', () => {

View File

@@ -1,4 +1,4 @@
import { test, expect } from '../../fixtures/base';
import { test, expect } from '../../../fixtures/base';
test.describe('Folders - Basic Operations', () => {
const FOLDER_CREATED_NOTIFICATION = 'Folder created';

View File

@@ -1,4 +1,4 @@
import { test, expect } from '../../fixtures/base';
import { test, expect } from '../../../fixtures/base';
test.describe('Folders - Operations', () => {
test.describe('Rename and delete folders', () => {

View File

@@ -1,6 +1,6 @@
import { nanoid } from 'nanoid';
import { test, expect } from '../../fixtures/base';
import { test, expect } from '../../../fixtures/base';
test.describe('Project Settings - Member Management', () => {
test.beforeEach(async ({ n8n }) => {

View File

@@ -4,8 +4,8 @@ import {
INSTANCE_ADMIN_CREDENTIALS,
INSTANCE_MEMBER_CREDENTIALS,
INSTANCE_OWNER_CREDENTIALS,
} from '../../config/test-users';
import { test, expect } from '../../fixtures/base';
} from '../../../config/test-users';
import { test, expect } from '../../../fixtures/base';
const MANUAL_TRIGGER_NODE_NAME = 'Manual Trigger';
const EXECUTE_WORKFLOW_NODE_NAME = 'Execute Sub-workflow';

View File

@@ -0,0 +1,24 @@
import { test, expect } from '../../../fixtures/base';
/**
* Regression test for GHC-5776
* https://linear.app/n8n/issue/GHC-5776/community-issue-plan-lacks-license-for-this-feature
*
* The AI builder was calling the /rest/ai/sessions/metadata endpoint even when the
* AI builder feature was not licensed, causing 403 errors in the console.
* This test verifies that no such errors occur when navigating to the canvas.
*/
test.describe('GHC-5776: AI sessions metadata should not cause console errors', () => {
test('should not log license errors when navigating to canvas without AI builder license', async ({
n8n,
}) => {
await n8n.start.fromBlankCanvas();
// Clicking this triggers the watcher on the workflow id, which in turn fetches the sessions metadata
await n8n.canvas.getOpenPublishModalButton().click();
const consoleMessages = await n8n.page.consoleMessages();
const errorMessages = consoleMessages.filter((msg) => msg.type() === 'error');
expect(errorMessages).toHaveLength(0);
});
});
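// The fix the comment above describes amounts to gating the metadata request on the
// license check. A minimal standalone sketch of such a guard; the function and flag
// names are assumptions, not n8n's actual store API:
async function loadBuilderSessionMetadataSketch(isAiBuilderLicensed: boolean): Promise<unknown> {
// Skip the request entirely when the feature is unlicensed, so no 403 is ever logged.
if (!isAiBuilderLicensed) return undefined;
const response = await fetch('/rest/ai/sessions/metadata');
return await response.json();
}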

View File

@@ -4,8 +4,8 @@ import {
CODE_NODE_NAME,
HTTP_REQUEST_NODE_NAME,
CODE_NODE_DISPLAY_NAME,
} from '../../config/constants';
import { test, expect } from '../../fixtures/base';
} from '../../../../../config/constants';
import { test, expect } from '../../../../../fixtures/base';
test.describe('Canvas Actions', () => {
test.beforeEach(async ({ n8n }) => {

View File

@@ -0,0 +1,227 @@
import {
MANUAL_TRIGGER_NODE_NAME,
MANUAL_TRIGGER_NODE_DISPLAY_NAME,
SWITCH_NODE_NAME,
EDIT_FIELDS_SET_NODE_NAME,
MERGE_NODE_NAME,
CODE_NODE_NAME,
SCHEDULE_TRIGGER_NODE_NAME,
CODE_NODE_DISPLAY_NAME,
} from '../../../../../config/constants';
import { test, expect } from '../../../../../fixtures/base';
test.describe('Canvas Node Manipulation and Navigation', () => {
test.beforeEach(async ({ n8n }) => {
await n8n.start.fromBlankCanvas();
});
test('should add switch node and test connections', async ({ n8n }) => {
const desiredOutputs = 4;
await n8n.canvas.addNode(MANUAL_TRIGGER_NODE_NAME);
await n8n.canvas.addNode(SWITCH_NODE_NAME);
for (let i = 0; i < desiredOutputs; i++) {
await n8n.page.getByText('Add Routing Rule').click();
}
await n8n.ndv.close();
for (let i = 0; i < desiredOutputs; i++) {
await n8n.canvas.clickNodePlusEndpoint(SWITCH_NODE_NAME);
await expect(n8n.canvas.nodeCreatorSearchBar()).toBeVisible();
await n8n.canvas.fillNodeCreatorSearchBar(EDIT_FIELDS_SET_NODE_NAME);
await n8n.canvas.clickNodeCreatorItemName(EDIT_FIELDS_SET_NODE_NAME);
await n8n.page.keyboard.press('Escape');
await n8n.canvas.clickZoomToFitButton();
}
await n8n.canvas.canvasPane().click({ position: { x: 10, y: 10 } });
await n8n.canvas.clickNodePlusEndpoint('Edit Fields3');
await n8n.canvas.fillNodeCreatorSearchBar(SWITCH_NODE_NAME);
await n8n.canvas.clickNodeCreatorItemName(SWITCH_NODE_NAME);
await n8n.page.keyboard.press('Escape');
await n8n.canvasComposer.saveWorkflowAndWaitForUrl();
await expect(n8n.canvas.getWorkflowSaveButton()).toContainText('Saved');
await n8n.canvasComposer.reloadAndWaitForCanvas();
await expect(
n8n.canvas.connectionBetweenNodes('Edit Fields3', `${SWITCH_NODE_NAME}1`),
).toBeAttached();
const editFieldsNodes = ['Edit Fields', 'Edit Fields1', 'Edit Fields2', 'Edit Fields3'];
for (const nodeName of editFieldsNodes) {
await expect(n8n.canvas.connectionBetweenNodes(SWITCH_NODE_NAME, nodeName)).toBeAttached();
}
});
test('should add merge node and test connections', async ({ n8n }) => {
const editFieldsNodeCount = 2;
const checkConnections = async () => {
await expect(
n8n.canvas.connectionBetweenNodes(MANUAL_TRIGGER_NODE_DISPLAY_NAME, 'Edit Fields1').first(),
).toBeAttached();
await expect(
n8n.canvas.connectionBetweenNodes('Edit Fields', MERGE_NODE_NAME).first(),
).toBeAttached();
await expect(
n8n.canvas.connectionBetweenNodes('Edit Fields1', MERGE_NODE_NAME).first(),
).toBeAttached();
};
await n8n.canvas.addNode(MANUAL_TRIGGER_NODE_NAME);
await n8n.canvas.nodeByName(MANUAL_TRIGGER_NODE_DISPLAY_NAME).click();
for (let i = 0; i < editFieldsNodeCount; i++) {
await n8n.canvas.addNode(EDIT_FIELDS_SET_NODE_NAME, { closeNDV: true });
await n8n.canvas.canvasPane().click({
position: { x: (i + 1) * 200, y: (i + 1) * 200 },
// eslint-disable-next-line playwright/no-force-option
force: true,
});
}
await n8n.canvas.clickZoomToFitButton();
await n8n.canvas.addNode(MERGE_NODE_NAME, { closeNDV: true });
await n8n.canvas.clickZoomToFitButton();
await n8n.canvas.connectNodesByDrag(MANUAL_TRIGGER_NODE_DISPLAY_NAME, 'Edit Fields1', 0, 0);
await expect(
n8n.canvas.connectionBetweenNodes(MANUAL_TRIGGER_NODE_DISPLAY_NAME, 'Edit Fields1').first(),
).toBeAttached();
await n8n.canvas.connectNodesByDrag('Edit Fields', MERGE_NODE_NAME, 0, 0);
await expect(
n8n.canvas.connectionBetweenNodes('Edit Fields', MERGE_NODE_NAME).first(),
).toBeAttached();
await n8n.canvas.connectNodesByDrag('Edit Fields1', MERGE_NODE_NAME, 0, 1);
await expect(
n8n.canvas.connectionBetweenNodes('Edit Fields1', MERGE_NODE_NAME).first(),
).toBeAttached();
await n8n.canvasComposer.saveWorkflowAndWaitForUrl();
await expect(n8n.canvas.getWorkflowSaveButton()).toContainText('Saved');
await n8n.canvasComposer.reloadAndWaitForCanvas();
await checkConnections();
await n8n.canvas.clickExecuteWorkflowButton();
await expect(n8n.canvas.stopExecutionButton()).toBeHidden();
await n8n.canvasComposer.reloadAndWaitForCanvas();
await checkConnections();
await n8n.canvas.clickExecuteWorkflowButton();
await expect(n8n.canvas.stopExecutionButton()).toBeHidden();
await expect(
n8n.canvas.getConnectionLabelBetweenNodes('Edit Fields1', MERGE_NODE_NAME).first(),
).toContainText('2 items');
});
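// Note the two trailing arguments to connectNodesByDrag: judging from the call sites
// above, they pick the endpoint indices, so ('Edit Fields1', MERGE_NODE_NAME, 0, 1)
// wires output 0 into the Merge node's second input. Assumed signature:
type ConnectNodesByDrag = (
sourceNodeName: string,
targetNodeName: string,
sourceOutputIndex: number, // 0-based output endpoint on the source node
targetInputIndex: number, // 0-based input endpoint on the target node
) => Promise<void>;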
test('should add nodes and check execution success', async ({ n8n }) => {
const nodeCount = 3;
await n8n.canvas.addNode(MANUAL_TRIGGER_NODE_NAME);
await n8n.canvas.nodeByName(MANUAL_TRIGGER_NODE_DISPLAY_NAME).click();
for (let i = 0; i < nodeCount; i++) {
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
}
await n8n.canvas.clickZoomToFitButton();
await n8n.canvas.clickExecuteWorkflowButton();
await expect(n8n.canvas.stopExecutionButton()).toBeHidden();
await expect(n8n.canvas.getSuccessEdges()).toHaveCount(nodeCount);
await expect(n8n.canvas.getAllNodeSuccessIndicators()).toHaveCount(nodeCount + 1);
await expect(n8n.canvas.getCanvasHandlePlusWrapper()).toHaveAttribute(
'data-plus-type',
'success',
);
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.clickZoomToFitButton();
await expect(n8n.canvas.getCanvasHandlePlus()).not.toHaveAttribute('data-plus-type', 'success');
await expect(n8n.canvas.getSuccessEdges()).toHaveCount(nodeCount + 1);
await expect(n8n.canvas.getAllNodeSuccessIndicators()).toHaveCount(nodeCount + 1);
});
test('should delete node using context menu', async ({ n8n }) => {
await n8n.canvas.addNode(SCHEDULE_TRIGGER_NODE_NAME, { closeNDV: true });
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.clickZoomToFitButton();
await n8n.canvas.deleteNodeFromContextMenu(CODE_NODE_DISPLAY_NAME);
await expect(n8n.canvas.getCanvasNodes()).toHaveCount(1);
await expect(n8n.canvas.nodeConnections()).toHaveCount(0);
});
test('should delete node using keyboard shortcut', async ({ n8n }) => {
await n8n.canvas.addNode(SCHEDULE_TRIGGER_NODE_NAME, { closeNDV: true });
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.nodeByName(CODE_NODE_DISPLAY_NAME).click();
await n8n.page.keyboard.press('Backspace');
await expect(n8n.canvas.getCanvasNodes()).toHaveCount(1);
await expect(n8n.canvas.nodeConnections()).toHaveCount(0);
});
test('should delete node between two connected nodes', async ({ n8n }) => {
await n8n.canvas.addNode(SCHEDULE_TRIGGER_NODE_NAME, { closeNDV: true });
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.addNode(EDIT_FIELDS_SET_NODE_NAME, { closeNDV: true });
await expect(n8n.canvas.getCanvasNodes()).toHaveCount(3);
await expect(n8n.canvas.nodeConnections()).toHaveCount(2);
await n8n.canvas.nodeByName(CODE_NODE_DISPLAY_NAME).click();
await n8n.canvas.clickZoomToFitButton();
await n8n.page.keyboard.press('Backspace');
await expect(n8n.canvas.getCanvasNodes()).toHaveCount(2);
await expect(n8n.canvas.nodeConnections()).toHaveCount(1);
});
test('should delete multiple nodes (context menu or shortcut)', async ({ n8n }) => {
await n8n.canvas.addNode(SCHEDULE_TRIGGER_NODE_NAME, { closeNDV: true });
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.hitDeleteAllNodes();
await expect(n8n.canvas.getCanvasNodes()).toHaveCount(0);
await n8n.canvas.addNode(SCHEDULE_TRIGGER_NODE_NAME, { closeNDV: true });
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.rightClickCanvas();
await n8n.canvas.getContextMenuItem('select_all').click();
await n8n.canvas.rightClickCanvas();
await n8n.canvas.getContextMenuItem('delete').click();
await expect(n8n.canvas.getCanvasNodes()).toHaveCount(0);
});
test('should move node', async ({ n8n }) => {
await n8n.canvas.addNode(MANUAL_TRIGGER_NODE_NAME);
await n8n.canvas.nodeByName(MANUAL_TRIGGER_NODE_DISPLAY_NAME).click();
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.clickZoomToFitButton();
const pos1 = await n8n.canvas.getNodePosition(CODE_NODE_DISPLAY_NAME);
await n8n.canvas.dragNodeToRelativePosition(CODE_NODE_DISPLAY_NAME, 50, 150);
const pos2 = await n8n.canvas.getNodePosition(CODE_NODE_DISPLAY_NAME);
expect(pos2.x).toBeGreaterThan(pos1.x);
expect(pos2.y).toBeGreaterThan(pos1.y);
});
});

View File

@@ -3,16 +3,12 @@ import fs from 'fs';
import {
MANUAL_TRIGGER_NODE_NAME,
MANUAL_TRIGGER_NODE_DISPLAY_NAME,
SWITCH_NODE_NAME,
EDIT_FIELDS_SET_NODE_NAME,
MERGE_NODE_NAME,
CODE_NODE_NAME,
SCHEDULE_TRIGGER_NODE_NAME,
CODE_NODE_DISPLAY_NAME,
} from '../../config/constants';
import { test, expect } from '../../fixtures/base';
import type { n8nPage } from '../../pages/n8nPage';
import { resolveFromRoot } from '../../utils/path-helper';
} from '../../../../../config/constants';
import { test, expect } from '../../../../../fixtures/base';
import type { n8nPage } from '../../../../../pages/n8nPage';
import { resolveFromRoot } from '../../../../../utils/path-helper';
const DEFAULT_ZOOM_FACTOR = 1;
const ZOOM_IN_X1_FACTOR = 1.25; // Expected zoom after 1 zoom-in click (125%)
@@ -21,222 +17,6 @@ const ZOOM_OUT_X1_FACTOR = 0.8; // Expected zoom after 1 zoom-out click (80%)
const ZOOM_OUT_X2_FACTOR = 0.64; // Expected zoom after 2 zoom-out clicks (64%)
const ZOOM_TOLERANCE = 0.2; // Acceptable variance for floating-point zoom comparisons
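// The X2 factors compound the single-click factors multiplicatively:
// 1.25 * 1.25 = 1.5625 for two zoom-in clicks and 0.8 * 0.8 = 0.64 for two
// zoom-out clicks, compared within ZOOM_TOLERANCE by the zoom tests.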
test.describe('Canvas Node Manipulation and Navigation', () => {
test.beforeEach(async ({ n8n }) => {
await n8n.start.fromBlankCanvas();
});
test('should add switch node and test connections', async ({ n8n }) => {
const desiredOutputs = 4;
await n8n.canvas.addNode(MANUAL_TRIGGER_NODE_NAME);
await n8n.canvas.addNode(SWITCH_NODE_NAME);
for (let i = 0; i < desiredOutputs; i++) {
await n8n.page.getByText('Add Routing Rule').click();
}
await n8n.ndv.close();
for (let i = 0; i < desiredOutputs; i++) {
await n8n.canvas.clickNodePlusEndpoint(SWITCH_NODE_NAME);
await expect(n8n.canvas.nodeCreatorSearchBar()).toBeVisible();
await n8n.canvas.fillNodeCreatorSearchBar(EDIT_FIELDS_SET_NODE_NAME);
await n8n.canvas.clickNodeCreatorItemName(EDIT_FIELDS_SET_NODE_NAME);
await n8n.page.keyboard.press('Escape');
await n8n.canvas.clickZoomToFitButton();
}
await n8n.canvas.canvasPane().click({ position: { x: 10, y: 10 } });
await n8n.canvas.clickNodePlusEndpoint('Edit Fields3');
await n8n.canvas.fillNodeCreatorSearchBar(SWITCH_NODE_NAME);
await n8n.canvas.clickNodeCreatorItemName(SWITCH_NODE_NAME);
await n8n.page.keyboard.press('Escape');
await n8n.canvasComposer.saveWorkflowAndWaitForUrl();
await expect(n8n.canvas.getWorkflowSaveButton()).toContainText('Saved');
await n8n.canvasComposer.reloadAndWaitForCanvas();
await expect(
n8n.canvas.connectionBetweenNodes('Edit Fields3', `${SWITCH_NODE_NAME}1`),
).toBeAttached();
const editFieldsNodes = ['Edit Fields', 'Edit Fields1', 'Edit Fields2', 'Edit Fields3'];
for (const nodeName of editFieldsNodes) {
await expect(n8n.canvas.connectionBetweenNodes(SWITCH_NODE_NAME, nodeName)).toBeAttached();
}
});
test('should add merge node and test connections', async ({ n8n }) => {
const editFieldsNodeCount = 2;
const checkConnections = async () => {
await expect(
n8n.canvas.connectionBetweenNodes(MANUAL_TRIGGER_NODE_DISPLAY_NAME, 'Edit Fields1').first(),
).toBeAttached();
await expect(
n8n.canvas.connectionBetweenNodes('Edit Fields', MERGE_NODE_NAME).first(),
).toBeAttached();
await expect(
n8n.canvas.connectionBetweenNodes('Edit Fields1', MERGE_NODE_NAME).first(),
).toBeAttached();
};
await n8n.canvas.addNode(MANUAL_TRIGGER_NODE_NAME);
await n8n.canvas.nodeByName(MANUAL_TRIGGER_NODE_DISPLAY_NAME).click();
for (let i = 0; i < editFieldsNodeCount; i++) {
await n8n.canvas.addNode(EDIT_FIELDS_SET_NODE_NAME, { closeNDV: true });
await n8n.canvas.canvasPane().click({
position: { x: (i + 1) * 200, y: (i + 1) * 200 },
// eslint-disable-next-line playwright/no-force-option
force: true,
});
}
await n8n.canvas.clickZoomToFitButton();
await n8n.canvas.addNode(MERGE_NODE_NAME, { closeNDV: true });
await n8n.canvas.clickZoomToFitButton();
await n8n.canvas.connectNodesByDrag(MANUAL_TRIGGER_NODE_DISPLAY_NAME, 'Edit Fields1', 0, 0);
await expect(
n8n.canvas.connectionBetweenNodes(MANUAL_TRIGGER_NODE_DISPLAY_NAME, 'Edit Fields1').first(),
).toBeAttached();
await n8n.canvas.connectNodesByDrag('Edit Fields', MERGE_NODE_NAME, 0, 0);
await expect(
n8n.canvas.connectionBetweenNodes('Edit Fields', MERGE_NODE_NAME).first(),
).toBeAttached();
await n8n.canvas.connectNodesByDrag('Edit Fields1', MERGE_NODE_NAME, 0, 1);
await expect(
n8n.canvas.connectionBetweenNodes('Edit Fields1', MERGE_NODE_NAME).first(),
).toBeAttached();
await n8n.canvasComposer.saveWorkflowAndWaitForUrl();
await expect(n8n.canvas.getWorkflowSaveButton()).toContainText('Saved');
await n8n.canvasComposer.reloadAndWaitForCanvas();
await checkConnections();
await n8n.canvas.clickExecuteWorkflowButton();
await expect(n8n.canvas.stopExecutionButton()).toBeHidden();
await n8n.canvasComposer.reloadAndWaitForCanvas();
await checkConnections();
await n8n.canvas.clickExecuteWorkflowButton();
await expect(n8n.canvas.stopExecutionButton()).toBeHidden();
await expect(
n8n.canvas.getConnectionLabelBetweenNodes('Edit Fields1', MERGE_NODE_NAME).first(),
).toContainText('2 items');
});
test('should add nodes and check execution success', async ({ n8n }) => {
const nodeCount = 3;
await n8n.canvas.addNode(MANUAL_TRIGGER_NODE_NAME);
await n8n.canvas.nodeByName(MANUAL_TRIGGER_NODE_DISPLAY_NAME).click();
for (let i = 0; i < nodeCount; i++) {
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
}
await n8n.canvas.clickZoomToFitButton();
await n8n.canvas.clickExecuteWorkflowButton();
await expect(n8n.canvas.stopExecutionButton()).toBeHidden();
await expect(n8n.canvas.getSuccessEdges()).toHaveCount(nodeCount);
await expect(n8n.canvas.getAllNodeSuccessIndicators()).toHaveCount(nodeCount + 1);
await expect(n8n.canvas.getCanvasHandlePlusWrapper()).toHaveAttribute(
'data-plus-type',
'success',
);
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.clickZoomToFitButton();
await expect(n8n.canvas.getCanvasHandlePlus()).not.toHaveAttribute('data-plus-type', 'success');
await expect(n8n.canvas.getSuccessEdges()).toHaveCount(nodeCount + 1);
await expect(n8n.canvas.getAllNodeSuccessIndicators()).toHaveCount(nodeCount + 1);
});
test('should delete node using context menu', async ({ n8n }) => {
await n8n.canvas.addNode(SCHEDULE_TRIGGER_NODE_NAME, { closeNDV: true });
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.clickZoomToFitButton();
await n8n.canvas.deleteNodeFromContextMenu(CODE_NODE_DISPLAY_NAME);
await expect(n8n.canvas.getCanvasNodes()).toHaveCount(1);
await expect(n8n.canvas.nodeConnections()).toHaveCount(0);
});
test('should delete node using keyboard shortcut', async ({ n8n }) => {
await n8n.canvas.addNode(SCHEDULE_TRIGGER_NODE_NAME, { closeNDV: true });
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.nodeByName(CODE_NODE_DISPLAY_NAME).click();
await n8n.page.keyboard.press('Backspace');
await expect(n8n.canvas.getCanvasNodes()).toHaveCount(1);
await expect(n8n.canvas.nodeConnections()).toHaveCount(0);
});
test('should delete node between two connected nodes', async ({ n8n }) => {
await n8n.canvas.addNode(SCHEDULE_TRIGGER_NODE_NAME, { closeNDV: true });
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.addNode(EDIT_FIELDS_SET_NODE_NAME, { closeNDV: true });
await expect(n8n.canvas.getCanvasNodes()).toHaveCount(3);
await expect(n8n.canvas.nodeConnections()).toHaveCount(2);
await n8n.canvas.nodeByName(CODE_NODE_DISPLAY_NAME).click();
await n8n.canvas.clickZoomToFitButton();
await n8n.page.keyboard.press('Backspace');
await expect(n8n.canvas.getCanvasNodes()).toHaveCount(2);
await expect(n8n.canvas.nodeConnections()).toHaveCount(1);
});
test('should delete multiple nodes (context menu or shortcut)', async ({ n8n }) => {
await n8n.canvas.addNode(SCHEDULE_TRIGGER_NODE_NAME, { closeNDV: true });
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.hitDeleteAllNodes();
await expect(n8n.canvas.getCanvasNodes()).toHaveCount(0);
await n8n.canvas.addNode(SCHEDULE_TRIGGER_NODE_NAME, { closeNDV: true });
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.rightClickCanvas();
await n8n.canvas.getContextMenuItem('select_all').click();
await n8n.canvas.rightClickCanvas();
await n8n.canvas.getContextMenuItem('delete').click();
await expect(n8n.canvas.getCanvasNodes()).toHaveCount(0);
});
test('should move node', async ({ n8n }) => {
await n8n.canvas.addNode(MANUAL_TRIGGER_NODE_NAME);
await n8n.canvas.nodeByName(MANUAL_TRIGGER_NODE_DISPLAY_NAME).click();
await n8n.canvas.addNode(CODE_NODE_NAME, { action: 'Code in JavaScript', closeNDV: true });
await n8n.canvas.clickZoomToFitButton();
const pos1 = await n8n.canvas.getNodePosition(CODE_NODE_DISPLAY_NAME);
await n8n.canvas.dragNodeToRelativePosition(CODE_NODE_DISPLAY_NAME, 50, 150);
const pos2 = await n8n.canvas.getNodePosition(CODE_NODE_DISPLAY_NAME);
expect(pos2.x).toBeGreaterThan(pos1.x);
expect(pos2.y).toBeGreaterThan(pos1.y);
});
});
test.describe('Canvas Zoom Functionality', () => {
test.beforeEach(async ({ n8n }) => {
await n8n.start.fromBlankCanvas();

View File

@@ -1,5 +1,5 @@
import { test, expect } from '../../fixtures/base';
import type { TestRequirements } from '../../Types';
import { test, expect } from '../../../../../fixtures/base';
import type { TestRequirements } from '../../../../../Types';
test.describe('Focus panel', () => {
test.describe('With experimental NDV in focus panel enabled', () => {

View File

@@ -1,4 +1,4 @@
import { test, expect } from '../../fixtures/base';
import { test, expect } from '../../../../../fixtures/base';
test.describe('Canvas Actions', () => {
test('adds sticky to canvas with default text and position', async ({ n8n }) => {

View File

@@ -7,9 +7,9 @@ import {
EDIT_FIELDS_SET_NODE_NAME,
MANUAL_TRIGGER_NODE_NAME,
MANUAL_TRIGGER_NODE_DISPLAY_NAME,
} from '../../config/constants';
import { test, expect } from '../../fixtures/base';
import { resolveFromRoot } from '../../utils/path-helper';
} from '../../../../../config/constants';
import { test, expect } from '../../../../../fixtures/base';
import { resolveFromRoot } from '../../../../../utils/path-helper';
test.describe('Undo/Redo', () => {
test.beforeEach(async ({ n8n }) => {

View File

@@ -4,8 +4,8 @@ import {
CODE_NODE_DISPLAY_NAME,
CODE_NODE_NAME,
MANUAL_TRIGGER_NODE_NAME,
} from '../../config/constants';
import { test, expect } from '../../fixtures/base';
} from '../../../../../config/constants';
import { test, expect } from '../../../../../fixtures/base';
test.describe('Code node', () => {
test.describe('Code editor', () => {

View File

@@ -1,8 +1,8 @@
import fs from 'fs';
import { MANUAL_TRIGGER_NODE_DISPLAY_NAME } from '../../config/constants';
import { test, expect } from '../../fixtures/base';
import { resolveFromRoot } from '../../utils/path-helper';
import { MANUAL_TRIGGER_NODE_DISPLAY_NAME } from '../../../../../config/constants';
import { test, expect } from '../../../../../fixtures/base';
import { resolveFromRoot } from '../../../../../utils/path-helper';
test.describe('Editors', () => {
test.beforeEach(async ({ n8n }) => {

View File

@@ -1,4 +1,4 @@
import { test, expect } from '../../fixtures/base';
import { test, expect } from '../../../../fixtures/base';
const NOTIFICATIONS = {
WORKFLOW_CREATED: 'Workflow successfully created',

Some files were not shown because too many files have changed in this diff.