Compare commits

...

57 Commits

Author SHA1 Message Date
Declan Carroll
b412b4c570 Merge branch 'master' into test-orchestration 2025-12-05 20:05:25 +00:00
James Gee
5c76f1ec56 fix: PAY-4074 - Owner registration in multi-main setup (#22520)
Signed-off-by: James Gee <james@justec.io>
Signed-off-by: James Gee <1285296+geemanjs@users.noreply.github.com>
2025-12-05 21:04:12 +01:00
Artem Sorokin
68693b5b26 test: Split workflow-actions.spec.ts into focused test files (#22850) 2025-12-05 18:37:18 +01:00
Artem Sorokin
d3e7713632 test: Split sharing tests into sharing/ directory (#22849) 2025-12-05 18:13:26 +01:00
renovate[bot]
e9c4c8d99a chore: Update cloudflare/wrangler-action digest to 707f637 (#22322)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Declan Carroll <declan@n8n.io>
2025-12-05 16:53:52 +00:00
Artem Sorokin
39a4b29b9d test: Rename building-blocks tests to remove numeric prefixes (#22846) 2025-12-05 17:51:00 +01:00
Artem Sorokin
5234b286ac test: Migrate data tables tests to data-tables/ directory (#22841) 2025-12-05 17:30:12 +01:00
oleg
b97b5d5bf8 feat(ai-builder): Add multi-judge pairwise evals and local mode (no-changelog) (#22708)
Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>
2025-12-05 17:12:18 +01:00
Artem Sorokin
a939ac6e1a test: Migrate tests to app-config/ and capabilities/ directories (#22843) 2025-12-05 17:08:31 +01:00
Declan Carroll
1f51264931 Change currentsBatchSize from string to number 2025-12-05 15:59:05 +00:00
Declan Carroll
96a87e0b7a Change currentsBatchSize from 'auto' to '5' 2025-12-05 15:51:57 +00:00
Artem Sorokin
b04970a478 test: Move cloud tests to cloud/ directory (#22840) 2025-12-05 16:42:37 +01:00
Declan Carroll
9fa7b2e6a5 Merge branch 'master' into test-orchestration 2025-12-05 15:34:57 +00:00
Artem Sorokin
3292132c49 test: Migrate node-specific tests to nodes/ directory (#22835) 2025-12-05 16:27:08 +01:00
Artem Sorokin
f85f1107a6 test: Migrate trigger tests to triggers/ directory (#22832) 2025-12-05 16:25:37 +01:00
Stephen Wright
5926ebf094 feat: Make underlying oauth abstractions more generic (#22741) 2025-12-05 15:16:10 +00:00
Artem Sorokin
decbc162aa test: Migrate subworkflow tests to workflows/editor/subworkflows/ (#22827) 2025-12-05 16:15:44 +01:00
Artem Sorokin
be254b270f test: Migrate workflow list/templates/checklist tests to workflows/ (#22831) 2025-12-05 16:15:23 +01:00
Benjamin Schroth
fe4f4677c4 chore: Update @modelcontextprotocol/sdk (#22823) 2025-12-05 15:13:49 +01:00
Artem Sorokin
0596e1a5b3 test: Migrate expression and code editor tests (#22820) 2025-12-05 14:57:22 +01:00
Konstantin Tieber
3979e76c1b chore: Revert add version back to public settings (no-changelog) (#22821) 2025-12-05 14:53:41 +01:00
Suguru Inoue
367643025d fix(editor): Address chat UI feedback (no-changelog) (#22819) 2025-12-05 14:47:23 +01:00
Michael Drury
33a6aa665c fix(ai-builder): Allow setting evaluation feature flags via environment variables (no-changelog) (#22813) 2025-12-05 13:37:56 +00:00
Artem Sorokin
305ba9201a test: Migrate execution tests to workflows/editor/execution/ (#22818) 2025-12-05 14:35:05 +01:00
Artem Sorokin
5c5de5c7ae test: Move canvas and editor tests to workflows/editor/ directory (#22801) 2025-12-05 14:29:30 +01:00
oleg
4b1e0ce796 chore(ai-builder): Use isAIMessage for LangGraph v1 compatibility (no-changelog) (#22807) 2025-12-05 14:24:24 +01:00
Jaakko Husso
1d952014d8 refactor(core): Move the chat link (no-changelog) (#22812) 2025-12-05 15:01:54 +02:00
Konstantin Tieber
28c3018c95 fix(core): Add version back to public settings (no-changelog) (#22804) 2025-12-05 13:30:04 +01:00
Declan Carroll
7091bc7806 fix: AI session metadata call fix (#22775) 2025-12-05 12:14:55 +00:00
oleg
661dada572 refactor(ai-builder): Centralize prompts into dedicated directory (no-changelog) (#22751)
Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>
2025-12-05 13:07:45 +01:00
Suguru Inoue
0fd0904103 refactor(core): Extract model retrieval methods from ChatHubService (#22809) 2025-12-05 13:07:14 +01:00
Iván Ovejero
08729ce6c6 refactor(core): Make Sentry init non-fatal for JS runner (#22800) 2025-12-05 12:50:47 +01:00
Artem Sorokin
79fd0b5d40 test: Reorganize NDV tests into workflows/editor/ndv/ directory (#22806) 2025-12-05 12:39:00 +01:00
Artem Sorokin
679cfc5572 test: Move project and folder tests to projects/ directory (#22802) 2025-12-05 12:35:04 +01:00
Charlie Kolb
feab6d3f34 fix(editor): Restore workflow tool view-execution link for V3 Agents (#22730) 2025-12-05 12:20:39 +01:00
Charlie Kolb
492aca09ff feat(editor): Move workflow description edit button to settings (#22301) 2025-12-05 12:20:27 +01:00
yehorkardash
c43543fb84 fix(editor): Disable noImplicitAny in typescript plugin (#22104) 2025-12-05 11:45:18 +01:00
Suguru Inoue
132f9c6f70 fix(editor): Improve table rendering in chat history (no-changelog) (#22738) 2025-12-05 11:19:41 +01:00
Iván Ovejero
064f90ce1e fix: Add HOME env var to distroless runners image (#22796) 2025-12-05 11:15:29 +01:00
Andreas Fitzek
e78250f94c chore(core): Implement generic credential storage provider (#22662) 2025-12-05 11:04:26 +01:00
renovate[bot]
fcc6d86326 chore: Update peter-evans/create-pull-request digest to 84ae59a (#22376)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Declan Carroll <declan@n8n.io>
2025-12-05 10:03:18 +00:00
Jaakko Husso
480d1e609b feat(core): Put Chat users behind license checks (no-changelog) (#22781) 2025-12-05 11:38:12 +02:00
Nikhil Kuriakose
b22654709a feat(editor): Rename columns in data tables (#21747) 2025-12-05 10:06:54 +01:00
Suguru Inoue
8d7f438e1f fix(editor): Fix chat telemetry (no-changelog) (#22793) 2025-12-05 10:04:20 +01:00
Milorad FIlipović
829135ceee feat(editor): Open template setup modal automatically (no-changelog) (#22596) 2025-12-05 09:54:05 +01:00
Declan Carroll
3f382a0369 test: Fixing flaky/failing workflow action test (#22792) 2025-12-05 08:37:55 +00:00
Jaakko Husso
54ca0c1abc fix(core): Filter out workflows from custom agents that use too old agents (no-changelog) (#22752) 2025-12-05 00:53:02 +02:00
Artem Sorokin
e219e7e915 test: Move auth tests to separate folder (#22726) 2025-12-04 23:01:55 +01:00
Declan Carroll
6e77f0eb81 ci: GH bot has a bypass for our CLA (#22773) 2025-12-04 21:23:59 +00:00
Artem Sorokin
813d33372c test: Move AI features tests to separate folder (#22727) 2025-12-04 21:43:07 +01:00
Artem Sorokin
bcfc95b08f test: Move workflow executions to separate folder (#22723) 2025-12-04 21:01:23 +01:00
Artem Sorokin
ba1ac9e1a8 test: Move credentials tests to separate folder (#22724) 2025-12-04 21:01:09 +01:00
Artem Sorokin
8928522991 test: Reorganize Playwright settings tests to match UI structure (#22618) 2025-12-04 21:00:52 +01:00
Guillaume Jacquart
ad56240013 fix(core): Hide migration rule issues not relevant to cloud (#22749) 2025-12-04 20:20:35 +01:00
Guillaume Jacquart
b8d045b050 feat(core): Add credential resolver service for CRUD operations (#22653) 2025-12-04 20:20:24 +01:00
mfsiega
803ab42164 fix(core): During partial execution don't include loop as start node if the loop isn't closed (#22555)
Co-authored-by: Danny Martini <danny@n8n.io>
2025-12-04 18:20:33 +01:00
mfsiega
3026a813b0 fix(core, editor): Move single webhook trigger check to the backend (#22450)
Co-authored-by: Danny Martini <danny@n8n.io>
2025-12-04 18:20:13 +01:00
325 changed files with 13836 additions and 7790 deletions

View File

@@ -43,7 +43,7 @@ jobs:
pnpm add --global wrangler
- name: Deploy
uses: cloudflare/wrangler-action@da0e0dfe58b7a431659754fdf3f186c529afbe65
uses: cloudflare/wrangler-action@707f63750981584eb6abc365a50d441516fb04b8
id: cloudflare_deployment
with:
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}

View File

@@ -137,7 +137,7 @@ jobs:
if: steps.verify_file.outputs.file_exists == 'true'
# Pin v7.0.8
uses: peter-evans/create-pull-request@18e469570b1cf0dfc11d60ec121099f8ff3e617a
uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412
with:
token: ${{ steps.generate_token.outputs.token }}

View File

@@ -56,5 +56,5 @@ jobs:
branch: update-node-popularity
base: master
delete-branch: true
author: n8n Bot <191478365+n8n-bot@users.noreply.github.com>
committer: n8n Bot <191478365+n8n-bot@users.noreply.github.com>
author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
committer: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

View File

@@ -172,7 +172,8 @@ ARG N8N_VERSION=snapshot
ARG N8N_RELEASE_TYPE=dev
ENV NODE_ENV=production \
N8N_RELEASE_TYPE=${N8N_RELEASE_TYPE}
N8N_RELEASE_TYPE=${N8N_RELEASE_TYPE} \
HOME=/home/runner
# Copy everything from the prepared runtime filesystem
COPY --from=runtime-prep --chown=root:root /runtime/ /

View File

@@ -104,21 +104,29 @@ The Langsmith integration provides two key components:
#### 6. Pairwise Evaluation
Pairwise evaluation provides a simpler, criteria-based approach to workflow evaluation. Instead of using the complex multi-metric evaluation system, it evaluates workflows against a custom set of "do" and "don't" rules defined in the dataset.
Pairwise evaluation provides a criteria-based approach to workflow evaluation with hierarchical scoring and multi-judge consensus. It evaluates workflows against a custom set of "do" and "don't" rules defined in the dataset.
**Evaluator (`chains/pairwise-evaluator.ts`):**
- Evaluates workflows against a checklist of criteria (dos and don'ts)
- Uses an LLM to determine if each criterion passes or fails
- Requires evidence-based justification for each decision
- Calculates a simple pass/fail score (passes / total rules)
- Returns `primaryPass` (true only if ALL criteria pass) and `diagnosticScore` (ratio of passes)
**Runner (`langsmith/pairwise-runner.ts`):**
- Generates workflows from prompts in the dataset
- Applies pairwise evaluation to each generated workflow
- Reports three metrics to Langsmith:
- `pairwise_score`: Overall score (0-1)
- `pairwise_passed_count`: Number of criteria passed
- `pairwise_failed_count`: Number of criteria violated
- Runs multiple LLM judges in parallel for each evaluation (configurable via `--judges`)
- Aggregates judge results using majority vote
- Supports filtering by `notion_id` metadata for single-example runs
- Reports five metrics to Langsmith:
- `pairwise_primary`: Majority vote result (0 or 1)
- `pairwise_diagnostic`: Average diagnostic score across judges
- `pairwise_judges_passed`: Count of judges that passed
- `pairwise_total_violations`: Sum of all violations
- `pairwise_total_passes`: Sum of all passes
**Logger (`utils/logger.ts`):**
- Simple evaluation logger with verbose mode support
- Controls output verbosity via `--verbose` flag
**Dataset Format:**
The pairwise evaluation expects a Langsmith dataset with examples containing:
@@ -217,6 +225,9 @@ GENERATE_TEST_CASES=true pnpm eval
# With custom concurrency
EVALUATION_CONCURRENCY=10 pnpm eval
# With feature flags enabled
pnpm eval --multi-agent --template-examples
```
### Langsmith Evaluation
@@ -229,11 +240,59 @@ export LANGSMITH_DATASET_NAME=your_dataset_name
# Run evaluation
pnpm eval:langsmith
# With feature flags enabled
pnpm eval:langsmith --multi-agent
```
### Pairwise Evaluation
Pairwise evaluation uses a dataset with custom do/don't criteria for each prompt.
Pairwise evaluation uses a dataset with custom do/don't criteria for each prompt. It implements a hierarchical scoring system with multiple LLM judges per evaluation.
#### CLI Options
| Option | Description | Default |
|--------|-------------|---------|
| `--prompt <text>` | Run local evaluation with this prompt (no LangSmith required) | - |
| `--dos <rules>` | Newline-separated "do" rules for local evaluation | - |
| `--donts <rules>` | Newline-separated "don't" rules for local evaluation | - |
| `--notion-id <id>` | Filter to a single example by its `notion_id` metadata | (all examples) |
| `--max-examples <n>` | Limit number of examples to evaluate (useful for testing) | (no limit) |
| `--repetitions <n>` | Number of times to repeat the entire evaluation | 1 |
| `--generations <n>` | Number of workflow generations per prompt (for variance reduction) | 1 |
| `--judges <n>` | Number of LLM judges per evaluation | 3 |
| `--concurrency <n>` | Number of prompts to evaluate in parallel | 5 |
| `--name <name>` | Custom experiment name in LangSmith | `pairwise-evals` |
| `--output-dir <path>` | Save generated workflows and evaluation results to this directory | - |
| `--verbose`, `-v` | Enable verbose logging (shows judge details, violations, etc.) | false |
| `--multi-agent` | Enable multi-agent architecture (see [Feature Flags](#feature-flags)) | false |
| `--template-examples` | Enable template-based examples (see [Feature Flags](#feature-flags)) | false |
#### Local Mode (No LangSmith Required)
Run a single pairwise evaluation locally without needing a LangSmith account:
```bash
# Basic local evaluation
pnpm eval:pairwise --prompt "Create a workflow that sends Slack messages" --dos "Use Slack node"
# With don'ts and multiple judges
pnpm eval:pairwise \
--prompt "Create a workflow that fetches data from an API" \
--dos "Use HTTP Request node\nHandle errors" \
--donts "Don't hardcode URLs" \
--judges 5 \
--verbose
```
Local mode is useful for:
- Testing prompts before adding them to a dataset
- Quick iteration on evaluation criteria
- Running evaluations without LangSmith setup
#### LangSmith Mode
For dataset-based evaluation with experiment tracking:
```bash
# Set required environment variables
@@ -242,14 +301,104 @@ export LANGSMITH_API_KEY=your_api_key
# Run pairwise evaluation (uses default dataset: notion-pairwise-workflows)
pnpm eval:pairwise
# Run a single example by notion_id
pnpm eval:pairwise --notion-id 30d29454-b397-4a35-8e0b-74a2302fa81a
# Run with 3 repetitions and 5 judges, custom experiment name
pnpm eval:pairwise --repetitions 3 --judges 5 --name "my-experiment"
# Enable verbose logging to see all judge details
pnpm eval:pairwise --notion-id abc123 --verbose
# Use a custom dataset
LANGSMITH_DATASET_NAME=my-pairwise-dataset pnpm eval:pairwise
# Limit to specific number of examples (useful for testing)
EVAL_MAX_EXAMPLES=2 pnpm eval:pairwise
pnpm eval:pairwise --max-examples 2
```
# Run with multiple repetitions
pnpm eval:pairwise --repetitions 3
#### Multi-Generation Evaluation
The `--generations` flag enables multiple workflow generations per prompt, providing a **Generation Correctness** metric:
```bash
# Run 3 generations per prompt with 3 judges each
pnpm eval:pairwise --generations 3 --judges 3 --verbose
# Example output:
# Gen 1: 2/3 judges → ✓ PASS (diag=85%)
# Gen 2: 1/3 judges → ✗ FAIL (diag=60%)
# Gen 3: 3/3 judges → ✓ PASS (diag=95%)
# 📊 [#1] 2/3 gens → PASS (gen_corr=0.67, diag=80%)
```
**Generation Correctness** = (# passing generations) / total generations:
- With `--generations 3`: Values are 0, 0.33, 0.67, or 1
- With `--generations 5`: Values are 0, 0.2, 0.4, 0.6, 0.8, or 1
#### Hierarchical Scoring System
The pairwise evaluation uses a multi-level scoring hierarchy:
| Level | Primary Score | Secondary Score |
|-------|--------------|-----------------|
| Individual do/don't | Binary (true/false) | 0 or 1 |
| 1 LLM judge | false if ANY criterion fails | Average of criteria scores |
| N judges on 1 generation | Majority vote (≥50% pass) | Average diagnostic across judges |
| N generations on 1 prompt | (# passing gens) / N | Average diagnostic across generations |
| Full dataset | Average across prompts | Average diagnostic across all |
This approach reduces variance from LLM non-determinism by using multiple judges and generations.
#### Saving Artifacts with --output-dir
The `--output-dir` flag saves all generated workflows and evaluation results to disk:
```bash
# Save artifacts to ./eval-output directory
pnpm eval:pairwise --generations 3 --output-dir ./eval-output --verbose
```
**Output structure:**
```
eval-output/
├── prompt-1/
│ ├── prompt.txt # Original prompt text
│ ├── criteria.json # dos/donts criteria
│ ├── gen-1/
│ │ ├── workflow.json # Importable n8n workflow
│ │ └── evaluation.json # Judge results for this generation
│ ├── gen-2/
│ │ ├── workflow.json
│ │ └── evaluation.json
│ └── gen-3/
│ ├── workflow.json
│ └── evaluation.json
├── prompt-2/
│ └── ...
└── summary.json # Overall results summary
```
**workflow.json**: Directly importable into n8n (File → Import from file)
**evaluation.json**: Contains per-judge results including violations and passes:
```json
{
"generationIndex": 1,
"majorityPass": false,
"primaryPasses": 1,
"numJudges": 3,
"diagnosticScore": 0.35,
"judges": [
{
"judgeIndex": 1,
"primaryPass": false,
"diagnosticScore": 0.30,
"violations": [{"rule": "...", "justification": "..."}],
"passes": [{"rule": "...", "justification": "..."}]
}
]
}
```
## Configuration
@@ -282,10 +431,77 @@ The evaluation will fail with a clear error message if `nodes.json` is missing.
- `USE_LANGSMITH_EVAL` - Set to "true" to use Langsmith mode
- `USE_PAIRWISE_EVAL` - Set to "true" to use pairwise evaluation mode
- `LANGSMITH_DATASET_NAME` - Override default dataset name
- `EVAL_MAX_EXAMPLES` - Limit number of examples to evaluate (useful for testing)
- `EVALUATION_CONCURRENCY` - Number of parallel test executions (default: 5)
- `GENERATE_TEST_CASES` - Set to "true" to generate additional test cases
- `LLM_MODEL` - Model identifier for metadata tracking
- `EVAL_FEATURE_MULTI_AGENT` - Set to "true" to enable multi-agent mode
- `EVAL_FEATURE_TEMPLATE_EXAMPLES` - Set to "true" to enable template examples
### Feature Flags
Feature flags control experimental or optional behaviors in the AI Workflow Builder agent during evaluations. They can be set via environment variables or CLI arguments.
#### Available Flags
| Flag | Description | Default |
|------|-------------|---------|
| `multiAgent` | Enables multi-agent architecture with specialized sub-agents (supervisor, builder, configurator, discovery) | `false` |
| `templateExamples` | Enables template-based examples in agent prompts | `false` |
#### Setting Feature Flags
**Via Environment Variables:**
```bash
# Enable multi-agent mode
EVAL_FEATURE_MULTI_AGENT=true pnpm eval
# Enable template examples
EVAL_FEATURE_TEMPLATE_EXAMPLES=true pnpm eval:pairwise
# Enable both
EVAL_FEATURE_MULTI_AGENT=true EVAL_FEATURE_TEMPLATE_EXAMPLES=true pnpm eval:langsmith
```
**Via CLI Arguments:**
```bash
# Enable multi-agent mode
pnpm eval --multi-agent
# Enable template examples
pnpm eval:pairwise --template-examples
# Enable both
pnpm eval:langsmith --multi-agent --template-examples
```
#### Usage Across Evaluation Modes
Feature flags work consistently across all evaluation modes:
**CLI Evaluation:**
```bash
pnpm eval --multi-agent --template-examples
```
**Langsmith Evaluation:**
```bash
pnpm eval:langsmith --multi-agent
```
**Pairwise Evaluation (LangSmith mode):**
```bash
pnpm eval:pairwise --multi-agent --template-examples
```
**Pairwise Evaluation (Local mode):**
```bash
pnpm eval:pairwise --prompt "Create a Slack workflow" --dos "Use Slack node" --multi-agent
```
When feature flags are enabled, they are logged at the start of the evaluation:
```
➔ Feature flags enabled: multiAgent, templateExamples
```
## Output
@@ -304,14 +520,22 @@ The evaluation will fail with a clear error message if `nodes.json` is missing.
### Pairwise Evaluation Output
- Results are stored in Langsmith dashboard
- Experiment name format: `pairwise-evals-[uuid]`
- Metrics reported:
- `pairwise_score`: Overall pass rate (0-1)
- `pairwise_passed_count`: Number of criteria that passed
- `pairwise_failed_count`: Number of criteria that were violated
- Experiment name format: `<name>-[uuid]` (default: `pairwise-evals-[uuid]`)
- Metrics reported (single generation mode):
- `pairwise_primary`: Binary pass/fail based on majority vote (0 or 1)
- `pairwise_diagnostic`: Average diagnostic score across judges (0-1)
- `pairwise_judges_passed`: Number of judges that returned primaryPass=true
- `pairwise_total_violations`: Sum of violations across all judges
- `pairwise_total_passes`: Sum of passes across all judges
- Additional metrics reported (multi-generation mode with `--generations N`):
- `pairwise_generation_correctness`: (# passing generations) / N (0, 0.33, 0.67, 1 for N=3)
- `pairwise_aggregated_diagnostic`: Average diagnostic score across all generations
- `pairwise_generations_passed`: Count of generations that passed majority vote
- `pairwise_total_judge_calls`: Total judge invocations (generations × judges)
- Each result includes detailed comments with:
- List of violations with justifications
- List of passes with justifications
- Majority vote summary
- List of violations with justifications (per judge)
- List of passes (per judge)
## Adding New Test Cases

View File

@@ -52,7 +52,8 @@ describe('evaluateWorkflowPairwise', () => {
expect(result).toEqual({
...mockResult,
score: 1,
primaryPass: true,
diagnosticScore: 1,
});
expect(baseEvaluator.createEvaluatorChain).toHaveBeenCalledWith(
mockLlm,
@@ -69,7 +70,7 @@ describe('evaluateWorkflowPairwise', () => {
);
});
it('should calculate score correctly with violations', async () => {
it('should calculate diagnosticScore correctly with violations', async () => {
const mockResult = {
violations: [{ rule: "Don't do that", justification: 'Did it' }],
passes: [{ rule: 'Do this', justification: 'Done' }],
@@ -79,10 +80,11 @@ describe('evaluateWorkflowPairwise', () => {
const result = await evaluateWorkflowPairwise(mockLlm, input);
expect(result.score).toBe(0.5);
expect(result.primaryPass).toBe(false);
expect(result.diagnosticScore).toBe(0.5);
});
it('should return score 0 when no rules evaluated', async () => {
it('should return diagnosticScore 0 when no rules evaluated', async () => {
const mockResult = {
violations: [],
passes: [],
@@ -92,6 +94,7 @@ describe('evaluateWorkflowPairwise', () => {
const result = await evaluateWorkflowPairwise(mockLlm, input);
expect(result.score).toBe(0);
expect(result.primaryPass).toBe(true);
expect(result.diagnosticScore).toBe(0);
});
});

View File

@@ -32,7 +32,10 @@ const pairwiseEvaluationLLMResultSchema = z.object({
});
export type PairwiseEvaluationResult = z.infer<typeof pairwiseEvaluationLLMResultSchema> & {
score: number;
/** True only if ALL criteria passed (no violations) */
primaryPass: boolean;
/** Ratio of passed criteria to total criteria (0-1) */
diagnosticScore: number;
};
const EVALUATOR_SYSTEM_PROMPT = `You are an expert n8n workflow auditor. Your task is to strictly evaluate a candidate workflow against a provided set of requirements.
@@ -96,10 +99,12 @@ export async function evaluateWorkflowPairwise(
});
const totalRules = result.passes.length + result.violations.length;
const score = totalRules > 0 ? result.passes.length / totalRules : 0;
const diagnosticScore = totalRules > 0 ? result.passes.length / totalRules : 0;
const primaryPass = result.violations.length === 0;
return {
...result,
score,
primaryPass,
diagnosticScore,
};
}

View File

@@ -2,6 +2,7 @@ import pLimit from 'p-limit';
import pc from 'picocolors';
import { createProgressBar, updateProgress, displayResults, displayError } from './display.js';
import type { BuilderFeatureFlags } from '../../src/workflow-builder-agent.js';
import { basicTestCases, generateTestCases } from '../chains/test-case-generator.js';
import {
setupTestEnvironment,
@@ -25,6 +26,7 @@ type CliEvaluationOptions = {
testCaseFilter?: string; // Optional test case ID to run only a specific test
testCases?: TestCase[]; // Optional array of test cases to run (if not provided, uses defaults and generation)
repetitions?: number; // Number of times to run each test (e.g. for cache warming analysis)
featureFlags?: BuilderFeatureFlags; // Optional feature flags to pass to the agent (e.g. templateExamples, multiAgent)
};
/**
@@ -32,12 +34,20 @@ type CliEvaluationOptions = {
* Supports concurrency control via EVALUATION_CONCURRENCY environment variable
*/
export async function runCliEvaluation(options: CliEvaluationOptions = {}): Promise<void> {
const { repetitions = 1, testCaseFilter } = options;
const { repetitions = 1, testCaseFilter, featureFlags } = options;
console.log(formatHeader('AI Workflow Builder Full Evaluation', 70));
if (repetitions > 1) {
console.log(pc.yellow(`➔ Each test will be run ${repetitions} times for cache analysis`));
}
if (featureFlags) {
const enabledFlags = Object.entries(featureFlags)
.filter(([, v]) => v === true)
.map(([k]) => k);
if (enabledFlags.length > 0) {
console.log(pc.green(`➔ Feature flags enabled: ${enabledFlags.join(', ')}`));
}
}
console.log();
try {
// Setup test environment
@@ -105,7 +115,9 @@ export async function runCliEvaluation(options: CliEvaluationOptions = {}): Prom
// Create a dedicated agent for this test to avoid state conflicts
const testAgent = createAgent(parsedNodeTypes, llm, tracer);
const result = await runSingleTest(testAgent, llm, testCase, parsedNodeTypes);
const result = await runSingleTest(testAgent, llm, testCase, parsedNodeTypes, {
featureFlags,
});
testResults[testCase.id] = result.error ? 'fail' : 'pass';
completed++;

View File

@@ -5,6 +5,7 @@ import { Client } from 'langsmith/client';
import type { INodeTypeDescription } from 'n8n-workflow';
import { anthropicClaudeSonnet45 } from '../../src/llm-config.js';
import type { BuilderFeatureFlags } from '../../src/workflow-builder-agent.js';
import { WorkflowBuilderAgent } from '../../src/workflow-builder-agent.js';
import { loadNodesFromFile } from '../load-nodes.js';
@@ -76,12 +77,14 @@ export async function setupTestEnvironment(): Promise<TestEnvironment> {
* @param parsedNodeTypes - Array of parsed node type descriptions
* @param llm - Language model instance
* @param tracer - Optional LangChain tracer
* @param featureFlags - Optional feature flags
* @returns Configured WorkflowBuilderAgent
*/
export function createAgent(
parsedNodeTypes: INodeTypeDescription[],
llm: BaseChatModel,
tracer?: LangChainTracer,
featureFlags?: BuilderFeatureFlags,
): WorkflowBuilderAgent {
return new WorkflowBuilderAgent({
parsedNodeTypes,
@@ -89,6 +92,7 @@ export function createAgent(
llmComplexTask: llm,
checkpointer: new MemorySaver(),
tracer,
featureFlags,
});
}

View File

@@ -1,7 +1,7 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { INodeTypeDescription } from 'n8n-workflow';
import type { WorkflowBuilderAgent } from '../../src/workflow-builder-agent';
import type { BuilderFeatureFlags, WorkflowBuilderAgent } from '../../src/workflow-builder-agent';
import { evaluateWorkflow } from '../chains/workflow-evaluator';
import { programmaticEvaluation } from '../programmatic/programmatic-evaluation';
import type { EvaluationInput, TestCase } from '../types/evaluation';
@@ -69,12 +69,22 @@ export function createErrorResult(testCase: TestCase, error: unknown): TestResul
};
}
export interface RunSingleTestOptions {
agent: WorkflowBuilderAgent;
llm: BaseChatModel;
testCase: TestCase;
nodeTypes: INodeTypeDescription[];
userId?: string;
featureFlags?: BuilderFeatureFlags;
}
/**
* Runs a single test case by generating a workflow and evaluating it
* @param agent - The workflow builder agent to use
* @param llm - Language model for evaluation
* @param testCase - Test case to execute
* @param userId - User ID for the session
* @param nodeTypes - Array of node type descriptions
* @param opts - Options object: `userId` (user ID for the session) and `featureFlags` (optional feature flags to pass to the agent)
* @returns Test result with generated workflow and evaluation
*/
export async function runSingleTest(
@@ -82,12 +92,15 @@ export async function runSingleTest(
llm: BaseChatModel,
testCase: TestCase,
nodeTypes: INodeTypeDescription[],
userId: string = 'test-user',
opts?: { userId?: string; featureFlags?: BuilderFeatureFlags },
): Promise<TestResult> {
const userId = opts?.userId ?? 'test-user';
try {
// Generate workflow
const startTime = Date.now();
await consumeGenerator(agent.chat(getChatPayload(testCase.prompt, testCase.id), userId));
await consumeGenerator(
agent.chat(getChatPayload(testCase.prompt, testCase.id, opts?.featureFlags), userId),
);
const generationTime = Date.now() - startTime;
// Get generated workflow with validation

View File

@@ -1,15 +1,54 @@
import type { BuilderFeatureFlags } from '@/workflow-builder-agent';
import { runCliEvaluation } from './cli/runner.js';
import { runPairwiseLangsmithEvaluation } from './langsmith/pairwise-runner.js';
import {
runLocalPairwiseEvaluation,
runPairwiseLangsmithEvaluation,
} from './langsmith/pairwise-runner.js';
import { runLangsmithEvaluation } from './langsmith/runner.js';
import { loadTestCasesFromCsv } from './utils/csv-prompt-loader.js';
// Re-export for external use if needed
export { runCliEvaluation } from './cli/runner.js';
export { runLangsmithEvaluation } from './langsmith/runner.js';
export { runPairwiseLangsmithEvaluation } from './langsmith/pairwise-runner.js';
export {
runLocalPairwiseEvaluation,
runPairwiseLangsmithEvaluation,
} from './langsmith/pairwise-runner.js';
export { runSingleTest } from './core/test-runner.js';
export { setupTestEnvironment, createAgent } from './core/environment.js';
/** Parse an integer flag with default value */
function getIntFlag(flag: string, defaultValue: number, max?: number): number {
const arg = getFlagValue(flag);
if (!arg) return defaultValue;
const parsed = parseInt(arg, 10);
if (Number.isNaN(parsed) || parsed < 1) return defaultValue;
return max ? Math.min(parsed, max) : parsed;
}
/** Parse all CLI arguments */
function parseCliArgs() {
return {
testCaseId: process.argv.includes('--test-case')
? process.argv[process.argv.indexOf('--test-case') + 1]
: undefined,
promptsCsvPath: getFlagValue('--prompts-csv') ?? process.env.PROMPTS_CSV_FILE,
repetitions: getIntFlag('--repetitions', 1),
notionId: getFlagValue('--notion-id'),
numJudges: getIntFlag('--judges', 3),
numGenerations: getIntFlag('--generations', 1, 10),
concurrency: getIntFlag('--concurrency', 5),
maxExamples: getIntFlag('--max-examples', 0), // 0 means no limit
verbose: process.argv.includes('--verbose') || process.argv.includes('-v'),
experimentName: getFlagValue('--name'),
outputDir: getFlagValue('--output-dir'),
prompt: getFlagValue('--prompt'),
dos: getFlagValue('--dos'),
donts: getFlagValue('--donts'),
};
}
/**
* Main entry point for evaluation
* Determines which evaluation mode to run based on environment variables
@@ -17,32 +56,54 @@ export { setupTestEnvironment, createAgent } from './core/environment.js';
async function main(): Promise<void> {
const useLangsmith = process.env.USE_LANGSMITH_EVAL === 'true';
const usePairwiseEval = process.env.USE_PAIRWISE_EVAL === 'true';
const args = parseCliArgs();
// Parse command line arguments for single test case
const testCaseId = process.argv.includes('--test-case')
? process.argv[process.argv.indexOf('--test-case') + 1]
: undefined;
// Parse command line argument for CSV prompts file path
const promptsCsvPath = getFlagValue('--prompts-csv') ?? process.env.PROMPTS_CSV_FILE;
if (promptsCsvPath && (useLangsmith || usePairwiseEval)) {
if (args.promptsCsvPath && (useLangsmith || usePairwiseEval)) {
console.warn('CSV-driven evaluations are only supported in CLI mode. Ignoring --prompts-csv.');
}
// Parse command line arguments for a number of repetitions (applies to both modes)
const repetitionsArg = process.argv.includes('--repetitions')
? parseInt(process.argv[process.argv.indexOf('--repetitions') + 1], 10)
: 1;
const repetitions = Number.isNaN(repetitionsArg) ? 1 : repetitionsArg;
// Parse feature flags from environment variables or CLI arguments
const featureFlags = parseFeatureFlags();
if (usePairwiseEval) {
await runPairwiseLangsmithEvaluation(repetitions);
if (args.prompt) {
// Local mode - run single evaluation without LangSmith
await runLocalPairwiseEvaluation({
prompt: args.prompt,
criteria: { dos: args.dos ?? '', donts: args.donts ?? '' },
numJudges: args.numJudges,
numGenerations: args.numGenerations,
verbose: args.verbose,
outputDir: args.outputDir,
featureFlags,
});
} else {
// LangSmith mode
await runPairwiseLangsmithEvaluation({
repetitions: args.repetitions,
notionId: args.notionId,
numJudges: args.numJudges,
numGenerations: args.numGenerations,
verbose: args.verbose,
experimentName: args.experimentName,
outputDir: args.outputDir,
concurrency: args.concurrency,
maxExamples: args.maxExamples || undefined,
featureFlags,
});
}
} else if (useLangsmith) {
await runLangsmithEvaluation(repetitions);
await runLangsmithEvaluation(args.repetitions, featureFlags);
} else {
const csvTestCases = promptsCsvPath ? loadTestCasesFromCsv(promptsCsvPath) : undefined;
await runCliEvaluation({ testCases: csvTestCases, testCaseFilter: testCaseId, repetitions });
const csvTestCases = args.promptsCsvPath
? loadTestCasesFromCsv(args.promptsCsvPath)
: undefined;
await runCliEvaluation({
testCases: csvTestCases,
testCaseFilter: args.testCaseId,
repetitions: args.repetitions,
featureFlags,
});
}
}
@@ -68,6 +129,36 @@ function getFlagValue(flag: string): string | undefined {
return undefined;
}
/**
 * Parse Builder feature flags from environment variables or CLI arguments.
 *
 * Recognized sources (either one enables the corresponding flag):
 * - EVAL_FEATURE_TEMPLATE_EXAMPLES=true  or  --template-examples
 * - EVAL_FEATURE_MULTI_AGENT=true        or  --multi-agent
 *
 * @returns A flags object when at least one flag is enabled (enabled flags are
 *   `true`, disabled ones `undefined`), or `undefined` when no flag is set.
 */
function parseFeatureFlags(): BuilderFeatureFlags | undefined {
	// A flag is on when its env var is the literal string 'true' OR its CLI switch is present.
	const enabled = (envVar: string, cliFlag: string): boolean =>
		process.env[envVar] === 'true' || process.argv.includes(cliFlag);

	const templateExamples = enabled('EVAL_FEATURE_TEMPLATE_EXAMPLES', '--template-examples');
	const multiAgent = enabled('EVAL_FEATURE_MULTI_AGENT', '--multi-agent');

	// Returning undefined (rather than an all-false object) preserves the
	// "no flags requested" signal for callers.
	if (!templateExamples && !multiAgent) return undefined;

	return {
		templateExamples: templateExamples ? true : undefined,
		multiAgent: multiAgent ? true : undefined,
	};
}
// Run if called directly
if (require.main === module) {
main().catch(console.error);

View File

@@ -5,6 +5,7 @@ import type { INodeTypeDescription } from 'n8n-workflow';
import pc from 'picocolors';
import { createLangsmithEvaluator } from './evaluator';
import type { BuilderFeatureFlags } from '../../src/workflow-builder-agent';
import type { WorkflowState } from '../../src/workflow-state';
import { setupTestEnvironment, createAgent } from '../core/environment';
import {
@@ -20,12 +21,14 @@ import { consumeGenerator, formatHeader, getChatPayload } from '../utils/evaluat
* @param parsedNodeTypes - Node types
* @param llm - Language model
* @param tracer - Optional tracer
* @param featureFlags - Optional feature flags to pass to the agent
* @returns Function that generates workflows from inputs
*/
function createWorkflowGenerator(
parsedNodeTypes: INodeTypeDescription[],
llm: BaseChatModel,
tracer?: LangChainTracer,
featureFlags?: BuilderFeatureFlags,
) {
return async (inputs: typeof WorkflowState.State) => {
// Generate a unique ID for this evaluation run
@@ -43,7 +46,7 @@ function createWorkflowGenerator(
// Create agent for this run
const agent = createAgent(parsedNodeTypes, llm, tracer);
await consumeGenerator(
agent.chat(getChatPayload(messageContent, runId), 'langsmith-eval-user'),
agent.chat(getChatPayload(messageContent, runId, featureFlags), 'langsmith-eval-user'),
);
// Get generated workflow with validation
@@ -75,12 +78,24 @@ function createWorkflowGenerator(
/**
* Runs evaluation using Langsmith
* @param repetitions - Number of times to run each example (default: 1)
* @param featureFlags - Optional feature flags to pass to the agent
*/
export async function runLangsmithEvaluation(repetitions: number = 1): Promise<void> {
export async function runLangsmithEvaluation(
repetitions: number = 1,
featureFlags?: BuilderFeatureFlags,
): Promise<void> {
console.log(formatHeader('AI Workflow Builder Langsmith Evaluation', 70));
if (repetitions > 1) {
console.log(pc.yellow(`➔ Each example will be run ${repetitions} times`));
}
if (featureFlags) {
const enabledFlags = Object.entries(featureFlags)
.filter(([, v]) => v === true)
.map(([k]) => k);
if (enabledFlags.length > 0) {
console.log(pc.green(`➔ Feature flags enabled: ${enabledFlags.join(', ')}`));
}
}
console.log();
// Check for Langsmith API key
@@ -123,7 +138,7 @@ export async function runLangsmithEvaluation(repetitions: number = 1): Promise<v
const startTime = Date.now();
// Create workflow generation function
const generateWorkflow = createWorkflowGenerator(parsedNodeTypes, llm, tracer);
const generateWorkflow = createWorkflowGenerator(parsedNodeTypes, llm, tracer, featureFlags);
// Create evaluator with both LLM-based and programmatic evaluation
const evaluator = createLangsmithEvaluator(llm, parsedNodeTypes);

View File

@@ -8,7 +8,7 @@ import { join } from 'path';
import pc from 'picocolors';
import { anthropicClaudeSonnet45 } from '../../src/llm-config';
import type { ChatPayload } from '../../src/workflow-builder-agent';
import type { BuilderFeatureFlags, ChatPayload } from '../../src/workflow-builder-agent';
import { WorkflowBuilderAgent } from '../../src/workflow-builder-agent';
import type { Violation } from '../types/evaluation';
import type { TestResult } from '../types/test-result';
@@ -277,8 +277,16 @@ export async function consumeGenerator<T>(gen: AsyncGenerator<T>) {
}
}
export function getChatPayload(message: string, id: string): ChatPayload {
export function getChatPayload(
message: string,
id: string,
featureFlags?: BuilderFeatureFlags,
): ChatPayload {
return {
featureFlags: featureFlags ?? {
multiAgent: true,
templateExamples: false,
},
message,
workflowContext: {
currentWorkflow: { id, nodes: [], connections: {} },

View File

@@ -0,0 +1,41 @@
import pc from 'picocolors';
/**
* Simple evaluation logger with verbose mode support.
*
* Usage:
* const log = createLogger(isVerbose);
* log.info('Always shown');
* log.verbose('Only shown in verbose mode');
*/
/**
 * Minimal console logger used by the evaluation scripts.
 *
 * Channels differ only in color and in whether they respect verbose mode:
 * `verbose` messages are printed only when the logger was created with
 * verbose enabled (see createLogger in this file).
 */
export interface EvalLogger {
	/** Always shown - important info (printed in blue). */
	info: (message: string) => void;
	/** Only shown in verbose mode - debug details (printed dimmed). */
	verbose: (message: string) => void;
	/** Success messages (green). */
	success: (message: string) => void;
	/** Warning messages (yellow). */
	warn: (message: string) => void;
	/** Error messages (red). */
	error: (message: string) => void;
	/** Dimmed text for secondary info. */
	dim: (message: string) => void;
	/** Check if verbose mode is enabled (fixed at creation time). */
	isVerbose: boolean;
}
/**
 * Create an EvalLogger.
 *
 * Every channel writes through console.log with picocolors styling; the
 * `verbose` channel is a no-op unless `verbose` was passed as true.
 *
 * @param verbose - When true, `log.verbose(...)` output is emitted (dimmed).
 */
export function createLogger(verbose: boolean = false): EvalLogger {
	// Build a channel that styles each message with the given color function.
	const channel =
		(colorize: (text: string) => string) =>
		(message: string): void => {
			console.log(colorize(message));
		};

	return {
		isVerbose: verbose,
		info: channel(pc.blue),
		verbose: (message: string): void => {
			// Debug details are suppressed entirely outside verbose mode.
			if (verbose) console.log(pc.dim(message));
		},
		success: channel(pc.green),
		warn: channel(pc.yellow),
		error: channel(pc.red),
		dim: channel(pc.dim),
	};
}

View File

@@ -3,57 +3,20 @@ import type { AIMessage, BaseMessage } from '@langchain/core/messages';
import { HumanMessage } from '@langchain/core/messages';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { buildResponderPrompt } from '@/prompts/agents/responder.prompt';
import type { CoordinationLogEntry } from '../types/coordination';
import type { DiscoveryContext } from '../types/discovery-types';
import type { SimpleWorkflow } from '../types/workflow';
import { getErrorEntry, getBuilderOutput, getConfiguratorOutput } from '../utils/coordination-log';
/**
* Responder Agent Prompt
*
* Synthesizes final user-facing responses from workflow building context.
* Also handles conversational queries.
*/
const RESPONDER_PROMPT = `You are a helpful AI assistant for n8n workflow automation.
You have access to context about what has been built, including:
- Discovery results (nodes found)
- Builder output (workflow structure)
- Configuration summary (setup instructions)
FOR WORKFLOW COMPLETION RESPONSES:
When you receive [Internal Context], synthesize a clean user-facing response:
1. Summarize what was built in a friendly way
2. Explain the workflow structure briefly
3. Include setup instructions if provided
4. Ask if user wants adjustments
Example response structure:
"I've created your [workflow type] workflow! Here's what it does:
[Brief explanation of the flow]
**Setup Required:**
[List any configuration steps from the context]
Let me know if you'd like to adjust anything."
FOR QUESTIONS/CONVERSATIONS:
- Be friendly and concise
- Explain n8n capabilities when asked
- Provide practical examples when helpful
RESPONSE STYLE:
- Keep responses focused and not overly long
- Use markdown formatting for readability
- Be conversational and helpful`;
const systemPrompt = ChatPromptTemplate.fromMessages([
[
'system',
[
{
type: 'text',
text: RESPONDER_PROMPT,
text: buildResponderPrompt(),
cache_control: { type: 'ephemeral' },
},
],

View File

@@ -4,63 +4,23 @@ import { HumanMessage } from '@langchain/core/messages';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { z } from 'zod';
import {
buildSupervisorPrompt,
SUPERVISOR_PROMPT_SUFFIX,
} from '@/prompts/agents/supervisor.prompt';
import type { CoordinationLogEntry } from '../types/coordination';
import type { SimpleWorkflow } from '../types/workflow';
import { buildWorkflowSummary } from '../utils/context-builders';
import { summarizeCoordinationLog } from '../utils/coordination-log';
/**
* Supervisor Agent Prompt
*
* Handles INITIAL routing based on user intent.
* After initial routing, deterministic routing takes over based on coordination log.
*/
const SUPERVISOR_PROMPT = `You are a Supervisor that routes user requests to specialist agents.
AVAILABLE AGENTS:
- discovery: Find n8n nodes for building/modifying workflows
- builder: Create nodes and connections (requires discovery first for new node types)
- configurator: Set parameters on EXISTING nodes (no structural changes)
- responder: Answer questions, confirm completion (TERMINAL)
ROUTING DECISION TREE:
1. Is user asking a question or chatting? → responder
Examples: "what does this do?", "explain the workflow", "thanks"
2. Does the request involve NEW or DIFFERENT node types? → discovery
Examples:
- "Build a workflow that..." (new workflow)
- "Use [ServiceB] instead of [ServiceA]" (replacing node type)
- "Add [some integration]" (new integration)
- "Switch from [ServiceA] to [ServiceB]" (swapping services)
3. Is the request about connecting/disconnecting existing nodes? → builder
Examples: "Connect node A to node B", "Remove the connection to X"
4. Is the request about changing VALUES in existing nodes? → configurator
Examples:
- "Change the URL to https://..."
- "Set the timeout to 30 seconds"
- "Update the email subject to..."
KEY DISTINCTION:
- "Use [ServiceB] instead of [ServiceA]" = REPLACEMENT = discovery (new node type needed)
- "Change the [ServiceA] API key" = CONFIGURATION = configurator (same node, different value)
OUTPUT:
- reasoning: One sentence explaining your routing decision
- next: Agent name`;
const systemPrompt = ChatPromptTemplate.fromMessages([
[
'system',
[
{
type: 'text',
text:
SUPERVISOR_PROMPT +
'\n\nGiven the conversation above, which agent should act next? Provide your reasoning and selection.',
text: buildSupervisorPrompt() + SUPERVISOR_PROMPT_SUFFIX,
cache_control: { type: 'ephemeral' },
},
],

View File

@@ -1,22 +1,9 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { BaseMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage } from '@langchain/core/messages';
import { PromptTemplate } from '@langchain/core/prompts';
import z from 'zod';
const compactPromptTemplate = PromptTemplate.fromTemplate(
`Please summarize the following conversation between a user and an AI assistant building an n8n workflow:
<previous_summary>
{previousSummary}
</previous_summary>
<conversation>
{conversationText}
</conversation>
Provide a structured summary that captures the key points, decisions made, current state of the workflow, and suggested next steps.`,
);
import { compactPromptTemplate } from '@/prompts/chains/compact.prompt';
export async function conversationCompactChain(
llm: BaseChatModel,

View File

@@ -4,10 +4,13 @@ import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/
import type { Logger } from 'n8n-workflow';
import { z } from 'zod';
import {
instanceUrlPrompt,
ParameterUpdatePromptBuilder,
} from '@/prompts/chains/parameter-updater';
import { LLMServiceError } from '../errors';
import type { ParameterUpdaterOptions } from '../types/config';
import { instanceUrlPrompt } from './prompts/instance-url';
import { ParameterUpdatePromptBuilder } from './prompts/prompt-builder';
export const parametersSchema = z
.object({

View File

@@ -1,96 +1,11 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { PromptTemplate } from '@langchain/core/prompts';
import { z } from 'zod';
import {
WorkflowTechnique,
TechniqueDescription,
type PromptCategorization,
} from '@/types/categorization';
const examplePrompts = [
{
prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
techniques: [
WorkflowTechnique.MONITORING,
WorkflowTechnique.CHATBOT,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
techniques: [
WorkflowTechnique.FORM_INPUT,
WorkflowTechnique.HUMAN_IN_THE_LOOP,
WorkflowTechnique.NOTIFICATION,
],
},
{
prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
techniques: [
WorkflowTechnique.SCHEDULING,
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_ANALYSIS,
],
},
{
prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
techniques: [
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.ENRICHMENT,
],
},
{
prompt: 'Build a searchable internal knowledge base from past support tickets',
techniques: [
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.DATA_ANALYSIS,
WorkflowTechnique.KNOWLEDGE_BASE,
],
},
];
function formatExamplePrompts() {
return examplePrompts
.map((example) => `- ${example.prompt}${example.techniques.join(',')}`)
.join('\n');
}
const promptCategorizationTemplate = PromptTemplate.fromTemplate(
`Analyze the following user prompt and identify the workflow techniques required to fulfill the request.
Be specific and identify all relevant techniques.
<user_prompt>
{userPrompt}
</user_prompt>
<workflow_techniques>
{techniques}
</workflow_techniques>
The following prompt categorization examples show a prompt → techniques involved to provide a sense
of how the categorization should be carried out.
<example_categorization>
${formatExamplePrompts()}
</example_categorization>
Select a maximum of 5 techniques that you believe are applicable, but only select them if you are
confident that they are applicable. If the prompt is ambigious or does not provide an obvious workflow
do not provide any techniques - if confidence is low avoid providing techniques.
Select ALL techniques that apply to this workflow. Most workflows use multiple techniques.
Rate your confidence in this categorization from 0.0 to 1.0.
`,
);
function formatTechniqueList(): string {
return Object.entries(TechniqueDescription)
.map(([key, description]) => `- **${key}**: ${description}`)
.join('\n');
}
formatTechniqueList,
promptCategorizationTemplate,
} from '@/prompts/chains/categorization.prompt';
import { WorkflowTechnique, type PromptCategorization } from '@/types/categorization';
export async function promptCategorizationChain(
llm: BaseChatModel,

View File

@@ -1,17 +1,7 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { PromptTemplate } from '@langchain/core/prompts';
import z from 'zod';
const workflowNamingPromptTemplate = PromptTemplate.fromTemplate(
`Based on the initial user prompt, please generate a name for the workflow that captures its essence and purpose.
<initial_prompt>
{initialPrompt}
</initial_prompt>
This name should be concise, descriptive, and suitable for a workflow that automates tasks related to the given prompt. The name should be in a format that is easy to read and understand. Do not include the word "workflow" in the name.
`,
);
import { workflowNamingPromptTemplate } from '@/prompts/chains/workflow-name.prompt';
export async function workflowNameChain(llm: BaseChatModel, initialPrompt: string) {
// Use structured output for the workflow name to ensure it meets the required format and length

View File

@@ -0,0 +1,204 @@
# AI Workflow Builder Prompts
Centralized prompts for the n8n AI Workflow Builder. This directory contains all prompts used by agents and chains.
## Directory Structure
```
src/prompts/
├── index.ts # Central exports
├── README.md # This file
├── legacy-agent.prompt.ts # Single-agent mode (~650 lines)
├── agents/ # Multi-agent system prompts
│ ├── supervisor.prompt.ts # Routes requests to specialists
│ ├── discovery.prompt.ts # Finds nodes & categorizes techniques
│ ├── builder.prompt.ts # Creates workflow structure
│ ├── configurator.prompt.ts # Sets node parameters
│ └── responder.prompt.ts # Generates user responses
└── chains/ # Chain-level prompts
├── categorization.prompt.ts # Workflow technique classification
├── compact.prompt.ts # Conversation summarization
├── workflow-name.prompt.ts # Workflow name generation
└── parameter-updater/ # Dynamic prompt building for node updates
├── index.ts # Exports
├── prompt-builder.ts # ParameterUpdatePromptBuilder class
├── prompt-config.ts # Node detection config
├── instance-url.ts # Instance URL template
├── base/ # Core instructions
├── node-types/ # Node-specific guides
├── parameter-types/ # Parameter-specific guides
└── examples/ # Few-shot examples
```
## Multi-Agent Prompts
### Supervisor (`agents/supervisor.prompt.ts`)
Routes user requests to the appropriate specialist agent.
| Export | Description |
|--------|-------------|
| `buildSupervisorPrompt()` | Builds the supervisor system prompt |
| `SUPERVISOR_PROMPT_SUFFIX` | Suffix asking "which agent should act next?" |
**Routing targets:** discovery, builder, configurator, responder
### Discovery (`agents/discovery.prompt.ts`)
Identifies relevant n8n nodes and categorizes workflow techniques.
| Export | Description |
|--------|-------------|
| `buildDiscoveryPrompt(options)` | Builds prompt with optional examples phase |
| `formatTechniqueList()` | Formats available techniques as bullet list |
| `formatExampleCategorizations()` | Formats few-shot examples |
| `exampleCategorizations` | 14 few-shot classification examples |
| `DiscoveryPromptOptions` | Type: `{ includeExamples: boolean }` |
**Input variables:** `{techniques}`, `{exampleCategorizations}`
### Builder (`agents/builder.prompt.ts`)
Constructs workflow structure by creating nodes and connections.
| Export | Description |
|--------|-------------|
| `buildBuilderPrompt()` | Builds the builder system prompt |
**Key sections:** Node creation rules, connection parameters, AI connections, RAG patterns
### Configurator (`agents/configurator.prompt.ts`)
Sets up node parameters using natural language instructions.
| Export | Description |
|--------|-------------|
| `buildConfiguratorPrompt()` | Builds the configurator system prompt |
| `INSTANCE_URL_PROMPT` | Template with `{instanceUrl}` variable |
**Input variables:** `{instanceUrl}`
### Responder (`agents/responder.prompt.ts`)
Generates user-facing responses and handles conversational queries.
| Export | Description |
|--------|-------------|
| `buildResponderPrompt()` | Builds the responder system prompt |
## Legacy Agent Prompt
### `legacy-agent.prompt.ts`
Comprehensive monolithic prompt for single-agent mode. Contains all workflow building logic.
| Export | Description |
|--------|-------------|
| `createMainAgentPrompt(options?)` | Creates ChatPromptTemplate with options |
| `mainAgentPrompt` | Default prompt instance |
| `MainAgentPromptOptions` | Type: `{ includeExamplesPhase?: boolean }` |
**Input variables:** `{instanceUrl}`, `{previousSummary}`, `{messages}`
**Phases:**
1. Categorization (mandatory)
2. Examples (optional, feature-flagged)
3. Discovery (parallel)
4. Analysis (parallel)
5. Creation (parallel)
6. Connection (parallel)
7. Configuration (mandatory)
8. Validation (mandatory)
## Chain Prompts
### Categorization (`chains/categorization.prompt.ts`)
Analyzes user prompts to identify workflow techniques.
| Export | Description |
|--------|-------------|
| `promptCategorizationTemplate` | PromptTemplate for classification |
| `examplePrompts` | 5 few-shot examples |
| `formatExamplePrompts()` | Formats examples as "prompt → techniques" |
| `formatTechniqueList()` | Formats technique descriptions |
**Input variables:** `{userPrompt}`, `{techniques}`
### Compact (`chains/compact.prompt.ts`)
Summarizes multi-turn conversations for context management.
| Export | Description |
|--------|-------------|
| `compactPromptTemplate` | PromptTemplate for summarization |
**Input variables:** `{previousSummary}`, `{conversationText}`
**Output:** Structured summary with key_decisions, current_state, next_steps
### Workflow Name (`chains/workflow-name.prompt.ts`)
Generates descriptive workflow names.
| Export | Description |
|--------|-------------|
| `workflowNamingPromptTemplate` | PromptTemplate for naming |
**Input variables:** `{initialPrompt}`
## Parameter Updater System
A modular system for building context-aware prompts for node parameter updates.
### ParameterUpdatePromptBuilder (`chains/parameter-updater/prompt-builder.ts`)
Dynamically assembles prompts based on node context.
```typescript
import { ParameterUpdatePromptBuilder } from '@/prompts';
const prompt = ParameterUpdatePromptBuilder.buildSystemPrompt({
nodeType: 'n8n-nodes-base.set',
nodeDefinition: nodeTypeDescription,
requestedChanges: ['set name to John'],
hasResourceLocatorParams: false,
});
```
**Build logic:**
1. Always: CORE_INSTRUCTIONS + EXPRESSION_RULES
2. Node-type guide (Set, IF, Switch, HTTP, Tool)
3. Parameter-type guides if applicable
4. COMMON_PATTERNS
5. Relevant examples
6. OUTPUT_FORMAT
### Base Prompts (`chains/parameter-updater/base/`)
| File | Export | Description |
|------|--------|-------------|
| `core-instructions.ts` | `CORE_INSTRUCTIONS` | Parameter update guidelines |
| `expression-rules.ts` | `EXPRESSION_RULES` | n8n expression syntax rules |
| `common-patterns.ts` | `COMMON_PATTERNS` | HTTP Request patterns |
| `output-format.ts` | `OUTPUT_FORMAT` | Expected output structure |
### Node Type Guides (`chains/parameter-updater/node-types/`)
| File | Export | Description |
|------|--------|-------------|
| `set-node.ts` | `SET_NODE_GUIDE` | Assignment structure & types |
| `if-node.ts` | `IF_NODE_GUIDE` | Filter conditions & operators |
| `switch-node.ts` | `SWITCH_NODE_GUIDE` | Rules and routing patterns |
| `http-request.ts` | `HTTP_REQUEST_GUIDE` | URL, headers, body, auth |
| `tool-nodes.ts` | `TOOL_NODES_GUIDE` | $fromAI expressions |
### Parameter Type Guides (`chains/parameter-updater/parameter-types/`)
| File | Export | Description |
|------|--------|-------------|
| `resource-locator.ts` | `RESOURCE_LOCATOR_GUIDE` | __rl structure & modes |
| `system-message.ts` | `SYSTEM_MESSAGE_GUIDE` | AI node message separation |
| `text-fields.ts` | `TEXT_FIELDS_GUIDE` | Expression embedding |

View File

@@ -0,0 +1,253 @@
/**
* Builder Agent Prompt
*
* Constructs workflow structure by creating nodes and connections based on Discovery results.
* Does NOT configure node parameters - that's the Configurator Agent's job.
*/
// Role line that opens the system prompt (joined with the sections below by buildBuilderPrompt).
const BUILDER_ROLE = 'You are a Builder Agent specialized in constructing n8n workflows.';
// Ordered create → connect → validate → respond procedure, capped at 3 validate_structure attempts.
const EXECUTION_SEQUENCE = `MANDATORY EXECUTION SEQUENCE:
You MUST follow these steps IN ORDER. Do not skip any step.
STEP 1: CREATE NODES
- Call add_nodes for EVERY node needed based on discovery results
- Create multiple nodes in PARALLEL for efficiency
- Do NOT respond with text - START BUILDING immediately
STEP 2: CONNECT NODES
- Call connect_nodes for ALL required connections
- Connect multiple node pairs in PARALLEL
STEP 3: VALIDATE (REQUIRED)
- After ALL nodes and connections are created, call validate_structure
- This step is MANDATORY - you cannot finish without it
- If validation finds issues (missing trigger, invalid connections), fix them and validate again
- MAXIMUM 3 VALIDATION ATTEMPTS: After 3 calls to validate_structure, proceed to respond regardless of remaining issues
STEP 4: RESPOND TO USER
- Only after validation passes, provide your brief summary
⚠️ NEVER respond to the user without calling validate_structure first ⚠️`;
// Arguments the agent must supply on every add_nodes call.
// NOTE(review): the doubled braces ({{}}) look like prompt-template escaping for a literal "{}" — confirm this text is rendered through a template engine.
const NODE_CREATION = `NODE CREATION:
Each add_nodes call creates ONE node. You must provide:
- nodeType: The exact type from discovery (e.g., "n8n-nodes-base.httpRequest")
- name: Descriptive name (e.g., "Fetch Weather Data")
- connectionParametersReasoning: Explain your thinking about connection parameters
- connectionParameters: Parameters that affect connections (or {{}} if none needed)`;
// Mandates a "Workflow Configuration" Set node directly after the trigger in every workflow.
const WORKFLOW_CONFIG_NODE = `<workflow_configuration_node>
Always include a Workflow Configuration node at the start of every workflow.
The Workflow Configuration node (n8n-nodes-base.set) should be placed immediately after the trigger node and before all other processing nodes.
Placement rules:
- Add between trigger and first processing node
- Connect: Trigger → Workflow Configuration → First processing node
- Name it "Workflow Configuration"
</workflow_configuration_node>`;
// Parsing guidance: prefer Structured Output Parser / Extract From File over Code nodes.
const DATA_PARSING = `<data_parsing_strategy>
For AI-generated structured data, prefer Structured Output Parser nodes over Code nodes.
For binary file data, use Extract From File node to extract content from files before processing.
Use Code nodes only for custom business logic beyond parsing.
STRUCTURED OUTPUT PARSER RULE:
When Discovery results include Structured Output Parser:
1. Create the Structured Output Parser node
2. Set AI Agent's hasOutputParser: true in connectionParameters
3. Connect: Structured Output Parser → AI Agent (ai_outputParser connection)
</data_parsing_strategy>`;
// Design heuristics (IF/Set/Schedule/error handling); explicitly bans Split In Batches nodes.
const PROACTIVE_DESIGN = `<proactive_design>
Anticipate workflow needs:
- IF nodes for conditional logic when multiple outcomes exist
- Set nodes for data transformation between incompatible formats
- Schedule Triggers for recurring tasks
- Error handling for external service calls
NEVER use Split In Batches nodes.
</proactive_design>`;
// Warns that parameter defaults can hide connection ports; connection-affecting parameters must be set explicitly.
const NODE_DEFAULTS = `<node_defaults_warning>
CRITICAL: NEVER RELY ON DEFAULT PARAMETER VALUES FOR CONNECTIONS
Default values often hide connection inputs/outputs. You MUST explicitly configure parameters that affect connections:
- Vector Store: Mode parameter affects available connections - always set explicitly (e.g., mode: "insert", "retrieve", "retrieve-as-tool")
- AI Agent: hasOutputParser default may not match your workflow needs
- Document Loader: textSplittingMode affects whether it accepts a text splitter input
ALWAYS check node details and set connectionParameters explicitly.
</node_defaults_warning>`;
// Worked connectionParameters examples per node kind (doubled braces are escaped literal braces).
const CONNECTION_PARAMETERS = `CONNECTION PARAMETERS EXAMPLES:
- Static nodes (HTTP Request, Set, Code): reasoning="Static inputs/outputs", parameters={{}}
- AI Agent with structured output: reasoning="hasOutputParser enables ai_outputParser input for Structured Output Parser", parameters={{ hasOutputParser: true }}
- Vector Store insert: reasoning="Insert mode requires document input", parameters={{ mode: "insert" }}
- Document Loader custom: reasoning="Custom mode enables text splitter input", parameters={{ textSplittingMode: "custom" }}
- Switch with routing rules: reasoning="Switch needs N outputs, creating N rules.values entries with outputKeys", parameters={{ mode: "rules", rules: {{ values: [...] }} }} - see <switch_node_pattern> for full structure`;
// Conditions under which the AI Agent's hasOutputParser must be set to true.
const STRUCTURED_OUTPUT_PARSER = `<structured_output_parser_guidance>
WHEN TO SET hasOutputParser: true on AI Agent:
- Discovery found Structured Output Parser node → MUST set hasOutputParser: true
- AI output will be used in conditions (IF/Switch nodes checking $json.field)
- AI output will be formatted/displayed (HTML emails, reports with specific sections)
- AI output will be stored in database/data tables with specific fields
- AI is classifying, scoring, or extracting specific data fields
</structured_output_parser_guidance>`;
/** AI sub-nodes are SOURCES (they "provide" capabilities), so arrows point FROM sub-node TO parent. */
const AI_CONNECTIONS = `<node_connections_understanding>
n8n connections flow from SOURCE (output) to TARGET (input).
Regular data flow: Source node output → Target node input
Example: HTTP Request → Set (HTTP Request is source, Set is target)
AI sub-nodes PROVIDE capabilities, making them the SOURCE:
- OpenAI Chat Model → AI Agent [ai_languageModel]
- Calculator Tool → AI Agent [ai_tool]
- Window Buffer Memory → AI Agent [ai_memory]
- Token Splitter → Default Data Loader [ai_textSplitter]
- Default Data Loader → Vector Store [ai_document]
- Embeddings OpenAI → Vector Store [ai_embedding]
</node_connections_understanding>`;
// Disambiguates the AI Agent main node from the AI Agent Tool sub-node; defaults to AI Agent.
const AGENT_NODE_DISTINCTION = `<agent_node_distinction>
Distinguish between two different agent node types:
1. **AI Agent** (@n8n/n8n-nodes-langchain.agent)
- Main workflow node that orchestrates AI tasks
- Use for: Primary AI logic, chatbots, autonomous workflows
2. **AI Agent Tool** (@n8n/n8n-nodes-langchain.agentTool)
- Sub-node that acts as a tool for another AI Agent
- Use for: Multi-agent systems where one agent calls another
Default assumption: When discovery results include "agent", use AI Agent
unless explicitly specified as "agent tool" or "sub-agent".
</agent_node_distinction>`;
// Canonical RAG wiring: main data into Vector Store; loader/embeddings/splitter as ai_* capability inputs.
const RAG_PATTERN = `<rag_workflow_pattern>
For RAG (Retrieval-Augmented Generation) workflows:
Main data flow:
- Data source (e.g., HTTP Request) → Vector Store [main connection]
AI capability connections:
- Document Loader → Vector Store [ai_document]
- Embeddings → Vector Store [ai_embedding]
- Text Splitter → Document Loader [ai_textSplitter]
Common mistake to avoid:
- NEVER connect Document Loader to main data outputs
- Document Loader is an AI sub-node that gives Vector Store document processing capability
</rag_workflow_pattern>`;
// Placeholder rules.values[] template for multi-output Switch nodes; one entry per output branch.
// Doubled braces render as literal JSON braces when the prompt template is evaluated.
const SWITCH_NODE_PATTERN = `<switch_node_pattern>
For Switch nodes with multiple routing paths:
- The number of outputs is determined by the number of entries in rules.values[]
- You MUST create the rules.values[] array with placeholder entries for each output branch
- Each entry needs: conditions structure (with empty leftValue/rightValue) + renameOutput: true + descriptive outputKey
- Configurator will fill in the actual condition values later
- Use descriptive node names like "Route by Amount" or "Route by Status"
Example connectionParameters for 3-way routing:
{{
"mode": "rules",
"rules": {{
"values": [
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 1 Name"
}},
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 2 Name"
}},
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 3 Name"
}}
]
}}
}}
</switch_node_pattern>`;
/** Reference examples mapping common node pairings to their n8n connection types. */
const CONNECTION_TYPES = `<connection_type_examples>
**Main Connections** (regular data flow):
- Trigger → HTTP Request → Set → Email
**AI Language Model Connections** (ai_languageModel):
- OpenAI Chat Model → AI Agent
**AI Tool Connections** (ai_tool):
- Calculator Tool → AI Agent
- AI Agent Tool → AI Agent (for multi-agent systems)
**AI Document Connections** (ai_document):
- Document Loader → Vector Store
**AI Embedding Connections** (ai_embedding):
- OpenAI Embeddings → Vector Store
**AI Text Splitter Connections** (ai_textSplitter):
- Token Text Splitter → Document Loader
**AI Memory Connections** (ai_memory):
- Window Buffer Memory → AI Agent
**AI Vector Store in retrieve-as-tool mode** (ai_tool):
- Vector Store → AI Agent
</connection_type_examples>`;
/** Hard prohibitions keeping the Builder Agent within its structural role. */
const RESTRICTIONS = `DO NOT:
- Respond before calling validate_structure
- Skip validation even if you think structure is correct
- Add commentary between tool calls - execute tools silently
- Configure node parameters (that's the Configurator Agent's job)
- Search for nodes (that's the Discovery Agent's job)
- Make assumptions about node types - use exactly what Discovery found`;
/** Shape of the Builder Agent's final (post-validation) text response. */
const RESPONSE_FORMAT = `RESPONSE FORMAT (only after validation):
Provide ONE brief text message summarizing:
- What nodes were added
- How they're connected
Example: "Created 4 nodes: Trigger → Weather → Image Generation → Email"`;
/**
 * Assembles the Builder Agent system prompt from its ordered sections.
 * Sections are separated by blank lines; the order mirrors the agent's
 * expected flow: role → execution sequence → patterns → restrictions → format.
 */
export function buildBuilderPrompt(): string {
	const sections: string[] = [
		BUILDER_ROLE,
		EXECUTION_SEQUENCE,
		NODE_CREATION,
		WORKFLOW_CONFIG_NODE,
		DATA_PARSING,
		PROACTIVE_DESIGN,
		NODE_DEFAULTS,
		CONNECTION_PARAMETERS,
		STRUCTURED_OUTPUT_PARSER,
		AI_CONNECTIONS,
		AGENT_NODE_DISTINCTION,
		RAG_PATTERN,
		SWITCH_NODE_PATTERN,
		CONNECTION_TYPES,
		RESTRICTIONS,
		RESPONSE_FORMAT,
	];
	return sections.join('\n\n');
}

View File

@@ -0,0 +1,137 @@
/**
* Configurator Agent Prompt
*
* Sets up node parameters after the Builder Agent has created the workflow structure.
* Uses natural language instructions to configure each node's settings.
*/
/** One-line persona for the Configurator Agent. */
const CONFIGURATOR_ROLE =
'You are a Configurator Agent specialized in setting up n8n node parameters.';
/**
 * Ordered, mandatory tool-call sequence: configure every node in parallel,
 * validate (capped at 3 attempts to avoid loops), only then respond.
 */
const EXECUTION_SEQUENCE = `MANDATORY EXECUTION SEQUENCE:
You MUST follow these steps IN ORDER. Do not skip any step.
STEP 1: CONFIGURE ALL NODES
- Call update_node_parameters for EVERY node in the workflow
- Configure multiple nodes in PARALLEL for efficiency
- Do NOT respond with text - START CONFIGURING immediately
STEP 2: VALIDATE (REQUIRED)
- After ALL configurations complete, call validate_configuration
- This step is MANDATORY - you cannot finish without it
- If validation finds issues, fix them and validate again
- MAXIMUM 3 VALIDATION ATTEMPTS: After 3 calls to validate_configuration, proceed to respond regardless of remaining issues
STEP 3: RESPOND TO USER
- Only after validation passes, provide your response
NEVER respond to the user without calling validate_configuration first`;
/** Tells the agent to act on the injected <current_workflow_json> context immediately. */
const WORKFLOW_JSON_DETECTION = `WORKFLOW JSON DETECTION:
- You receive <current_workflow_json> in your context
- If you see nodes in the workflow JSON, you MUST configure them IMMEDIATELY
- Look at the workflow JSON, identify each node, and call update_node_parameters for ALL of them`;
/** Examples of the natural-language instruction style expected by update_node_parameters. */
const PARAMETER_CONFIGURATION = `PARAMETER CONFIGURATION:
Use update_node_parameters with natural language instructions:
- "Set URL to https://api.example.com/weather"
- "Add header Authorization: Bearer token"
- "Set method to POST"
- "Add field 'status' with value 'processed'"`;
/**
 * $fromAI expression guidance — valid only in tool nodes (node types ending
 * in "Tool"), where the AI supplies the value at runtime.
 */
const TOOL_NODE_EXPRESSIONS = `SPECIAL EXPRESSIONS FOR TOOL NODES:
Tool nodes (types ending in "Tool") support $fromAI expressions:
- "Set sendTo to ={{ $fromAI('to') }}"
- "Set subject to ={{ $fromAI('subject') }}"
- "Set message to ={{ $fromAI('message_html') }}"
- "Set timeMin to ={{ $fromAI('After', '', 'string') }}"
$fromAI syntax: ={{ $fromAI('key', 'description', 'type', defaultValue) }}
- ONLY use in tool nodes (check node type ends with "Tool")
- Use for dynamic values that AI determines at runtime
- For regular nodes, use static values or standard expressions`;
/** Per-node-type checklist of parameters that must always be set explicitly. */
const CRITICAL_PARAMETERS = `CRITICAL PARAMETERS TO ALWAYS SET:
- HTTP Request: URL, method, headers (if auth needed)
- Set node: Fields to set with values
- Code node: The actual code to execute
- IF node: Conditions to check
- Switch node: Configure rules.values[] with conditions for each output branch (uses same filter structure as IF node)
- Document Loader: dataType parameter ('binary' for files like PDF, 'json' for JSON data)
- AI nodes: Prompts, models, configurations
- Tool nodes: Use $fromAI for dynamic recipient/subject/message fields`;
/** Warns against leaving node defaults in place; lists known default-value traps. */
const DEFAULT_VALUES_WARNING = `NEVER RELY ON DEFAULT VALUES:
Defaults are traps that cause runtime failures. Examples:
- Document Loader defaults to 'json' but MUST be 'binary' when processing files
- HTTP Request defaults to GET but APIs often need POST
- Vector Store mode affects available connections - set explicitly (retrieve-as-tool when using with AI Agent)`;
/**
 * Per-rule structure for configuring Switch nodes. Doubled braces escape
 * literal braces; the quadruple braces around $json.fieldName render as an
 * escaped n8n expression ({{ $json.fieldName }}) in the final prompt.
 */
const SWITCH_NODE_CONFIGURATION = `<switch_node_configuration>
Switch nodes require configuring rules.values[] array - each entry creates one output:
Structure per rule:
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [
{{
"leftValue": "={{{{ $json.fieldName }}}}",
"rightValue": <value>,
"operator": {{ "type": "number|string", "operation": "lt|gt|equals|etc" }}
}}
],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Descriptive Label"
}}
For numeric ranges (e.g., $100-$1000):
- Use TWO conditions with combinator: "and"
- First: gte (greater than or equal)
- Second: lte (less than or equal)
Always set renameOutput: true and provide descriptive outputKey labels.
</switch_node_configuration>`;
/** Output contract: a terse summary consumed by other agents, not end users. */
const RESPONSE_FORMAT = `<response_format>
After validation passes, provide a concise summary:
- List any placeholders requiring user configuration (e.g., "URL placeholder needs actual endpoint")
- Note which nodes were configured and key settings applied
- Keep it brief - this output is used for coordination with other LLM agents, not displayed directly to users
</response_format>`;
/** Hard prohibitions keeping the Configurator Agent from skipping validation. */
const RESTRICTIONS = `DO NOT:
- Respond before calling validate_configuration
- Skip validation even if you think configuration is correct
- Add commentary between tool calls - execute tools silently`;
/**
 * Uses {instanceUrl} as a LangChain template variable.
 * Appended so webhook and chat-trigger nodes can construct absolute
 * callback/endpoint URLs from the instance's base URL.
 */
export const INSTANCE_URL_PROMPT = `
<instance_url>
The n8n instance base URL is: {instanceUrl}
This URL is essential for webhook nodes and chat triggers as it provides the base URL for:
- Webhook URLs that external services need to call
- Chat trigger URLs for conversational interfaces
- Any node that requires the full instance URL to generate proper callback URLs
When working with webhook or chat trigger nodes, use this URL as the base for constructing proper endpoint URLs.
</instance_url>
`;
/**
 * Assembles the Configurator Agent system prompt from its ordered sections,
 * separated by blank lines. INSTANCE_URL_PROMPT is intentionally excluded —
 * it is exported separately for callers that template in {instanceUrl}.
 */
export function buildConfiguratorPrompt(): string {
	const sections: string[] = [
		CONFIGURATOR_ROLE,
		EXECUTION_SEQUENCE,
		WORKFLOW_JSON_DETECTION,
		PARAMETER_CONFIGURATION,
		TOOL_NODE_EXPRESSIONS,
		CRITICAL_PARAMETERS,
		DEFAULT_VALUES_WARNING,
		SWITCH_NODE_CONFIGURATION,
		RESPONSE_FORMAT,
		RESTRICTIONS,
	];
	return sections.join('\n\n');
}

View File

@@ -0,0 +1,315 @@
/**
* Discovery Agent Prompt
*
* Identifies relevant n8n nodes and their connection-changing parameters based on
* the user's request. Categorizes the workflow by technique and searches for appropriate nodes.
*/
import {
TechniqueDescription,
WorkflowTechnique,
type WorkflowTechniqueType,
} from '@/types/categorization';
/**
 * Few-shot examples for technique classification.
 * NOTE(review): the first five entries duplicate examplePrompts in
 * chains/categorization.prompt.ts — consider sharing one list so the two
 * stay in sync.
 */
export const exampleCategorizations: Array<{
	prompt: string;
	techniques: WorkflowTechniqueType[];
}> = [
	{
		prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
		techniques: [
			WorkflowTechnique.MONITORING,
			WorkflowTechnique.CHATBOT,
			WorkflowTechnique.CONTENT_GENERATION,
		],
	},
	{
		prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
		techniques: [
			WorkflowTechnique.FORM_INPUT,
			WorkflowTechnique.HUMAN_IN_THE_LOOP,
			WorkflowTechnique.NOTIFICATION,
		],
	},
	{
		prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
		techniques: [
			WorkflowTechnique.SCHEDULING,
			WorkflowTechnique.SCRAPING_AND_RESEARCH,
			WorkflowTechnique.DATA_EXTRACTION,
			WorkflowTechnique.DATA_ANALYSIS,
		],
	},
	{
		prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
		techniques: [
			WorkflowTechnique.DOCUMENT_PROCESSING,
			WorkflowTechnique.DATA_EXTRACTION,
			WorkflowTechnique.DATA_TRANSFORMATION,
			WorkflowTechnique.ENRICHMENT,
		],
	},
	{
		prompt: 'Build a searchable internal knowledge base from past support tickets',
		techniques: [
			WorkflowTechnique.DATA_TRANSFORMATION,
			WorkflowTechnique.DATA_ANALYSIS,
			WorkflowTechnique.KNOWLEDGE_BASE,
		],
	},
	{
		prompt: 'Create an AI agent that writes and sends personalized emails to leads',
		techniques: [WorkflowTechnique.CONTENT_GENERATION, WorkflowTechnique.NOTIFICATION],
	},
	{
		prompt:
			'Fetch trending topics from Google Trends and Reddit, select the best ones, and create social posts',
		techniques: [
			WorkflowTechnique.SCRAPING_AND_RESEARCH,
			WorkflowTechnique.TRIAGE,
			WorkflowTechnique.CONTENT_GENERATION,
		],
	},
	{
		prompt:
			'Trigger when a new contact is created in HubSpot and enrich their profile with LinkedIn data',
		techniques: [WorkflowTechnique.MONITORING, WorkflowTechnique.ENRICHMENT],
	},
	{
		prompt: 'Get stock prices from financial APIs and analyze volatility patterns',
		techniques: [WorkflowTechnique.SCRAPING_AND_RESEARCH, WorkflowTechnique.DATA_ANALYSIS],
	},
	{
		prompt: 'Generate video reels from templates and auto-post to social media on schedule',
		techniques: [
			WorkflowTechnique.SCHEDULING,
			WorkflowTechnique.DOCUMENT_PROCESSING,
			WorkflowTechnique.CONTENT_GENERATION,
		],
	},
	{
		prompt: 'Receive news from Telegram channels, filter relevant ones, and forward to my channel',
		techniques: [
			WorkflowTechnique.MONITORING,
			WorkflowTechnique.TRIAGE,
			WorkflowTechnique.NOTIFICATION,
		],
	},
	{
		prompt: 'Analyze YouTube video performance data and generate a weekly report',
		techniques: [
			WorkflowTechnique.SCRAPING_AND_RESEARCH,
			WorkflowTechnique.DATA_ANALYSIS,
			WorkflowTechnique.DATA_TRANSFORMATION,
		],
	},
	{
		prompt:
			'Create a chatbot that answers questions using data from a Google Sheet as knowledge base',
		techniques: [WorkflowTechnique.CHATBOT, WorkflowTechnique.KNOWLEDGE_BASE],
	},
	{
		prompt: 'Form submission with file upload triggers document extraction and approval workflow',
		techniques: [
			WorkflowTechnique.FORM_INPUT,
			WorkflowTechnique.DOCUMENT_PROCESSING,
			WorkflowTechnique.HUMAN_IN_THE_LOOP,
		],
	},
];
/** Renders every workflow technique as a markdown bullet: "- **KEY**: description". */
export function formatTechniqueList(): string {
	const bullets: string[] = [];
	for (const [technique, blurb] of Object.entries(TechniqueDescription)) {
		bullets.push(`- **${technique}**: ${blurb}`);
	}
	return bullets.join('\n');
}
/**
 * Formats the few-shot examples as "prompt → techniques" lines for the
 * discovery prompt's <example_categorizations> section.
 *
 * Fix: the prompt and technique list were concatenated with no separator
 * (e.g. "...messagesMONITORING, CHATBOT"), garbling the format that the
 * surrounding prompt text describes as "a prompt → techniques".
 */
export function formatExampleCategorizations(): string {
	return exampleCategorizations
		.map((example) => `- ${example.prompt} → ${example.techniques.join(', ')}`)
		.join('\n');
}
/** Options controlling which tools and process steps the discovery prompt advertises. */
export interface DiscoveryPromptOptions {
	// When true, the get_workflow_examples tool and its process step are included.
	includeExamples: boolean;
}
/** Role statement for the Discovery Agent. */
const DISCOVERY_ROLE = `You are a Discovery Agent for n8n AI Workflow Builder.
YOUR ROLE: Identify relevant n8n nodes and their connection-changing parameters.`;
/**
 * Technique-selection guidance. {techniques} and {exampleCategorizations} are
 * template placeholders filled in by the caller (see formatTechniqueList /
 * formatExampleCategorizations).
 */
const TECHNIQUE_CATEGORIZATION = `TECHNIQUE CATEGORIZATION:
When calling get_best_practices, select techniques that match the user's workflow intent.
<available_techniques>
{techniques}
</available_techniques>
<example_categorizations>
{exampleCategorizations}
</example_categorizations>`;
/** Disambiguation notes for commonly-confused technique pairs, plus selection limits. */
const TECHNIQUE_CLARIFICATIONS = `<technique_clarifications>
Common distinctions to get right:
- **NOTIFICATION vs CHATBOT**: Use NOTIFICATION when SENDING emails/messages/alerts (including to Telegram CHANNELS which are broadcast-only). Use CHATBOT only when RECEIVING and REPLYING to direct messages in a conversation.
- **MONITORING**: Use when workflow TRIGGERS on external events (new record created, status changed, incoming webhook, new message in channel). NOT just scheduled runs.
- **SCRAPING_AND_RESEARCH vs DATA_EXTRACTION**: Use SCRAPING when fetching from EXTERNAL sources (APIs, websites, social media). Use DATA_EXTRACTION for parsing INTERNAL data you already have.
- **TRIAGE**: Use when SELECTING, PRIORITIZING, ROUTING, or QUALIFYING items (e.g., "pick the best", "route to correct team", "qualify leads").
- **DOCUMENT_PROCESSING**: Use for ANY file handling - PDFs, images, videos, Excel, Google Sheets, audio files, file uploads in forms.
- **HUMAN_IN_THE_LOOP**: Use when workflow PAUSES for human approval, review, signing documents, responding to polls, or any manual input before continuing.
- **DATA_ANALYSIS**: Use when ANALYZING, CLASSIFYING, IDENTIFYING PATTERNS, or UNDERSTANDING data (e.g., "analyze outcomes", "learn from previous", "classify by type", "identify trends").
- **KNOWLEDGE_BASE**: Use when storing/retrieving from a DATA SOURCE for Q&A - includes vector DBs, spreadsheets used as databases, document collections.
- **DATA_TRANSFORMATION**: Use when CONVERTING data format, creating REPORTS/SUMMARIES from analyzed data, or restructuring output.
</technique_clarifications>
Technique selection rules:
- Select ALL techniques that apply (most workflows use 2-4)
- Maximum 5 techniques
- Only select techniques you're confident apply`;
/**
 * Defines what makes a parameter "connection-changing": it must appear in the
 * <input>/<output> expressions of a node's <connections> details. Includes a
 * positive example, a counter-example, and a list of known cases.
 */
const CONNECTION_PARAMETERS = `CONNECTION-CHANGING PARAMETERS - CRITICAL RULES:
A parameter is connection-changing ONLY IF it appears in <input> or <output> expressions within <node_details>.
**How to identify:**
1. Look at the <connections> section in node details
2. Check if <input> or <output> uses expressions like: ={{...parameterName...}}
3. If a parameter is referenced in these expressions, it IS connection-changing
4. If a parameter is NOT in <input>/<output> expressions, it is NOT connection-changing
**Example from AI Agent:**
\`\`\`xml
<input>={{...hasOutputParser, needsFallback...}}</input>
\`\`\`
→ hasOutputParser and needsFallback ARE connection-changing (they control which inputs appear)
**Counter-example:**
\`\`\`xml
<properties>
<property name="promptType">...</property> <!-- NOT in <input>/<output> -->
<property name="systemMessage">...</property> <!-- NOT in <input>/<output> -->
</properties>
\`\`\`
→ promptType and systemMessage are NOT connection-changing (they don't affect connections)
**Common connection-changing parameters:**
- Vector Store: mode (appears in <input>/<output> expressions)
- AI Agent: hasOutputParser, needsFallback (appears in <input> expression)
- Merge: numberInputs (appears in <input> expression)
- Webhook: responseMode (appears in <output> expression)`;
/** Special cases whose output/input count depends on parameter values (Switch, Merge). */
const DYNAMIC_OUTPUT_NODES = `<dynamic_output_nodes>
Some nodes have DYNAMIC outputs that depend on parameter values:
**Switch Node** (n8n-nodes-base.switch):
- When mode is "rules", the number of outputs equals the number of routing rules
- Connection parameter: mode: "rules" - CRITICAL for enabling rule-based routing
- Each rule in rules.values[] creates one output
- The rules parameter uses the same filter structure as IF node conditions
- ALWAYS flag mode as connection-changing with possibleValues: ["rules", "expression"]
**Merge Node** (n8n-nodes-base.merge):
- numberInputs parameter controls how many inputs the node accepts
When you find these nodes, ALWAYS flag mode/numberInputs as connection-changing parameters with possibleValues.
</dynamic_output_nodes>`;
/** Reminds the agent to search for required AI sub-nodes alongside their parents. */
const SUB_NODES_SEARCHES = `SUB-NODES SEARCHES:
When searching for AI nodes, ALSO search for their required sub-nodes:
- "AI Agent" → also search for "Chat Model", "Memory", "Output Parser"
- "Basic LLM Chain" → also search for "Chat Model", "Output Parser"
- "Vector Store" → also search for "Embeddings", "Document Loader"`;
/** Criteria for when workflows should include a Structured Output Parser node. */
const STRUCTURED_OUTPUT_PARSER = `STRUCTURED OUTPUT PARSER - WHEN TO INCLUDE:
Search for "Structured Output Parser" (@n8n/n8n-nodes-langchain.outputParserStructured) when:
- AI output will be used programmatically (conditions, formatting, database storage, API calls)
- AI needs to extract specific fields (e.g., score, category, priority, action items)
- AI needs to classify/categorize data into defined categories
- Downstream nodes need to access specific fields from AI response (e.g., $json.score, $json.category)
- Output will be displayed in a formatted way (e.g., HTML email with specific sections)
- Data needs validation against a schema before processing
- Always use search_nodes to find the exact node names and versions - NEVER guess versions`;
/**
 * Non-negotiable rules for the Discovery Agent's tool-call ordering and
 * output shape. (Typo fix: "RELVANT" → "RELEVANT".)
 */
const CRITICAL_RULES = `CRITICAL RULES:
- NEVER ask clarifying questions
- ALWAYS call get_best_practices first
- THEN Call search_nodes to learn about available nodes and their inputs and outputs
- FINALLY call get_node_details IN PARALLEL for speed to get more details about RELEVANT node
- ALWAYS extract version number from <version> tag in node details
- NEVER guess node versions - always use search_nodes to find exact versions
- ONLY flag connectionChangingParameters if they appear in <input> or <output> expressions
- If no parameters appear in connection expressions, return empty array []
- Output ONLY: nodesFound with {{ nodeName, version, reasoning, connectionChangingParameters }}`;
/** Hard prohibitions keeping the Discovery Agent terse and on-task. */
const RESTRICTIONS = `DO NOT:
- Output text commentary between tool calls
- Include bestPractices or categorization in submit_discovery_results
- Flag parameters that don't affect connections
- Stop without calling submit_discovery_results`;
/**
 * Lists the tools available to the Discovery Agent, one bullet per line.
 * get_workflow_examples is advertised only when examples are enabled;
 * submit_discovery_results is always listed last.
 */
function generateAvailableToolsList(options: DiscoveryPromptOptions): string {
	const lines = [
		'- get_best_practices: Retrieve best practices (internal context)',
		'- search_nodes: Find n8n nodes by keyword',
		'- get_node_details: Get complete node information including <connections>',
	];
	if (options.includeExamples) {
		lines.push('- get_workflow_examples: Search for workflow examples as reference');
	}
	return [...lines, '- submit_discovery_results: Submit final results'].join('\n');
}
/**
 * Builds the numbered PROCESS section of the discovery prompt.
 * When examples are enabled, an extra get_workflow_examples step is inserted
 * and the "identify components" step mentions examples as an input.
 */
function generateProcessSteps(options: DiscoveryPromptOptions): string {
	const { includeExamples } = options;
	const steps: string[] = [];
	steps.push('**Analyze user prompt** - Extract services, models, and technologies mentioned');
	steps.push('**Call get_best_practices** with identified techniques (internal context)');
	if (includeExamples) {
		steps.push('**Call get_workflow_examples** with search queries for mentioned services/models');
	}
	steps.push(
		`**Identify workflow components** from user request, best practices${includeExamples ? ', and examples' : ''}`,
	);
	steps.push(
		'**Call search_nodes IN PARALLEL** for all components (e.g., "Gmail", "OpenAI", "Schedule")',
	);
	steps.push('**Call get_node_details IN PARALLEL** for ALL promising nodes (batch multiple calls)');
	steps.push(`**Extract node information** from each node_details response:
- Node name from <name> tag
- Version number from <version> tag (required - extract the number)
- Connection-changing parameters from <connections> section`);
	steps.push('**Call submit_discovery_results** with complete nodesFound array');
	return steps.map((step, index) => `${index + 1}. ${step}`).join('\n');
}
/**
 * Assembles the full Discovery Agent system prompt. The tool list and
 * process steps vary with the options; all other sections are static.
 */
export function buildDiscoveryPrompt(options: DiscoveryPromptOptions): string {
	const sections = [
		DISCOVERY_ROLE,
		`AVAILABLE TOOLS:\n${generateAvailableToolsList(options)}`,
		`PROCESS:\n${generateProcessSteps(options)}`,
		TECHNIQUE_CATEGORIZATION,
		TECHNIQUE_CLARIFICATIONS,
		CONNECTION_PARAMETERS,
		DYNAMIC_OUTPUT_NODES,
		SUB_NODES_SEARCHES,
		STRUCTURED_OUTPUT_PARSER,
		CRITICAL_RULES,
		RESTRICTIONS,
	];
	return sections.join('\n\n');
}

View File

@@ -0,0 +1,45 @@
/**
* Responder Agent Prompt
*
* Synthesizes final user-facing responses from workflow building context.
* Also handles conversational queries and explanations.
*/
/** Persona and available context for the Responder Agent. */
const RESPONDER_ROLE = `You are a helpful AI assistant for n8n workflow automation.
You have access to context about what has been built, including:
- Discovery results (nodes found)
- Builder output (workflow structure)
- Configuration summary (setup instructions)`;
/** Template for turning [Internal Context] into a user-facing completion summary. */
const WORKFLOW_COMPLETION = `FOR WORKFLOW COMPLETION RESPONSES:
When you receive [Internal Context], synthesize a clean user-facing response:
1. Summarize what was built in a friendly way
2. Explain the workflow structure briefly
3. Include setup instructions if provided
4. Ask if user wants adjustments
Example response structure:
"I've created your [workflow type] workflow! Here's what it does:
[Brief explanation of the flow]
**Setup Required:**
[List any configuration steps from the context]
Let me know if you'd like to adjust anything."`;
/** Tone guidance for plain Q&A turns (no workflow built). */
const CONVERSATIONAL_RESPONSES = `FOR QUESTIONS/CONVERSATIONS:
- Be friendly and concise
- Explain n8n capabilities when asked
- Provide practical examples when helpful`;
/** Global style rules for all responder output. */
const RESPONSE_STYLE = `RESPONSE STYLE:
- Keep responses focused and not overly long
- Use markdown formatting for readability
- Be conversational and helpful`;
/** Assembles the Responder Agent system prompt from its four static sections. */
export function buildResponderPrompt(): string {
	const sections = [RESPONDER_ROLE, WORKFLOW_COMPLETION, CONVERSATIONAL_RESPONSES, RESPONSE_STYLE];
	return sections.join('\n\n');
}

View File

@@ -0,0 +1,57 @@
/**
* Supervisor Agent Prompt
*
* Handles INITIAL routing based on user intent.
* After initial routing, deterministic routing takes over based on coordination log.
*/
/** One-line persona for the Supervisor (initial-routing) agent. */
const SUPERVISOR_ROLE = 'You are a Supervisor that routes user requests to specialist agents.';
/** The four routing targets; responder is terminal (no further routing after it). */
const AVAILABLE_AGENTS = `AVAILABLE AGENTS:
- discovery: Find n8n nodes for building/modifying workflows
- builder: Create nodes and connections (requires discovery first for new node types)
- configurator: Set parameters on EXISTING nodes (no structural changes)
- responder: Answer questions, confirm completion (TERMINAL)`;
/** Ordered routing rules, checked top to bottom, with examples per branch. */
const ROUTING_DECISION_TREE = `ROUTING DECISION TREE:
1. Is user asking a question or chatting? → responder
Examples: "what does this do?", "explain the workflow", "thanks"
2. Does the request involve NEW or DIFFERENT node types? → discovery
Examples:
- "Build a workflow that..." (new workflow)
- "Use [ServiceB] instead of [ServiceA]" (replacing node type)
- "Add [some integration]" (new integration)
- "Switch from [ServiceA] to [ServiceB]" (swapping services)
3. Is the request about connecting/disconnecting existing nodes? → builder
Examples: "Connect node A to node B", "Remove the connection to X"
4. Is the request about changing VALUES in existing nodes? → configurator
Examples:
- "Change the URL to https://..."
- "Set the timeout to 30 seconds"
- "Update the email subject to..."`;
/** Clarifies replacement (discovery) vs configuration - common confusion point */
const KEY_DISTINCTION = `KEY DISTINCTION:
- "Use [ServiceB] instead of [ServiceA]" = REPLACEMENT = discovery (new node type needed)
- "Change the [ServiceA] API key" = CONFIGURATION = configurator (same node, different value)`;
/** Expected structured output: a one-sentence rationale plus the chosen agent. */
const OUTPUT_FORMAT = `OUTPUT:
- reasoning: One sentence explaining your routing decision
- next: Agent name`;
/** Assembles the Supervisor routing prompt from its static sections. */
export function buildSupervisorPrompt(): string {
	const sections = [
		SUPERVISOR_ROLE,
		AVAILABLE_AGENTS,
		ROUTING_DECISION_TREE,
		KEY_DISTINCTION,
		OUTPUT_FORMAT,
	];
	return sections.join('\n\n');
}
/** Appended after the conversation when asking the supervisor LLM to pick the next agent. */
export const SUPERVISOR_PROMPT_SUFFIX =
'\n\nGiven the conversation above, which agent should act next? Provide your reasoning and selection.';

View File

@@ -0,0 +1,91 @@
import { PromptTemplate } from '@langchain/core/prompts';
import { WorkflowTechnique, TechniqueDescription } from '@/types/categorization';
/**
 * Few-shot examples for prompt categorization - helps LLM understand expected output format.
 * NOTE(review): these duplicate the first five entries of exampleCategorizations
 * in agents/discovery.prompt.ts — consider sharing one list to keep them in sync.
 */
export const examplePrompts = [
	{
		prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
		techniques: [
			WorkflowTechnique.MONITORING,
			WorkflowTechnique.CHATBOT,
			WorkflowTechnique.CONTENT_GENERATION,
		],
	},
	{
		prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
		techniques: [
			WorkflowTechnique.FORM_INPUT,
			WorkflowTechnique.HUMAN_IN_THE_LOOP,
			WorkflowTechnique.NOTIFICATION,
		],
	},
	{
		prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
		techniques: [
			WorkflowTechnique.SCHEDULING,
			WorkflowTechnique.SCRAPING_AND_RESEARCH,
			WorkflowTechnique.DATA_EXTRACTION,
			WorkflowTechnique.DATA_ANALYSIS,
		],
	},
	{
		prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
		techniques: [
			WorkflowTechnique.DOCUMENT_PROCESSING,
			WorkflowTechnique.DATA_EXTRACTION,
			WorkflowTechnique.DATA_TRANSFORMATION,
			WorkflowTechnique.ENRICHMENT,
		],
	},
	{
		prompt: 'Build a searchable internal knowledge base from past support tickets',
		techniques: [
			WorkflowTechnique.DATA_TRANSFORMATION,
			WorkflowTechnique.DATA_ANALYSIS,
			WorkflowTechnique.KNOWLEDGE_BASE,
		],
	},
];
/**
 * Formats example prompts as "prompt → techniques" for few-shot learning.
 *
 * Fix: the body emitted `${prompt}${techniques}` with no separator and joined
 * techniques with "," (no space), contradicting both this function's own
 * contract ("prompt → techniques") and the template text that consumes it.
 */
export function formatExamplePrompts(): string {
	return examplePrompts
		.map((example) => `- ${example.prompt} → ${example.techniques.join(', ')}`)
		.join('\n');
}
/** Generates a markdown bullet list of every workflow technique and its description. */
export function formatTechniqueList(): string {
	const rows: string[] = [];
	for (const [name, description] of Object.entries(TechniqueDescription)) {
		rows.push(`- **${name}**: ${description}`);
	}
	return rows.join('\n');
}
/**
 * Template for analyzing user prompts and identifying workflow techniques.
 * Runtime inputs: {userPrompt} and {techniques}; the few-shot examples are
 * interpolated once at module load via formatExamplePrompts(). Asks the model
 * for up to 5 techniques plus a 0.0-1.0 confidence rating.
 */
export const promptCategorizationTemplate = PromptTemplate.fromTemplate(
	`Analyze the following user prompt and identify the workflow techniques required to fulfill the request.
Be specific and identify all relevant techniques.
<user_prompt>
{userPrompt}
</user_prompt>
<workflow_techniques>
{techniques}
</workflow_techniques>
The following prompt categorization examples show a prompt → techniques involved to provide a sense
of how the categorization should be carried out.
<example_categorization>
${formatExamplePrompts()}
</example_categorization>
Select a maximum of 5 techniques that you believe are applicable, but only select them if you are
confident that they are applicable. If the prompt is ambiguous or does not provide an obvious workflow
do not provide any techniques - if confidence is low avoid providing techniques.
Select ALL techniques that apply to this workflow. Most workflows use multiple techniques.
Rate your confidence in this categorization from 0.0 to 1.0.
`,
);

View File

@@ -0,0 +1,16 @@
import { PromptTemplate } from '@langchain/core/prompts';
/**
 * Template for summarizing multi-turn conversations into a structured format.
 * Runtime inputs: {previousSummary} and {conversationText}; the output summary
 * should capture decisions, current workflow state, and suggested next steps.
 */
export const compactPromptTemplate = PromptTemplate.fromTemplate(
	`Please summarize the following conversation between a user and an AI assistant building an n8n workflow:
<previous_summary>
{previousSummary}
</previous_summary>
<conversation>
{conversationText}
</conversation>
Provide a structured summary that captures the key points, decisions made, current state of the workflow, and suggested next steps.`,
);

View File

@@ -0,0 +1,26 @@
export { ParameterUpdatePromptBuilder } from './prompt-builder';
export { instanceUrlPrompt } from './instance-url';
export {
DEFAULT_PROMPT_CONFIG,
getNodeTypeCategory,
mentionsResourceKeywords,
mentionsTextKeywords,
} from './prompt-config';
// Base prompts
export { CORE_INSTRUCTIONS } from './base/core-instructions';
export { EXPRESSION_RULES } from './base/expression-rules';
export { COMMON_PATTERNS } from './base/common-patterns';
export { OUTPUT_FORMAT } from './base/output-format';
// Node type guides
export { SET_NODE_GUIDE } from './node-types/set-node';
export { IF_NODE_GUIDE } from './node-types/if-node';
export { SWITCH_NODE_GUIDE } from './node-types/switch-node';
export { HTTP_REQUEST_GUIDE } from './node-types/http-request';
export { TOOL_NODES_GUIDE } from './node-types/tool-nodes';
// Parameter type guides
export { RESOURCE_LOCATOR_GUIDE } from './parameter-types/resource-locator';
export { SYSTEM_MESSAGE_GUIDE } from './parameter-types/system-message';
export { TEXT_FIELDS_GUIDE } from './parameter-types/text-fields';

View File

@@ -1,5 +1,7 @@
import type { INodeTypeDescription, INodeProperties } from 'n8n-workflow';
import type { PromptBuilderContext } from '@/types/config';
import { COMMON_PATTERNS } from './base/common-patterns';
import { CORE_INSTRUCTIONS } from './base/core-instructions';
import { EXPRESSION_RULES } from './base/expression-rules';
@@ -23,7 +25,6 @@ import {
getNodeTypeCategory,
mentionsResourceKeywords,
} from './prompt-config';
import type { PromptBuilderContext } from '../../types/config';
export class ParameterUpdatePromptBuilder {
/**

View File

@@ -1,4 +1,4 @@
import type { NodePromptConfig } from '../../types/config';
import type { NodePromptConfig } from '@/types/config';
export const DEFAULT_PROMPT_CONFIG: NodePromptConfig = {
nodeTypePatterns: {

View File

@@ -0,0 +1,13 @@
import { PromptTemplate } from '@langchain/core/prompts';
/**
 * Template for generating descriptive workflow names from user prompts.
 * Runtime input: {initialPrompt}. The instructions forbid the word "workflow"
 * in the generated name.
 */
export const workflowNamingPromptTemplate = PromptTemplate.fromTemplate(
	`Based on the initial user prompt, please generate a name for the workflow that captures its essence and purpose.
<initial_prompt>
{initialPrompt}
</initial_prompt>
This name should be concise, descriptive, and suitable for a workflow that automates tasks related to the given prompt. The name should be in a format that is easy to read and understand. Do not include the word "workflow" in the name.
`,
);

View File

@@ -0,0 +1,56 @@
/**
* Centralized prompts for AI Workflow Builder
*
* This directory contains all prompts used by the AI workflow builder agents and chains.
* Organization:
* - agents/ - Multi-agent system prompts (builder, configurator, discovery, etc.)
* - chains/ - Chain-level prompts (categorization, compact, workflow-name, parameter-updater)
* - legacy-agent.prompt.ts - Legacy single-agent mode prompt
*/
// Agent prompts (multi-agent system)
export { buildBuilderPrompt } from './agents/builder.prompt';
export {
buildDiscoveryPrompt,
formatTechniqueList,
formatExampleCategorizations,
type DiscoveryPromptOptions,
} from './agents/discovery.prompt';
export { buildConfiguratorPrompt, INSTANCE_URL_PROMPT } from './agents/configurator.prompt';
export { buildSupervisorPrompt, SUPERVISOR_PROMPT_SUFFIX } from './agents/supervisor.prompt';
export { buildResponderPrompt } from './agents/responder.prompt';
// Legacy agent prompt (single-agent mode)
export {
createMainAgentPrompt,
mainAgentPrompt,
type MainAgentPromptOptions,
} from './legacy-agent.prompt';
// Chain prompts
export {
promptCategorizationTemplate,
examplePrompts,
formatExamplePrompts,
formatTechniqueList as formatCategorizationTechniqueList,
} from './chains/categorization.prompt';
export { compactPromptTemplate } from './chains/compact.prompt';
export { workflowNamingPromptTemplate } from './chains/workflow-name.prompt';
// Parameter updater prompts
export {
ParameterUpdatePromptBuilder,
instanceUrlPrompt,
CORE_INSTRUCTIONS,
EXPRESSION_RULES,
COMMON_PATTERNS,
OUTPUT_FORMAT,
SET_NODE_GUIDE,
IF_NODE_GUIDE,
SWITCH_NODE_GUIDE,
HTTP_REQUEST_GUIDE,
TOOL_NODES_GUIDE,
RESOURCE_LOCATOR_GUIDE,
SYSTEM_MESSAGE_GUIDE,
TEXT_FIELDS_GUIDE,
} from './chains/parameter-updater';

View File

@@ -1,6 +1,6 @@
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { instanceUrlPrompt } from '../../chains/prompts/instance-url';
import { instanceUrlPrompt } from './chains/parameter-updater/instance-url';
/**
* Phase configuration for the workflow creation sequence

View File

@@ -7,6 +7,7 @@ import type { Logger } from '@n8n/backend-common';
import type { INodeTypeDescription } from 'n8n-workflow';
import { LLMServiceError } from '@/errors';
import { buildBuilderPrompt } from '@/prompts/agents/builder.prompt';
import type { ChatPayload } from '@/workflow-builder-agent';
import { BaseSubgraph } from './subgraph-interface';
@@ -34,234 +35,6 @@ import {
createStandardShouldContinue,
} from '../utils/subgraph-helpers';
/**
* Builder Agent Prompt
*/
const BUILDER_PROMPT = `You are a Builder Agent specialized in constructing n8n workflows.
MANDATORY EXECUTION SEQUENCE:
You MUST follow these steps IN ORDER. Do not skip any step.
STEP 1: CREATE NODES
- Call add_nodes for EVERY node needed based on discovery results
- Create multiple nodes in PARALLEL for efficiency
- Do NOT respond with text - START BUILDING immediately
STEP 2: CONNECT NODES
- Call connect_nodes for ALL required connections
- Connect multiple node pairs in PARALLEL
STEP 3: VALIDATE (REQUIRED)
- After ALL nodes and connections are created, call validate_structure
- This step is MANDATORY - you cannot finish without it
- If validation finds issues (missing trigger, invalid connections), fix them and validate again
- MAXIMUM 3 VALIDATION ATTEMPTS: After 3 calls to validate_structure, proceed to respond regardless of remaining issues
STEP 4: RESPOND TO USER
- Only after validation passes, provide your brief summary
⚠️ NEVER respond to the user without calling validate_structure first ⚠️
NODE CREATION:
Each add_nodes call creates ONE node. You must provide:
- nodeType: The exact type from discovery (e.g., "n8n-nodes-base.httpRequest")
- name: Descriptive name (e.g., "Fetch Weather Data")
- connectionParametersReasoning: Explain your thinking about connection parameters
- connectionParameters: Parameters that affect connections (or {{}} if none needed)
<workflow_configuration_node>
Always include a Workflow Configuration node at the start of every workflow.
The Workflow Configuration node (n8n-nodes-base.set) should be placed immediately after the trigger node and before all other processing nodes.
Placement rules:
- Add between trigger and first processing node
- Connect: Trigger → Workflow Configuration → First processing node
- Name it "Workflow Configuration"
</workflow_configuration_node>
<data_parsing_strategy>
For AI-generated structured data, prefer Structured Output Parser nodes over Code nodes.
For binary file data, use Extract From File node to extract content from files before processing.
Use Code nodes only for custom business logic beyond parsing.
STRUCTURED OUTPUT PARSER RULE:
When Discovery results include Structured Output Parser:
1. Create the Structured Output Parser node
2. Set AI Agent's hasOutputParser: true in connectionParameters
3. Connect: Structured Output Parser → AI Agent (ai_outputParser connection)
</data_parsing_strategy>
<proactive_design>
Anticipate workflow needs:
- IF nodes for conditional logic when multiple outcomes exist
- Set nodes for data transformation between incompatible formats
- Schedule Triggers for recurring tasks
- Error handling for external service calls
NEVER use Split In Batches nodes.
</proactive_design>
<node_defaults_warning>
CRITICAL: NEVER RELY ON DEFAULT PARAMETER VALUES FOR CONNECTIONS
Default values often hide connection inputs/outputs. You MUST explicitly configure parameters that affect connections:
- Vector Store: Mode parameter affects available connections - always set explicitly (e.g., mode: "insert", "retrieve", "retrieve-as-tool")
- AI Agent: hasOutputParser default may not match your workflow needs
- Document Loader: textSplittingMode affects whether it accepts a text splitter input
ALWAYS check node details and set connectionParameters explicitly.
</node_defaults_warning>
CONNECTION PARAMETERS EXAMPLES:
- Static nodes (HTTP Request, Set, Code): reasoning="Static inputs/outputs", parameters={{}}
- AI Agent with structured output: reasoning="hasOutputParser enables ai_outputParser input for Structured Output Parser", parameters={{ hasOutputParser: true }}
- Vector Store insert: reasoning="Insert mode requires document input", parameters={{ mode: "insert" }}
- Document Loader custom: reasoning="Custom mode enables text splitter input", parameters={{ textSplittingMode: "custom" }}
- Switch with routing rules: reasoning="Switch needs N outputs, creating N rules.values entries with outputKeys", parameters={{ mode: "rules", rules: {{ values: [...] }} }} - see <switch_node_pattern> for full structure
<structured_output_parser_guidance>
WHEN TO SET hasOutputParser: true on AI Agent:
- Discovery found Structured Output Parser node → MUST set hasOutputParser: true
- AI output will be used in conditions (IF/Switch nodes checking $json.field)
- AI output will be formatted/displayed (HTML emails, reports with specific sections)
- AI output will be stored in database/data tables with specific fields
- AI is classifying, scoring, or extracting specific data fields
</structured_output_parser_guidance>
<node_connections_understanding>
n8n connections flow from SOURCE (output) to TARGET (input).
Regular data flow: Source node output → Target node input
Example: HTTP Request → Set (HTTP Request is source, Set is target)
AI sub-nodes PROVIDE capabilities, making them the SOURCE:
- OpenAI Chat Model → AI Agent [ai_languageModel]
- Calculator Tool → AI Agent [ai_tool]
- Window Buffer Memory → AI Agent [ai_memory]
- Token Splitter → Default Data Loader [ai_textSplitter]
- Default Data Loader → Vector Store [ai_document]
- Embeddings OpenAI → Vector Store [ai_embedding]
</node_connections_understanding>
<agent_node_distinction>
Distinguish between two different agent node types:
1. **AI Agent** (@n8n/n8n-nodes-langchain.agent)
- Main workflow node that orchestrates AI tasks
- Use for: Primary AI logic, chatbots, autonomous workflows
2. **AI Agent Tool** (@n8n/n8n-nodes-langchain.agentTool)
- Sub-node that acts as a tool for another AI Agent
- Use for: Multi-agent systems where one agent calls another
Default assumption: When discovery results include "agent", use AI Agent
unless explicitly specified as "agent tool" or "sub-agent".
</agent_node_distinction>
<rag_workflow_pattern>
For RAG (Retrieval-Augmented Generation) workflows:
Main data flow:
- Data source (e.g., HTTP Request) → Vector Store [main connection]
AI capability connections:
- Document Loader → Vector Store [ai_document]
- Embeddings → Vector Store [ai_embedding]
- Text Splitter → Document Loader [ai_textSplitter]
Common mistake to avoid:
- NEVER connect Document Loader to main data outputs
- Document Loader is an AI sub-node that gives Vector Store document processing capability
</rag_workflow_pattern>
<switch_node_pattern>
For Switch nodes with multiple routing paths:
- The number of outputs is determined by the number of entries in rules.values[]
- You MUST create the rules.values[] array with placeholder entries for each output branch
- Each entry needs: conditions structure (with empty leftValue/rightValue) + renameOutput: true + descriptive outputKey
- Configurator will fill in the actual condition values later
- Use descriptive node names like "Route by Amount" or "Route by Status"
Example connectionParameters for 3-way routing:
{{
"mode": "rules",
"rules": {{
"values": [
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 1 Name"
}},
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 2 Name"
}},
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 3 Name"
}}
]
}}
}}
</switch_node_pattern>
<connection_type_examples>
**Main Connections** (regular data flow):
- Trigger → HTTP Request → Set → Email
**AI Language Model Connections** (ai_languageModel):
- OpenAI Chat Model → AI Agent
**AI Tool Connections** (ai_tool):
- Calculator Tool → AI Agent
- AI Agent Tool → AI Agent (for multi-agent systems)
**AI Document Connections** (ai_document):
- Document Loader → Vector Store
**AI Embedding Connections** (ai_embedding):
- OpenAI Embeddings → Vector Store
**AI Text Splitter Connections** (ai_textSplitter):
- Token Text Splitter → Document Loader
**AI Memory Connections** (ai_memory):
- Window Buffer Memory → AI Agent
**AI Vector Store in retrieve-as-tool mode** (ai_tool):
- Vector Store → AI Agent
</connection_type_examples>
DO NOT:
- Respond before calling validate_structure
- Skip validation even if you think structure is correct
- Add commentary between tool calls - execute tools silently
- Configure node parameters (that's the Configurator Agent's job)
- Search for nodes (that's the Discovery Agent's job)
- Make assumptions about node types - use exactly what Discovery found
RESPONSE FORMAT (only after validation):
Provide ONE brief text message summarizing:
- What nodes were added
- How they're connected
Example: "Created 4 nodes: Trigger → Weather → Image Generation → Email"`;
/**
* Builder Subgraph State
*/
@@ -337,7 +110,7 @@ export class BuilderSubgraph extends BaseSubgraph<
[
{
type: 'text',
text: BUILDER_PROMPT,
text: buildBuilderPrompt(),
cache_control: { type: 'ephemeral' },
},
],

View File

@@ -8,6 +8,7 @@ import type { Logger } from '@n8n/backend-common';
import type { INodeTypeDescription } from 'n8n-workflow';
import { LLMServiceError } from '@/errors';
import { buildConfiguratorPrompt, INSTANCE_URL_PROMPT } from '@/prompts/agents/configurator.prompt';
import { BaseSubgraph } from './subgraph-interface';
import type { ParentGraphState } from '../parent-graph-state';
@@ -33,126 +34,6 @@ import {
} from '../utils/subgraph-helpers';
import type { ChatPayload } from '../workflow-builder-agent';
/**
* Configurator Agent Prompt
*/
const CONFIGURATOR_PROMPT = `You are a Configurator Agent specialized in setting up n8n node parameters.
MANDATORY EXECUTION SEQUENCE:
You MUST follow these steps IN ORDER. Do not skip any step.
STEP 1: CONFIGURE ALL NODES
- Call update_node_parameters for EVERY node in the workflow
- Configure multiple nodes in PARALLEL for efficiency
- Do NOT respond with text - START CONFIGURING immediately
STEP 2: VALIDATE (REQUIRED)
- After ALL configurations complete, call validate_configuration
- This step is MANDATORY - you cannot finish without it
- If validation finds issues, fix them and validate again
- MAXIMUM 3 VALIDATION ATTEMPTS: After 3 calls to validate_configuration, proceed to respond regardless of remaining issues
STEP 3: RESPOND TO USER
- Only after validation passes, provide your response
NEVER respond to the user without calling validate_configuration first
WORKFLOW JSON DETECTION:
- You receive <current_workflow_json> in your context
- If you see nodes in the workflow JSON, you MUST configure them IMMEDIATELY
- Look at the workflow JSON, identify each node, and call update_node_parameters for ALL of them
PARAMETER CONFIGURATION:
Use update_node_parameters with natural language instructions:
- "Set URL to https://api.example.com/weather"
- "Add header Authorization: Bearer token"
- "Set method to POST"
- "Add field 'status' with value 'processed'"
SPECIAL EXPRESSIONS FOR TOOL NODES:
Tool nodes (types ending in "Tool") support $fromAI expressions:
- "Set sendTo to ={{ $fromAI('to') }}"
- "Set subject to ={{ $fromAI('subject') }}"
- "Set message to ={{ $fromAI('message_html') }}"
- "Set timeMin to ={{ $fromAI('After', '', 'string') }}"
$fromAI syntax: ={{ $fromAI('key', 'description', 'type', defaultValue) }}
- ONLY use in tool nodes (check node type ends with "Tool")
- Use for dynamic values that AI determines at runtime
- For regular nodes, use static values or standard expressions
CRITICAL PARAMETERS TO ALWAYS SET:
- HTTP Request: URL, method, headers (if auth needed)
- Set node: Fields to set with values
- Code node: The actual code to execute
- IF node: Conditions to check
- Switch node: Configure rules.values[] with conditions for each output branch (uses same filter structure as IF node)
- Document Loader: dataType parameter ('binary' for files like PDF, 'json' for JSON data)
- AI nodes: Prompts, models, configurations
- Tool nodes: Use $fromAI for dynamic recipient/subject/message fields
NEVER RELY ON DEFAULT VALUES:
Defaults are traps that cause runtime failures. Examples:
- Document Loader defaults to 'json' but MUST be 'binary' when processing files
- HTTP Request defaults to GET but APIs often need POST
- Vector Store mode affects available connections - set explicitly (retrieve-as-tool when using with AI Agent)
<switch_node_configuration>
Switch nodes require configuring rules.values[] array - each entry creates one output:
Structure per rule:
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [
{{
"leftValue": "={{{{ $json.fieldName }}}}",
"rightValue": <value>,
"operator": {{ "type": "number|string", "operation": "lt|gt|equals|etc" }}
}}
],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Descriptive Label"
}}
For numeric ranges (e.g., $100-$1000):
- Use TWO conditions with combinator: "and"
- First: gte (greater than or equal)
- Second: lte (less than or equal)
Always set renameOutput: true and provide descriptive outputKey labels.
</switch_node_configuration>
<response_format>
After validation passes, provide a concise summary:
- List any placeholders requiring user configuration (e.g., "URL placeholder needs actual endpoint")
- Note which nodes were configured and key settings applied
- Keep it brief - this output is used for coordination with other LLM agents, not displayed directly to users
</response_format>
DO NOT:
- Respond before calling validate_configuration
- Skip validation even if you think configuration is correct
- Add commentary between tool calls - execute tools silently`;
/**
 * Instance URL prompt template.
 *
 * Appended to the configurator agent's system prompt. `{instanceUrl}` is a
 * single-brace template variable substituted when the prompt template is
 * rendered (this file's prompt strings use doubled braces for literal braces).
 * Supplies the base URL the agent needs to construct webhook and chat-trigger
 * callback URLs.
 */
const INSTANCE_URL_PROMPT = `
<instance_url>
The n8n instance base URL is: {instanceUrl}
This URL is essential for webhook nodes and chat triggers as it provides the base URL for:
- Webhook URLs that external services need to call
- Chat trigger URLs for conversational interfaces
- Any node that requires the full instance URL to generate proper callback URLs
When working with webhook or chat trigger nodes, use this URL as the base for constructing proper endpoint URLs.
</instance_url>
`;
/**
* Configurator Subgraph State
*/
@@ -243,7 +124,7 @@ export class ConfiguratorSubgraph extends BaseSubgraph<
[
{
type: 'text',
text: CONFIGURATOR_PROMPT,
text: buildConfiguratorPrompt(),
},
{
type: 'text',

View File

@@ -11,10 +11,10 @@ import { z } from 'zod';
import { LLMServiceError } from '@/errors';
import {
TechniqueDescription,
WorkflowTechnique,
type WorkflowTechniqueType,
} from '@/types/categorization';
buildDiscoveryPrompt,
formatTechniqueList,
formatExampleCategorizations,
} from '@/prompts/agents/discovery.prompt';
import type { BuilderFeatureFlags } from '@/workflow-builder-agent';
import { BaseSubgraph } from './subgraph-interface';
@@ -31,136 +31,6 @@ import { buildWorkflowSummary, createContextMessage } from '../utils/context-bui
import { appendArrayReducer, nodeConfigurationsReducer } from '../utils/state-reducers';
import { executeSubgraphTools, extractUserRequest } from '../utils/subgraph-helpers';
/**
 * Example categorizations to guide technique selection.
 *
 * Few-shot examples (rendered into the discovery prompt by
 * formatExampleCategorizations) that map a user prompt to the set of
 * WorkflowTechnique tags it should receive. Expanded with diverse examples to
 * improve accuracy; the entries after the inline comment below target
 * previously observed misclassifications.
 */
const exampleCategorizations: Array<{
prompt: string;
techniques: WorkflowTechniqueType[];
}> = [
{
prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
techniques: [
WorkflowTechnique.MONITORING,
WorkflowTechnique.CHATBOT,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
techniques: [
WorkflowTechnique.FORM_INPUT,
WorkflowTechnique.HUMAN_IN_THE_LOOP,
WorkflowTechnique.NOTIFICATION,
],
},
{
prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
techniques: [
WorkflowTechnique.SCHEDULING,
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_ANALYSIS,
],
},
{
prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
techniques: [
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.ENRICHMENT,
],
},
{
prompt: 'Build a searchable internal knowledge base from past support tickets',
techniques: [
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.DATA_ANALYSIS,
WorkflowTechnique.KNOWLEDGE_BASE,
],
},
// Additional examples to address common misclassifications
{
prompt: 'Create an AI agent that writes and sends personalized emails to leads',
techniques: [WorkflowTechnique.CONTENT_GENERATION, WorkflowTechnique.NOTIFICATION],
},
{
prompt:
'Fetch trending topics from Google Trends and Reddit, select the best ones, and create social posts',
techniques: [
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.TRIAGE,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt:
'Trigger when a new contact is created in HubSpot and enrich their profile with LinkedIn data',
techniques: [WorkflowTechnique.MONITORING, WorkflowTechnique.ENRICHMENT],
},
{
prompt: 'Get stock prices from financial APIs and analyze volatility patterns',
techniques: [WorkflowTechnique.SCRAPING_AND_RESEARCH, WorkflowTechnique.DATA_ANALYSIS],
},
{
prompt: 'Generate video reels from templates and auto-post to social media on schedule',
techniques: [
WorkflowTechnique.SCHEDULING,
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt: 'Receive news from Telegram channels, filter relevant ones, and forward to my channel',
techniques: [
WorkflowTechnique.MONITORING,
WorkflowTechnique.TRIAGE,
WorkflowTechnique.NOTIFICATION,
],
},
{
prompt: 'Analyze YouTube video performance data and generate a weekly report',
techniques: [
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.DATA_ANALYSIS,
WorkflowTechnique.DATA_TRANSFORMATION,
],
},
{
prompt:
'Create a chatbot that answers questions using data from a Google Sheet as knowledge base',
techniques: [WorkflowTechnique.CHATBOT, WorkflowTechnique.KNOWLEDGE_BASE],
},
{
prompt: 'Form submission with file upload triggers document extraction and approval workflow',
techniques: [
WorkflowTechnique.FORM_INPUT,
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.HUMAN_IN_THE_LOOP,
],
},
];
/**
 * Format technique descriptions for prompt injection.
 *
 * Renders every entry of TechniqueDescription as a markdown bullet of the form
 * `- **KEY**: description`, one entry per line.
 */
function formatTechniqueList(): string {
	const lines: string[] = [];
	for (const [key, description] of Object.entries(TechniqueDescription)) {
		lines.push(`- **${key}**: ${description}`);
	}
	return lines.join('\n');
}
/**
 * Format example categorizations for prompt injection.
 *
 * Renders each example as `- <prompt> → <technique, technique, …>`, one per
 * line, for the <example_categorizations> section of the discovery prompt.
 *
 * Fix: the previous template interpolated the prompt and the technique list
 * back-to-back with no separator (`${example.prompt}${example.techniques…}`),
 * producing lines like "…update CRM recordsDOCUMENT_PROCESSING, …". An arrow
 * separator keeps the two parts distinguishable for the model.
 */
function formatExampleCategorizations(): string {
	return exampleCategorizations
		.map((example) => `- ${example.prompt} → ${example.techniques.join(', ')}`)
		.join('\n');
}
/**
* Strict Output Schema for Discovery
* Simplified to reduce token usage while maintaining utility for downstream subgraphs
@@ -193,191 +63,6 @@ const discoveryOutputSchema = z.object({
.describe('List of n8n nodes identified as necessary for the workflow'),
});
/**
 * Options controlling which optional sections appear in the discovery prompt.
 */
interface DiscoveryPromptOptions {
	// When true, the get_workflow_examples tool and its process step are included.
	includeExamples: boolean;
}
/**
 * Generate the process steps with proper numbering.
 *
 * Builds the ordered PROCESS section of the discovery prompt. The optional
 * workflow-examples step is inserted only when `includeExamples` is set, and
 * the numbering is recomputed afterwards so it stays contiguous either way.
 */
function generateProcessSteps(options: DiscoveryPromptOptions): string {
	const { includeExamples } = options;
	// Mentioned in the "Identify workflow components" step only when the
	// examples tool is available.
	const examplesContext = includeExamples ? ', and examples' : '';
	const steps = [
		'**Analyze user prompt** - Extract services, models, and technologies mentioned',
		'**Call get_best_practices** with identified techniques (internal context)',
	];
	if (includeExamples) {
		steps.push('**Call get_workflow_examples** with search queries for mentioned services/models');
	}
	steps.push(
		`**Identify workflow components** from user request, best practices${examplesContext}`,
		'**Call search_nodes IN PARALLEL** for all components (e.g., "Gmail", "OpenAI", "Schedule")',
		'**Call get_node_details IN PARALLEL** for ALL promising nodes (batch multiple calls)',
		`**Extract node information** from each node_details response:
- Node name from <name> tag
- Version number from <version> tag (required - extract the number)
- Connection-changing parameters from <connections> section`,
		'**Call submit_discovery_results** with complete nodesFound array',
	);
	// Prefix each step with its 1-based position.
	const numbered: string[] = [];
	steps.forEach((step, index) => {
		numbered.push(`${index + 1}. ${step}`);
	});
	return numbered.join('\n');
}
/**
 * Generate available tools list based on feature flags.
 *
 * Lists the tools the discovery agent may call; get_workflow_examples appears
 * only when `includeExamples` is set, and submit_discovery_results is always
 * listed last.
 */
function generateAvailableToolsList(options: DiscoveryPromptOptions): string {
	const exampleTools = options.includeExamples
		? ['- get_workflow_examples: Search for workflow examples as reference']
		: [];
	return [
		'- get_best_practices: Retrieve best practices (internal context)',
		'- search_nodes: Find n8n nodes by keyword',
		'- get_node_details: Get complete node information including <connections>',
		...exampleTools,
		'- submit_discovery_results: Submit final results',
	].join('\n');
}
/**
 * Discovery Agent Prompt.
 *
 * Assembles the system prompt for the discovery agent. The tool list and
 * process steps vary with the feature flags in `options`. `{techniques}` and
 * `{exampleCategorizations}` are single-brace template variables left for the
 * prompt template to substitute; doubled braces render as literal braces.
 *
 * Fix: corrected the "RELVANT" typo to "RELEVANT" in the CRITICAL RULES
 * section of the prompt text.
 */
function generateDiscoveryPrompt(options: DiscoveryPromptOptions): string {
	const availableTools = generateAvailableToolsList(options);
	const processSteps = generateProcessSteps(options);
	return `You are a Discovery Agent for n8n AI Workflow Builder.
YOUR ROLE: Identify relevant n8n nodes and their connection-changing parameters.
AVAILABLE TOOLS:
${availableTools}
PROCESS:
${processSteps}
TECHNIQUE CATEGORIZATION:
When calling get_best_practices, select techniques that match the user's workflow intent.
<available_techniques>
{techniques}
</available_techniques>
<example_categorizations>
{exampleCategorizations}
</example_categorizations>
<technique_clarifications>
Common distinctions to get right:
- **NOTIFICATION vs CHATBOT**: Use NOTIFICATION when SENDING emails/messages/alerts (including to Telegram CHANNELS which are broadcast-only). Use CHATBOT only when RECEIVING and REPLYING to direct messages in a conversation.
- **MONITORING**: Use when workflow TRIGGERS on external events (new record created, status changed, incoming webhook, new message in channel). NOT just scheduled runs.
- **SCRAPING_AND_RESEARCH vs DATA_EXTRACTION**: Use SCRAPING when fetching from EXTERNAL sources (APIs, websites, social media). Use DATA_EXTRACTION for parsing INTERNAL data you already have.
- **TRIAGE**: Use when SELECTING, PRIORITIZING, ROUTING, or QUALIFYING items (e.g., "pick the best", "route to correct team", "qualify leads").
- **DOCUMENT_PROCESSING**: Use for ANY file handling - PDFs, images, videos, Excel, Google Sheets, audio files, file uploads in forms.
- **HUMAN_IN_THE_LOOP**: Use when workflow PAUSES for human approval, review, signing documents, responding to polls, or any manual input before continuing.
- **DATA_ANALYSIS**: Use when ANALYZING, CLASSIFYING, IDENTIFYING PATTERNS, or UNDERSTANDING data (e.g., "analyze outcomes", "learn from previous", "classify by type", "identify trends").
- **KNOWLEDGE_BASE**: Use when storing/retrieving from a DATA SOURCE for Q&A - includes vector DBs, spreadsheets used as databases, document collections.
- **DATA_TRANSFORMATION**: Use when CONVERTING data format, creating REPORTS/SUMMARIES from analyzed data, or restructuring output.
</technique_clarifications>
Technique selection rules:
- Select ALL techniques that apply (most workflows use 2-4)
- Maximum 5 techniques
- Only select techniques you're confident apply
CONNECTION-CHANGING PARAMETERS - CRITICAL RULES:
A parameter is connection-changing ONLY IF it appears in <input> or <output> expressions within <node_details>.
**How to identify:**
1. Look at the <connections> section in node details
2. Check if <input> or <output> uses expressions like: ={{...parameterName...}}
3. If a parameter is referenced in these expressions, it IS connection-changing
4. If a parameter is NOT in <input>/<output> expressions, it is NOT connection-changing
**Example from AI Agent:**
\`\`\`xml
<input>={{...hasOutputParser, needsFallback...}}</input>
\`\`\`
→ hasOutputParser and needsFallback ARE connection-changing (they control which inputs appear)
**Counter-example:**
\`\`\`xml
<properties>
<property name="promptType">...</property> <!-- NOT in <input>/<output> -->
<property name="systemMessage">...</property> <!-- NOT in <input>/<output> -->
</properties>
\`\`\`
→ promptType and systemMessage are NOT connection-changing (they don't affect connections)
**Common connection-changing parameters:**
- Vector Store: mode (appears in <input>/<output> expressions)
- AI Agent: hasOutputParser, needsFallback (appears in <input> expression)
- Merge: numberInputs (appears in <input> expression)
- Webhook: responseMode (appears in <output> expression)
<dynamic_output_nodes>
Some nodes have DYNAMIC outputs that depend on parameter values:
**Switch Node** (n8n-nodes-base.switch):
- When mode is "rules", the number of outputs equals the number of routing rules
- Connection parameter: mode: "rules" - CRITICAL for enabling rule-based routing
- Each rule in rules.values[] creates one output
- The rules parameter uses the same filter structure as IF node conditions
- ALWAYS flag mode as connection-changing with possibleValues: ["rules", "expression"]
**Merge Node** (n8n-nodes-base.merge):
- numberInputs parameter controls how many inputs the node accepts
When you find these nodes, ALWAYS flag mode/numberInputs as connection-changing parameters with possibleValues.
</dynamic_output_nodes>
SUB-NODES SEARCHES:
When searching for AI nodes, ALSO search for their required sub-nodes:
- "AI Agent" → also search for "Chat Model", "Memory", "Output Parser"
- "Basic LLM Chain" → also search for "Chat Model", "Output Parser"
- "Vector Store" → also search for "Embeddings", "Document Loader"
STRUCTURED OUTPUT PARSER - WHEN TO INCLUDE:
Search for "Structured Output Parser" (@n8n/n8n-nodes-langchain.outputParserStructured) when:
- AI output will be used programmatically (conditions, formatting, database storage, API calls)
- AI needs to extract specific fields (e.g., score, category, priority, action items)
- AI needs to classify/categorize data into defined categories
- Downstream nodes need to access specific fields from AI response (e.g., $json.score, $json.category)
- Output will be displayed in a formatted way (e.g., HTML email with specific sections)
- Data needs validation against a schema before processing
- Always use search_nodes to find the exact node names and versions - NEVER guess versions
CRITICAL RULES:
- NEVER ask clarifying questions
- ALWAYS call get_best_practices first
- THEN Call search_nodes to learn about available nodes and their inputs and outputs
- FINALLY call get_node_details IN PARALLEL for speed to get more details about RELEVANT node
- ALWAYS extract version number from <version> tag in node details
- NEVER guess node versions - always use search_nodes to find exact versions
- ONLY flag connectionChangingParameters if they appear in <input> or <output> expressions
- If no parameters appear in connection expressions, return empty array []
- Output ONLY: nodesFound with {{ nodeName, version, reasoning, connectionChangingParameters }}
DO NOT:
- Output text commentary between tool calls
- Include bestPractices or categorization in submit_discovery_results
- Flag parameters that don't affect connections
- Stop without calling submit_discovery_results
`;
}
/**
* Discovery Subgraph State
*/
@@ -476,7 +161,7 @@ export class DiscoverySubgraph extends BaseSubgraph<
});
// Generate prompt based on feature flags
const discoveryPrompt = generateDiscoveryPrompt({ includeExamples });
const discoveryPrompt = buildDiscoveryPrompt({ includeExamples });
// Create agent with tools bound (including submit tool)
const systemPrompt = ChatPromptTemplate.fromMessages([

View File

@@ -31,7 +31,7 @@ jest.mock('@/tools/update-node-parameters.tool', () => ({
jest.mock('@/tools/get-node-parameter.tool', () => ({
createGetNodeParameterTool: jest.fn().mockReturnValue({ tool: { name: 'get_node_parameter' } }),
}));
jest.mock('@/tools/prompts/main-agent.prompt', () => ({
jest.mock('@/prompts/legacy-agent.prompt', () => ({
mainAgentPrompt: {
invoke: jest.fn().mockResolvedValue('mocked prompt'),
},
@@ -67,7 +67,7 @@ Object.defineProperty(global, 'crypto', {
import { MAX_AI_BUILDER_PROMPT_LENGTH } from '@/constants';
import { ValidationError } from '@/errors';
import { createMainAgentPrompt } from '@/tools/prompts/main-agent.prompt';
import { createMainAgentPrompt } from '@/prompts/legacy-agent.prompt';
import type { StreamOutput } from '@/types/streaming';
import { createStreamProcessor } from '@/utils/stream-processor';
import {

View File

@@ -1,6 +1,6 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { ToolMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage, RemoveMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage, isAIMessage, RemoveMessage } from '@langchain/core/messages';
import type { RunnableConfig } from '@langchain/core/runnables';
import type { LangChainTracer } from '@langchain/core/tracers/tracer_langchain';
import type { MemorySaver, StateSnapshot } from '@langchain/langgraph';
@@ -19,6 +19,7 @@ import {
MAX_AI_BUILDER_PROMPT_LENGTH,
MAX_INPUT_TOKENS,
} from '@/constants';
import { createMainAgentPrompt } from '@/prompts/legacy-agent.prompt';
import { trimWorkflowJSON } from '@/utils/trim-workflow-context';
import { conversationCompactChain } from './chains/conversation-compact';
@@ -27,7 +28,6 @@ import { LLMServiceError, ValidationError, WorkflowStateError } from './errors';
import { createMultiAgentWorkflowWithSubgraphs } from './multi-agent-workflow-subgraphs';
import { SessionManagerService } from './session-manager.service';
import { getBuilderTools } from './tools/builder-tools';
import { createMainAgentPrompt } from './tools/prompts/main-agent.prompt';
import type { SimpleWorkflow } from './types/workflow';
import {
applyCacheControlMarkers,
@@ -143,6 +143,8 @@ export interface WorkflowBuilderAgentConfig {
onGenerationSuccess?: () => Promise<void>;
/** Metadata to include in LangSmith traces */
runMetadata?: Record<string, unknown>;
/** Feature flags for enabling/disabling features */
featureFlags?: BuilderFeatureFlags;
}
export interface ExpressionValue {
@@ -287,7 +289,7 @@ export class WorkflowBuilderAgent {
const shouldContinue = ({ messages }: typeof WorkflowState.State) => {
const lastMessage = messages[messages.length - 1];
if (!(lastMessage instanceof AIMessage)) {
if (!lastMessage || !isAIMessage(lastMessage)) {
throw new WorkflowStateError('Expected last message to be generated by the AI agent');
}

View File

@@ -0,0 +1,7 @@
import { Z } from 'zod-class';
import { dataTableColumnNameSchema } from '../../schemas/data-table.schema';
/**
 * Request payload for renaming a data table column.
 *
 * `name` is the new column name; it is validated against
 * dataTableColumnNameSchema, so names that violate the schema are rejected
 * before reaching the handler.
 */
export class RenameDataTableColumnDto extends Z.class({
name: dataTableColumnNameSchema,
}) {}

View File

@@ -99,6 +99,7 @@ export { CreateDataTableColumnDto } from './data-table/create-data-table-column.
export { AddDataTableRowsDto } from './data-table/add-data-table-rows.dto';
export { AddDataTableColumnDto } from './data-table/add-data-table-column.dto';
export { MoveDataTableColumnDto } from './data-table/move-data-table-column.dto';
export { RenameDataTableColumnDto } from './data-table/rename-data-table-column.dto';
export {
OAuthClientResponseDto,

View File

@@ -87,7 +87,9 @@ type EntityName =
| 'AuthorizationCode'
| 'AccessToken'
| 'RefreshToken'
| 'UserConsent';
| 'UserConsent'
| 'DynamicCredentialEntry'
| 'DynamicCredentialResolver';
/**
* Truncate specific DB tables in a test DB.

View File

@@ -28,6 +28,7 @@ export const LOG_SCOPES = [
'chat-hub',
'breaking-changes',
'circuit-breaker',
'dynamic-credentials',
] as const;
export type LogScope = (typeof LOG_SCOPES)[number];

View File

@@ -0,0 +1,31 @@
import type { MigrationContext, ReversibleMigration } from '../migration-types';
const tableName = 'dynamic_credential_entry';
export class AddDynamicCredentialEntryTable1764689388394 implements ReversibleMigration {
async up({ schemaBuilder: { createTable, column } }: MigrationContext) {
await createTable(tableName)
.withColumns(
column('credential_id').varchar(16).primary.notNull,
column('subject_id').varchar(16).primary.notNull,
column('resolver_id').varchar(16).primary.notNull,
column('data').text.notNull,
)
.withTimestamps.withForeignKey('credential_id', {
tableName: 'credentials_entity',
columnName: 'id',
onDelete: 'CASCADE',
})
.withForeignKey('resolver_id', {
tableName: 'dynamic_credential_resolver',
columnName: 'id',
onDelete: 'CASCADE',
})
.withIndexOn(['subject_id'])
.withIndexOn(['resolver_id']);
}
async down({ schemaBuilder: { dropTable } }: MigrationContext) {
await dropTable(tableName);
}
}

View File

@@ -121,6 +121,7 @@ import { CreateBinaryDataTable1763716655000 } from '../common/1763716655000-Crea
import { CreateWorkflowPublishHistoryTable1764167920585 } from '../common/1764167920585-CreateWorkflowPublishHistoryTable';
import { AddCreatorIdToProjectTable1764276827837 } from '../common/1764276827837-AddCreatorIdToProjectTable';
import { CreateDynamicCredentialResolverTable1764682447000 } from '../common/1764682447000-CreateCredentialResolverTable';
import { AddDynamicCredentialEntryTable1764689388394 } from '../common/1764689388394-AddDynamicCredentialEntryTable';
import type { Migration } from '../migration-types';
export const mysqlMigrations: Migration[] = [
@@ -247,4 +248,5 @@ export const mysqlMigrations: Migration[] = [
CreateWorkflowPublishHistoryTable1764167920585,
AddCreatorIdToProjectTable1764276827837,
CreateDynamicCredentialResolverTable1764682447000,
AddDynamicCredentialEntryTable1764689388394,
];

View File

@@ -121,6 +121,7 @@ import { CreateBinaryDataTable1763716655000 } from '../common/1763716655000-Crea
import { CreateWorkflowPublishHistoryTable1764167920585 } from '../common/1764167920585-CreateWorkflowPublishHistoryTable';
import { AddCreatorIdToProjectTable1764276827837 } from '../common/1764276827837-AddCreatorIdToProjectTable';
import { CreateDynamicCredentialResolverTable1764682447000 } from '../common/1764682447000-CreateCredentialResolverTable';
import { AddDynamicCredentialEntryTable1764689388394 } from '../common/1764689388394-AddDynamicCredentialEntryTable';
import type { Migration } from '../migration-types';
export const postgresMigrations: Migration[] = [
@@ -247,4 +248,5 @@ export const postgresMigrations: Migration[] = [
CreateWorkflowPublishHistoryTable1764167920585,
AddCreatorIdToProjectTable1764276827837,
CreateDynamicCredentialResolverTable1764682447000,
AddDynamicCredentialEntryTable1764689388394,
];

View File

@@ -117,6 +117,7 @@ import { ChangeOAuthStateColumnToUnboundedVarchar1763572724000 } from '../common
import { CreateBinaryDataTable1763716655000 } from '../common/1763716655000-CreateBinaryDataTable';
import { CreateWorkflowPublishHistoryTable1764167920585 } from '../common/1764167920585-CreateWorkflowPublishHistoryTable';
import { CreateDynamicCredentialResolverTable1764682447000 } from '../common/1764682447000-CreateCredentialResolverTable';
import { AddDynamicCredentialEntryTable1764689388394 } from '../common/1764689388394-AddDynamicCredentialEntryTable';
import type { Migration } from '../migration-types';
const sqliteMigrations: Migration[] = [
@@ -239,6 +240,7 @@ const sqliteMigrations: Migration[] = [
CreateWorkflowPublishHistoryTable1764167920585,
AddCreatorIdToProjectTable1764276827837,
CreateDynamicCredentialResolverTable1764682447000,
AddDynamicCredentialEntryTable1764689388394,
];
export { sqliteMigrations };

View File

@@ -981,4 +981,16 @@ export class WorkflowRepository extends Repository<WorkflowEntity> {
return await qb.getMany();
}
/**
* Returns if the workflow is stored as `active`.
*
* @important Do not confuse with `ActiveWorkflows.isActive()`,
* which checks if the workflow is active in memory.
*/
async isActive(workflowId: string) {
const workflow = await this.findOne({ select: ['activeVersionId'], where: { id: workflowId } });
return !!workflow?.activeVersionId;
}
}

View File

@@ -16,13 +16,18 @@ export interface BaseEntity {
reload(): Promise<void>;
}
export interface TimestampedEntity {
export interface TimestampedIdEntity {
id: string;
createdAt: Date;
updatedAt: Date;
}
export type EntityClass = new () => BaseEntity | TimestampedEntity;
export interface TimestampedEntity {
createdAt: Date;
updatedAt: Date;
}
export type EntityClass = new () => BaseEntity | TimestampedIdEntity | TimestampedEntity;
export type ModuleSettings = Record<string, unknown>;
export type ModuleContext = Record<string, unknown>;

View File

@@ -292,7 +292,9 @@ describe('McpClientTool', () => {
it('should successfully execute a tool', async () => {
jest.spyOn(Client.prototype, 'connect').mockResolvedValue();
jest.spyOn(Client.prototype, 'callTool').mockResolvedValue({ content: 'Sunny' });
jest
.spyOn(Client.prototype, 'callTool')
.mockResolvedValue({ toolResult: 'Sunny', content: [] });
jest.spyOn(Client.prototype, 'listTools').mockResolvedValue({
tools: [
{
@@ -326,9 +328,11 @@ describe('McpClientTool', () => {
it('should handle tool errors', async () => {
jest.spyOn(Client.prototype, 'connect').mockResolvedValue();
jest
.spyOn(Client.prototype, 'callTool')
.mockResolvedValue({ isError: true, content: [{ text: 'Weather unknown at location' }] });
jest.spyOn(Client.prototype, 'callTool').mockResolvedValue({
isError: true,
toolResult: 'Weather unknown at location',
content: [{ text: 'Weather unknown at location' }],
});
jest.spyOn(Client.prototype, 'listTools').mockResolvedValue({
tools: [
{

View File

@@ -114,7 +114,7 @@ export async function connectMcpClient({
return createResultError({ type: 'invalid_url', error: endpoint.error });
}
const client = new Client({ name, version: version.toString() }, { capabilities: { tools: {} } });
const client = new Client({ name, version: version.toString() }, { capabilities: {} });
if (serverTransport === 'httpStreamable') {
try {

View File

@@ -173,9 +173,13 @@ export class WorkflowToolService {
return processedResponse;
}
// If manualLogging is false we've been called by the engine and need
// the structured response.
if (metadata && 'setMetadata' in context) {
void context.setMetadata(metadata);
}
return responseData;
} catch (error) {
// Check if error is due to cancellation

View File

@@ -203,7 +203,7 @@
"@langchain/redis": "1.0.1",
"@langchain/textsplitters": "1.0.1",
"@langchain/weaviate": "1.0.1",
"@modelcontextprotocol/sdk": "1.20.0",
"@modelcontextprotocol/sdk": "1.24.0",
"@mozilla/readability": "0.6.0",
"@n8n/client-oauth2": "workspace:*",
"@n8n/config": "workspace:*",

View File

@@ -54,7 +54,15 @@ void (async function start() {
});
sentry = Container.get(TaskRunnerSentry);
await sentry.initIfEnabled();
try {
await sentry.initIfEnabled();
} catch (error) {
console.error(
'FAILED TO INITIALIZE SENTRY. ERROR REPORTING WILL BE DISABLED. THIS IS LIKELY A CONFIGURATION OR ENVIRONMENT ISSUE.',
error,
);
sentry = undefined;
}
runner = new JsTaskRunner(config);
runner.on('runner:reached-idle-timeout', () => {

View File

@@ -138,21 +138,6 @@ export class ActiveWorkflowManager {
return this.activeWorkflows.allActiveWorkflows();
}
/**
* Returns if the workflow is stored as `active`.
*
* @important Do not confuse with `ActiveWorkflows.isActive()`,
* which checks if the workflow is active in memory.
*/
async isActive(workflowId: WorkflowId) {
const workflow = await this.workflowRepository.findOne({
select: ['activeVersionId'],
where: { id: workflowId },
});
return !!workflow?.activeVersionId;
}
/**
* Register workflow-defined webhooks in the `workflow_entity` table.
*/

View File

@@ -6,6 +6,7 @@ import type {
InvalidAuthTokenRepository,
UserRepository,
} from '@n8n/db';
import { GLOBAL_OWNER_ROLE } from '@n8n/db';
import type { NextFunction, Response } from 'express';
import { mock } from 'jest-mock-extended';
import jwt from 'jsonwebtoken';
@@ -15,6 +16,7 @@ import { AUTH_COOKIE_NAME } from '@/constants';
import type { MfaService } from '@/mfa/mfa.service';
import { JwtService } from '@/services/jwt.service';
import type { UrlService } from '@/services/url.service';
import type { License } from '@/license';
describe('AuthService', () => {
const browserId = 'test-browser-id';
@@ -35,10 +37,11 @@ describe('AuthService', () => {
const userRepository = mock<UserRepository>();
const invalidAuthTokenRepository = mock<InvalidAuthTokenRepository>();
const mfaService = mock<MfaService>();
const license = mock<License>();
const authService = new AuthService(
globalConfig,
mock(),
mock(),
license,
jwtService,
urlService,
userRepository,
@@ -61,6 +64,7 @@ describe('AuthService', () => {
globalConfig.userManagement.jwtSessionDurationHours = 168;
globalConfig.userManagement.jwtRefreshTimeoutHours = 0;
globalConfig.auth.cookie = { secure: true, samesite: 'lax' };
license.isWithinUsersLimit.mockReturnValue(true);
});
describe('createJWTHash', () => {
@@ -520,6 +524,29 @@ describe('AuthService', () => {
});
});
describe('when user limit is reached', () => {
it('should block issuance if the user is not the global owner', async () => {
license.isWithinUsersLimit.mockReturnValue(false);
expect(() => {
authService.issueCookie(res, user, false, browserId);
}).toThrowError('Maximum number of users reached');
});
it('should allow issuance if the user is the global owner', async () => {
license.isWithinUsersLimit.mockReturnValue(false);
user.role = GLOBAL_OWNER_ROLE;
expect(() => {
authService.issueCookie(res, user, false, browserId);
}).not.toThrowError('Maximum number of users reached');
expect(res.cookie).toHaveBeenCalledWith('n8n-auth', validToken, {
httpOnly: true,
maxAge: 604800000,
sameSite: 'lax',
secure: true,
});
});
});
it('should issue a cookie with the correct options, when 2FA was used', () => {
authService.issueCookie(res, user, true, browserId);

View File

@@ -10,7 +10,6 @@ import type { NextFunction, Response } from 'express';
import { JsonWebTokenError, TokenExpiredError } from 'jsonwebtoken';
import type { StringValue as TimeUnitValue } from 'ms';
import config from '@/config';
import { AuthError } from '@/errors/response-errors/auth.error';
import { ForbiddenError } from '@/errors/response-errors/forbidden.error';
import { License } from '@/license';
@@ -171,11 +170,7 @@ export class AuthService {
// TODO: move this check to the login endpoint in AuthController
// If the instance has exceeded its user quota, prevent non-owners from logging in
const isWithinUsersLimit = this.license.isWithinUsersLimit();
if (
config.getEnv('userManagement.isInstanceOwnerSetUp') &&
user.role.slug !== GLOBAL_OWNER_ROLE.slug &&
!isWithinUsersLimit
) {
if (user.role.slug !== GLOBAL_OWNER_ROLE.slug && !isWithinUsersLimit) {
throw new ForbiddenError(RESPONSE_ERROR_MESSAGES.USERS_QUOTA_REACHED);
}

View File

@@ -3,7 +3,6 @@ import {
User,
CredentialsRepository,
ProjectRepository,
SettingsRepository,
SharedCredentialsRepository,
SharedWorkflowRepository,
UserRepository,
@@ -19,6 +18,7 @@ const defaultUserProps = {
lastName: null,
email: null,
password: null,
lastActiveAt: null,
role: 'global:owner',
};
@@ -53,11 +53,6 @@ export class Reset extends BaseCommand {
);
await Container.get(SharedCredentialsRepository).save(newSharedCredentials);
await Container.get(SettingsRepository).update(
{ key: 'userManagement.isInstanceOwnerSetUp' },
{ value: 'false' },
);
this.logger.info('Successfully reset the database to default user state.');
}

View File

@@ -7,10 +7,12 @@ import { Container } from '@n8n/di';
export const schema = {
userManagement: {
/**
* @important Do not remove until after cloud hooks are updated to stop using convict config.
* @important Do not remove isInstanceOwnerSetUp until after cloud hooks (user-management) are updated to stop using
* this property
* @deprecated
*/
isInstanceOwnerSetUp: {
// n8n loads this setting from DB on startup
// n8n loads this setting from SettingsRepository (DB) on startup
doc: "Whether the instance owner's account has been set up",
format: Boolean,
default: false,

View File

@@ -76,7 +76,6 @@ type ToReturnType<T extends ConfigOptionPath> = T extends NumericPath
type ExceptionPaths = {
'queue.bull.redis': RedisOptions;
processedDataManager: IProcessedDataConfig;
'userManagement.isInstanceOwnerSetUp': boolean;
'ui.banners.dismissed': string[] | undefined;
easyAIWorkflowOnboarded: boolean | undefined;
};

View File

@@ -22,6 +22,7 @@ import { ForbiddenError } from '@/errors/response-errors/forbidden.error';
import config from '@/config';
import type { AuthlessRequest } from '@/requests';
import { v4 as uuidv4 } from 'uuid';
import { OwnershipService } from '@/services/ownership.service';
describe('InvitationController', () => {
const logger: Logger = mockInstance(Logger);
@@ -33,22 +34,29 @@ describe('InvitationController', () => {
const userRepository: UserRepository = mockInstance(UserRepository);
const postHog: PostHogClient = mockInstance(PostHogClient);
const eventService: EventService = mockInstance(EventService);
const ownershipService: OwnershipService = mockInstance(OwnershipService);
function defaultInvitationController() {
return new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
ownershipService,
);
}
describe('inviteUser', () => {
it('throws a BadRequestError if SSO is enabled', async () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(true);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const user = mock<User>({
id: '123',
@@ -77,18 +85,9 @@ describe('InvitationController', () => {
it('throws a ForbiddenError if the user limit quota has been reached', async () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(false);
jest.spyOn(license, 'isWithinUsersLimit').mockReturnValue(false);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const user = mock<User>({
id: '123',
@@ -112,18 +111,9 @@ describe('InvitationController', () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(false);
jest.spyOn(license, 'isWithinUsersLimit').mockReturnValue(true);
jest.spyOn(config, 'getEnv').mockReturnValue(false);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(false));
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const user = mock<User>({
id: '123',
@@ -148,18 +138,9 @@ describe('InvitationController', () => {
jest.spyOn(license, 'isWithinUsersLimit').mockReturnValue(true);
jest.spyOn(config, 'getEnv').mockReturnValue(true);
jest.spyOn(license, 'isAdvancedPermissionsLicensed').mockReturnValue(false);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const user = mock<User>({
id: '123',
@@ -209,17 +190,9 @@ describe('InvitationController', () => {
jest.spyOn(config, 'getEnv').mockReturnValue(true);
jest.spyOn(license, 'isAdvancedPermissionsLicensed').mockReturnValue(true);
jest.spyOn(userService, 'inviteUsers').mockResolvedValue(inviteUsersResult);
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const invitationController = defaultInvitationController();
const user = mock<User>({
id: '123',
@@ -255,19 +228,11 @@ describe('InvitationController', () => {
describe('acceptInvitation', () => {
it('throws a BadRequestError if SSO is enabled', async () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(true);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const id = uuidv4();
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const payload = new AcceptInvitationRequestDto({
inviterId: id,
@@ -291,19 +256,11 @@ describe('InvitationController', () => {
it('throws a BadRequestError if the inviter ID and invitee ID are not found in the database', async () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(false);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const id = uuidv4();
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const payload = new AcceptInvitationRequestDto({
inviterId: id,
@@ -332,6 +289,8 @@ describe('InvitationController', () => {
it('throws a BadRequestError if the invitee already has a password', async () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(false);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const invitee = mock<User>({
id: '123',
email: 'valid@email.com',
@@ -346,17 +305,7 @@ describe('InvitationController', () => {
jest.spyOn(userRepository, 'find').mockResolvedValue([inviter, invitee]);
const id = uuidv4();
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const payload = new AcceptInvitationRequestDto({
inviterId: id,
@@ -379,6 +328,8 @@ describe('InvitationController', () => {
it('accepts the invitation successfully', async () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(false);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const id = uuidv4();
const inviter = mock<User>({
id: '124',
@@ -400,17 +351,7 @@ describe('InvitationController', () => {
jest.spyOn(userService, 'toPublic').mockResolvedValue(invitee as unknown as PublicUser);
jest.spyOn(externalHooks, 'run').mockResolvedValue(invitee as never);
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const payload = new AcceptInvitationRequestDto({
inviterId: id,

View File

@@ -1,103 +1,40 @@
import type { DismissBannerRequestDto, OwnerSetupRequestDto } from '@n8n/api-types';
import type { Logger } from '@n8n/backend-common';
import {
type AuthenticatedRequest,
type User,
type PublicUser,
type SettingsRepository,
type UserRepository,
GLOBAL_OWNER_ROLE,
} from '@n8n/db';
import type { Response } from 'express';
import type { DismissBannerRequestDto } from '@n8n/api-types';
import { mock } from 'jest-mock-extended';
import type { AuthService } from '@/auth/auth.service';
import config from '@/config';
import { OwnerController } from '@/controllers/owner.controller';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import type { EventService } from '@/events/event.service';
import type { BannerService } from '@/services/banner.service';
import type { PasswordUtility } from '@/services/password.utility';
import type { UserService } from '@/services/user.service';
import type { OwnershipService } from '@/services/ownership.service';
import type { PostHogClient } from '@/posthog';
describe('OwnerController', () => {
const configGetSpy = jest.spyOn(config, 'getEnv');
const configSetSpy = jest.spyOn(config, 'set');
const logger = mock<Logger>();
const eventService = mock<EventService>();
const authService = mock<AuthService>();
const bannerService = mock<BannerService>();
const userService = mock<UserService>();
const userRepository = mock<UserRepository>();
const settingsRepository = mock<SettingsRepository>();
const passwordUtility = mock<PasswordUtility>();
const ownershipService = mock<OwnershipService>();
const postHogClient = mock<PostHogClient>();
const controller = new OwnerController(
logger,
eventService,
settingsRepository,
authService,
bannerService,
userService,
passwordUtility,
mock(),
userRepository,
postHogClient,
ownershipService,
);
describe('setupOwner', () => {
it('should throw a BadRequestError if the instance owner is already setup', async () => {
configGetSpy.mockReturnValue(true);
it('should pass on errors from the service', async () => {
jest
.spyOn(ownershipService, 'setupOwner')
.mockRejectedValueOnce(new BadRequestError('Instance owner already setup'));
await expect(controller.setupOwner(mock(), mock(), mock())).rejects.toThrowError(
new BadRequestError('Instance owner already setup'),
);
expect(userRepository.findOneOrFail).not.toHaveBeenCalled();
expect(userRepository.save).not.toHaveBeenCalled();
expect(authService.issueCookie).not.toHaveBeenCalled();
expect(settingsRepository.update).not.toHaveBeenCalled();
expect(configSetSpy).not.toHaveBeenCalled();
expect(eventService.emit).not.toHaveBeenCalled();
expect(logger.debug).toHaveBeenCalledWith(
'Request to claim instance ownership failed because instance owner already exists',
);
});
it('should setup the instance owner successfully', async () => {
const user = mock<User>({
id: 'userId',
role: GLOBAL_OWNER_ROLE,
authIdentities: [],
});
const browserId = 'test-browser-id';
const req = mock<AuthenticatedRequest>({ user, browserId, authInfo: { usedMfa: false } });
const res = mock<Response>();
const payload = mock<OwnerSetupRequestDto>({
email: 'valid@email.com',
password: 'NewPassword123',
firstName: 'Jane',
lastName: 'Doe',
});
configGetSpy.mockReturnValue(false);
userRepository.findOneOrFail.mockResolvedValue(user);
userRepository.save.mockResolvedValue(user);
userService.toPublic.mockResolvedValue(mock<PublicUser>({ id: 'newUserId' }));
const result = await controller.setupOwner(req, res, payload);
expect(userRepository.findOneOrFail).toHaveBeenCalledWith({
where: { role: { slug: GLOBAL_OWNER_ROLE.slug } },
relations: ['role'],
});
expect(userRepository.save).toHaveBeenCalledWith(user, { transaction: false });
expect(authService.issueCookie).toHaveBeenCalledWith(res, user, false, browserId);
expect(settingsRepository.update).toHaveBeenCalledWith(
{ key: 'userManagement.isInstanceOwnerSetUp' },
{ value: JSON.stringify(true) },
);
expect(configSetSpy).toHaveBeenCalledWith('userManagement.isInstanceOwnerSetUp', true);
expect(eventService.emit).toHaveBeenCalledWith('instance-owner-setup', { userId: 'userId' });
expect(result.id).toEqual('newUserId');
});
});

View File

@@ -16,7 +16,6 @@ import { Request } from 'express';
import { v4 as uuid } from 'uuid';
import { ActiveWorkflowManager } from '@/active-workflow-manager';
import config from '@/config';
import { inE2ETests } from '@/constants';
import { MessageEventBus } from '@/eventbus/message-event-bus/message-event-bus';
import type { FeatureReturnType } from '@/license';
@@ -223,8 +222,7 @@ export class E2EController {
@Get('/env-feature-flags', { skipAuth: true })
async getEnvFeatureFlags() {
const currentFlags = this.frontendService.getSettings().envFeatureFlags;
return currentFlags;
return (await this.frontendService.getSettings()).envFeatureFlags;
}
@Patch('/env-feature-flags', { skipAuth: true })
@@ -254,7 +252,7 @@ export class E2EController {
}
// Return the current environment feature flags
const currentFlags = this.frontendService.getSettings().envFeatureFlags;
const currentFlags = (await this.frontendService.getSettings()).envFeatureFlags;
return {
success: true,
message: 'Environment feature flags updated',
@@ -364,13 +362,6 @@ export class E2EController {
mfaRecoveryCodes: encryptedRecoveryCodes,
});
}
await this.settingsRepo.update(
{ key: 'userManagement.isInstanceOwnerSetUp' },
{ value: 'true' },
);
config.set('userManagement.isInstanceOwnerSetUp', true);
}
private async resetCache() {

View File

@@ -6,7 +6,6 @@ import { Post, GlobalScope, RestController, Body, Param } from '@n8n/decorators'
import { Response } from 'express';
import { AuthService } from '@/auth/auth.service';
import config from '@/config';
import { RESPONSE_ERROR_MESSAGES } from '@/constants';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { ForbiddenError } from '@/errors/response-errors/forbidden.error';
@@ -17,6 +16,7 @@ import { PostHogClient } from '@/posthog';
import { AuthlessRequest } from '@/requests';
import { PasswordUtility } from '@/services/password.utility';
import { UserService } from '@/services/user.service';
import { OwnershipService } from '@/services/ownership.service';
import { isSsoCurrentAuthenticationMethod } from '@/sso.ee/sso-helpers';
@RestController('/invitations')
@@ -31,6 +31,7 @@ export class InvitationController {
private readonly userRepository: UserRepository,
private readonly postHog: PostHogClient,
private readonly eventService: EventService,
private readonly ownershipService: OwnershipService,
) {}
/**
@@ -64,7 +65,7 @@ export class InvitationController {
throw new ForbiddenError(RESPONSE_ERROR_MESSAGES.USERS_QUOTA_REACHED);
}
if (!config.getEnv('userManagement.isInstanceOwnerSetUp')) {
if (!(await this.ownershipService.hasInstanceOwner())) {
this.logger.debug(
'Request to send email invite(s) to user(s) failed because the owner account is not set up',
);

View File

@@ -1,47 +0,0 @@
import { shouldSkipAuthOnOAuthCallback } from '../abstract-oauth.controller';
describe('shouldSkipAuthOnOAuthCallback', () => {
const originalEnv = process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK;
afterEach(() => {
// Restore original environment variable after each test
if (originalEnv === undefined) {
delete process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK;
} else {
process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK = originalEnv;
}
});
describe('when N8N_SKIP_AUTH_ON_OAUTH_CALLBACK is not set', () => {
beforeEach(() => {
delete process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK;
});
it('should return true', () => {
expect(shouldSkipAuthOnOAuthCallback()).toBe(false);
});
});
describe('with various environment variable values', () => {
const testCases = [
{ value: 'true', expected: true },
{ value: 'TRUE', expected: true },
{ value: 'True', expected: true },
{ value: 'false', expected: false },
{ value: 'FALSE', expected: false },
{ value: 'False', expected: false },
{ value: '', expected: false },
{ value: '1', expected: false },
{ value: 'yes', expected: false },
{ value: 'on', expected: false },
{ value: 'enabled', expected: false },
{ value: ' ', expected: false },
{ value: ' true ', expected: false },
] as const;
test.each(testCases)('"%s" value should return %s', ({ value, expected }) => {
process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK = value;
expect(shouldSkipAuthOnOAuthCallback()).toBe(expected);
});
});
});

View File

@@ -1,57 +1,22 @@
import { Logger } from '@n8n/backend-common';
import { mockInstance } from '@n8n/backend-test-utils';
import { Time } from '@n8n/constants';
import type { CredentialsEntity, User } from '@n8n/db';
import { CredentialsRepository, GLOBAL_OWNER_ROLE } from '@n8n/db';
import { type CredentialsEntity, type User } from '@n8n/db';
import { Container } from '@n8n/di';
import Csrf from 'csrf';
import { mock } from 'jest-mock-extended';
import axios from 'axios';
import type { Response } from 'express';
import { captor, mock } from 'jest-mock-extended';
import { Cipher, type InstanceSettings, ExternalSecretsProxy } from 'n8n-core';
import type { IWorkflowExecuteAdditionalData } from 'n8n-workflow';
import nock from 'nock';
import { OAuth1CredentialController } from '@/controllers/oauth/oauth1-credential.controller';
import { CredentialsFinderService } from '@/credentials/credentials-finder.service';
import { CredentialsHelper } from '@/credentials-helper';
import { VariablesService } from '@/environments.ee/variables/variables.service.ee';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { NotFoundError } from '@/errors/response-errors/not-found.error';
import { ExternalHooks } from '@/external-hooks';
import type { OAuthRequest } from '@/requests';
import * as WorkflowExecuteAdditionalData from '@/workflow-execute-additional-data';
import { OauthService } from '@/oauth/oauth.service';
import { ExternalHooks } from '@/external-hooks';
jest.mock('@/workflow-execute-additional-data');
jest.mock('axios');
describe('OAuth1CredentialController', () => {
const oauthService = mockInstance(OauthService);
mockInstance(Logger);
mockInstance(ExternalHooks);
mockInstance(ExternalSecretsProxy);
mockInstance(VariablesService, {
getAllCached: async () => [],
});
const additionalData = mock<IWorkflowExecuteAdditionalData>();
(WorkflowExecuteAdditionalData.getBase as jest.Mock).mockReturnValue(additionalData);
const cipher = new Cipher(mock<InstanceSettings>({ encryptionKey: 'password' }));
Container.set(Cipher, cipher);
const credentialsHelper = mockInstance(CredentialsHelper);
const credentialsRepository = mockInstance(CredentialsRepository);
const credentialsFinderService = mockInstance(CredentialsFinderService);
const csrfSecret = 'csrf-secret';
const user = mock<User>({
id: '123',
password: 'password',
authIdentities: [],
role: GLOBAL_OWNER_ROLE,
});
const credential = mock<CredentialsEntity>({
id: '1',
name: 'Test Credential',
type: 'oAuth1Api',
data: cipher.encrypt({}),
});
const controller = Container.get(OAuth1CredentialController);
@@ -64,64 +29,26 @@ describe('OAuth1CredentialController', () => {
});
describe('getAuthUri', () => {
it('should throw a BadRequestError when credentialId is missing in the query', async () => {
const req = mock<OAuthRequest.OAuth1Credential.Auth>({ query: { id: '' } });
await expect(controller.getAuthUri(req)).rejects.toThrowError(
new BadRequestError('Required credential ID is missing'),
);
});
it('should throw a NotFoundError when no matching credential is found for the user', async () => {
credentialsFinderService.findCredentialForUser.mockResolvedValueOnce(null);
const req = mock<OAuthRequest.OAuth1Credential.Auth>({ user, query: { id: '1' } });
await expect(controller.getAuthUri(req)).rejects.toThrowError(
new NotFoundError('Credential not found'),
);
});
it('should return a valid auth URI', async () => {
jest.spyOn(Csrf.prototype, 'secretSync').mockReturnValueOnce(csrfSecret);
jest.spyOn(Csrf.prototype, 'create').mockReturnValueOnce('token');
credentialsFinderService.findCredentialForUser.mockResolvedValueOnce(credential);
credentialsHelper.getDecrypted.mockResolvedValueOnce({});
credentialsHelper.applyDefaultsAndOverwrites.mockResolvedValueOnce({
const mockResolvedCredential = mock<CredentialsEntity>({ id: '1' });
oauthService.getCredential.mockResolvedValueOnce(mockResolvedCredential);
oauthService.createCsrfState.mockReturnValueOnce(['csrf-secret', 'state']);
oauthService.getOAuthCredentials.mockResolvedValueOnce({
requestTokenUrl: 'https://example.domain/oauth/request_token',
authUrl: 'https://example.domain/oauth/authorize',
accessTokenUrl: 'https://example.domain/oauth/access_token',
signatureMethod: 'HMAC-SHA1',
signatureMethod: 'HMAC-SHA1' as const,
});
jest.mocked(axios).request.mockResolvedValueOnce({ data: { oauth_token: 'random-token' } });
const req = mock<OAuthRequest.OAuth1Credential.Auth>({
user: mock<User>({ id: '123' }),
query: { id: '1' },
});
nock('https://example.domain')
.post('/oauth/request_token', {
oauth_callback:
'http://localhost:5678/rest/oauth1-credential/callback?state=eyJ0b2tlbiI6InRva2VuIiwiY2lkIjoiMSIsImNyZWF0ZWRBdCI6MTcwNjc1MDYyNTY3OCwidXNlcklkIjoiMTIzIn0=',
})
.once()
.reply(200, { oauth_token: 'random-token' });
const req = mock<OAuthRequest.OAuth1Credential.Auth>({ user, query: { id: '1' } });
const authUri = await controller.getAuthUri(req);
expect(authUri).toEqual('https://example.domain/oauth/authorize?oauth_token=random-token');
const dataCaptor = captor();
expect(credentialsRepository.update).toHaveBeenCalledWith(
'1',
expect.objectContaining({
data: dataCaptor,
id: '1',
name: 'Test Credential',
type: 'oAuth1Api',
}),
);
expect(cipher.decrypt(dataCaptor.value)).toEqual(
JSON.stringify({ csrfSecret: 'csrf-secret' }),
);
expect(credentialsHelper.getDecrypted).toHaveBeenCalledWith(
additionalData,
credential,
credential.type,
'internal',
undefined,
false,
expect(oauthService.encryptAndSaveData).toHaveBeenCalledWith(
mockResolvedCredential,
expect.objectContaining({ csrfSecret: 'csrf-secret' }),
);
});
});
@@ -149,113 +76,40 @@ describe('OAuth1CredentialController', () => {
invalidReq.query = { state: 'test' } as OAuthRequest.OAuth1Credential.Callback['query'];
await controller.handleCallback(invalidReq, res);
expect(res.render).toHaveBeenCalledWith('oauth-error-callback', {
error: {
message: 'Insufficient parameters for OAuth1 callback.',
reason: 'Received following query parameters: {"state":"test"}',
},
});
expect(credentialsRepository.findOneBy).not.toHaveBeenCalled();
});
it('should render the error page when `state` query param is invalid', async () => {
const invalidReq = mock<OAuthRequest.OAuth1Credential.Callback>({
query: {
oauth_verifier: 'verifier',
oauth_token: 'token',
state: 'test',
},
});
await controller.handleCallback(invalidReq, res);
expect(res.render).toHaveBeenCalledWith('oauth-error-callback', {
error: {
message: 'Invalid state format',
},
});
expect(credentialsRepository.findOneBy).not.toHaveBeenCalled();
});
it('should render the error page when credential is not found in DB', async () => {
credentialsRepository.findOneBy.mockResolvedValueOnce(null);
await controller.handleCallback(req, res);
expect(res.render).toHaveBeenCalledWith('oauth-error-callback', {
error: {
message: 'OAuth callback failed because of insufficient permissions',
},
});
expect(credentialsRepository.findOneBy).toHaveBeenCalledTimes(1);
expect(credentialsRepository.findOneBy).toHaveBeenCalledWith({ id: '1' });
});
it('should render the error page when state differs from the stored state in the credential', async () => {
credentialsRepository.findOneBy.mockResolvedValue(credential);
credentialsHelper.getDecrypted.mockResolvedValue({ csrfSecret: 'invalid' });
await controller.handleCallback(req, res);
expect(res.render).toHaveBeenCalledWith('oauth-error-callback', {
error: {
message: 'The OAuth callback state is invalid!',
},
});
});
it('should render the error page when state is older than 5 minutes', async () => {
credentialsRepository.findOneBy.mockResolvedValue(credential);
credentialsHelper.getDecrypted.mockResolvedValue({ csrfSecret });
jest.spyOn(Csrf.prototype, 'verify').mockReturnValueOnce(true);
jest.advanceTimersByTime(10 * Time.minutes.toMilliseconds);
await controller.handleCallback(req, res);
expect(res.render).toHaveBeenCalledWith('oauth-error-callback', {
error: {
message: 'The OAuth callback state is invalid!',
},
});
expect(oauthService.renderCallbackError).toHaveBeenCalledWith(
res,
'Insufficient parameters for OAuth1 callback.',
'Received following query parameters: {"state":"test"}',
);
});
it('should exchange the code for a valid token, and save it to DB', async () => {
credentialsRepository.findOneBy.mockResolvedValue(credential);
credentialsHelper.getDecrypted.mockResolvedValue({ csrfSecret });
credentialsHelper.applyDefaultsAndOverwrites.mockResolvedValueOnce({
const mockResolvedCredential = mock<CredentialsEntity>({ id: '1' });
oauthService.getCredential.mockResolvedValueOnce(mockResolvedCredential);
// @ts-ignore
oauthService.getDecryptedData.mockResolvedValue({ csrfSecret: 'invalid' });
oauthService.getOAuthCredentials.mockResolvedValueOnce({
requestTokenUrl: 'https://example.domain/oauth/request_token',
accessTokenUrl: 'https://example.domain/oauth/access_token',
signatureMethod: 'HMAC-SHA1',
});
jest.spyOn(Csrf.prototype, 'verify').mockReturnValueOnce(true);
nock('https://example.domain')
.post('/oauth/access_token', 'oauth_token=token&oauth_verifier=verifier')
.once()
.reply(200, 'access_token=new_token');
oauthService.resolveCredential.mockResolvedValueOnce([
mockResolvedCredential,
{ csrfSecret: 'invalid' },
{ accessTokenUrl: 'https://example.domain/oauth/access_token' },
]);
jest.mocked(axios).post.mockResolvedValueOnce({ data: { access_token: 'new_token' } });
await controller.handleCallback(req, res);
const dataCaptor = captor();
expect(credentialsRepository.update).toHaveBeenCalledWith(
'1',
// @ts-ignore
expect(oauthService.encryptAndSaveData).toHaveBeenCalledWith(
mockResolvedCredential,
expect.objectContaining({
data: dataCaptor,
id: '1',
name: 'Test Credential',
type: 'oAuth1Api',
oauthTokenData: { access_token: 'new_token' },
}),
);
expect(cipher.decrypt(dataCaptor.value)).toEqual(
JSON.stringify({ oauthTokenData: { access_token: 'new_token' } }),
['csrfSecret'],
);
expect(res.render).toHaveBeenCalledWith('oauth-callback');
expect(credentialsHelper.getDecrypted).toHaveBeenCalledWith(
additionalData,
credential,
credential.type,
'internal',
undefined,
true,
);
});
});
});

View File

@@ -9,7 +9,9 @@ import clientOAuth1 from 'oauth-1.0a';
import { OAuthRequest } from '@/requests';
import { AbstractOAuthController, skipAuthOnOAuthCallback } from './abstract-oauth.controller';
import { OauthService, OauthVersion, skipAuthOnOAuthCallback } from '@/oauth/oauth.service';
import { Logger } from '@n8n/backend-common';
import { ExternalHooks } from '@/external-hooks';
interface OAuth1CredentialData {
signatureMethod: 'HMAC-SHA256' | 'HMAC-SHA512' | 'HMAC-SHA1';
@@ -27,24 +29,24 @@ const algorithmMap = {
} as const;
@RestController('/oauth1-credential')
export class OAuth1CredentialController extends AbstractOAuthController {
override oauthVersion = 1;
export class OAuth1CredentialController {
constructor(
private readonly oauthService: OauthService,
private readonly externalHooks: ExternalHooks,
private readonly logger: Logger,
) {}
/** Get Authorization url */
@Get('/auth')
async getAuthUri(req: OAuthRequest.OAuth1Credential.Auth): Promise<string> {
const credential = await this.getCredential(req);
const additionalData = await this.getAdditionalData();
const decryptedDataOriginal = await this.getDecryptedDataForAuthUri(credential, additionalData);
const oauthCredentials = await this.applyDefaultsAndOverwrites<OAuth1CredentialData>(
credential,
decryptedDataOriginal,
additionalData,
);
const [csrfSecret, state] = this.createCsrfState(
credential.id,
skipAuthOnOAuthCallback ? undefined : req.user.id,
);
const credential = await this.oauthService.getCredential(req);
const oauthCredentials =
await this.oauthService.getOAuthCredentials<OAuth1CredentialData>(credential);
const [csrfSecret, state] = this.oauthService.createCsrfState({
cid: credential.id,
userId: skipAuthOnOAuthCallback ? undefined : req.user.id,
});
const signatureMethod = oauthCredentials.signatureMethod;
@@ -62,7 +64,7 @@ export class OAuth1CredentialController extends AbstractOAuthController {
};
const oauthRequestData = {
oauth_callback: `${this.baseUrl}/callback?state=${state}`,
oauth_callback: `${this.oauthService.getBaseUrl(OauthVersion.V1)}/callback?state=${state}`,
};
await this.externalHooks.run('oauth1.authenticate', [oAuthOptions, oauthRequestData]);
@@ -91,7 +93,7 @@ export class OAuth1CredentialController extends AbstractOAuthController {
const returnUri = `${oauthCredentials.authUrl}?oauth_token=${responseJson.oauth_token}`;
await this.encryptAndSaveData(credential, { csrfSecret });
await this.oauthService.encryptAndSaveData(credential, { csrfSecret });
this.logger.debug('OAuth1 authorization successful for new credential', {
userId: req.user.id,
@@ -108,7 +110,7 @@ export class OAuth1CredentialController extends AbstractOAuthController {
const { oauth_verifier, oauth_token, state: encodedState } = req.query;
if (!oauth_verifier || !oauth_token || !encodedState) {
return this.renderCallbackError(
return this.oauthService.renderCallbackError(
res,
'Insufficient parameters for OAuth1 callback.',
`Received following query parameters: ${JSON.stringify(req.query)}`,
@@ -116,7 +118,7 @@ export class OAuth1CredentialController extends AbstractOAuthController {
}
const [credential, _, oauthCredentials] =
await this.resolveCredential<OAuth1CredentialData>(req);
await this.oauthService.resolveCredential<OAuth1CredentialData>(req);
// Form URL encoded body https://datatracker.ietf.org/doc/html/rfc5849#section-3.5.2
const oauthToken = await axios.post<string>(
@@ -131,15 +133,18 @@ export class OAuth1CredentialController extends AbstractOAuthController {
const oauthTokenData = Object.fromEntries(paramParser.entries());
await this.encryptAndSaveData(credential, { oauthTokenData }, ['csrfSecret']);
console.log('oauthTokenData', oauthTokenData);
await this.oauthService.encryptAndSaveData(credential, { oauthTokenData }, ['csrfSecret']);
this.logger.debug('OAuth1 callback successful for new credential', {
credentialId: credential.id,
});
return res.render('oauth-callback');
} catch (e) {
console.log('error', e);
const error = ensureError(e);
return this.renderCallbackError(
return this.oauthService.renderCallbackError(
res,
error.message,
'body' in error ? jsonStringify(error.body) : undefined,

View File

@@ -20,44 +20,31 @@ import {
import pkceChallenge from 'pkce-challenge';
import * as qs from 'querystring';
import { AbstractOAuthController, skipAuthOnOAuthCallback } from './abstract-oauth.controller';
import {
oAuthAuthorizationServerMetadataSchema,
dynamicClientRegistrationResponseSchema,
} from './oauth2-dynamic-client-registration.schema';
import { GENERIC_OAUTH2_CREDENTIALS_WITH_EDITABLE_SCOPE as GENERIC_OAUTH2_CREDENTIALS_WITH_EDITABLE_SCOPE } from '@/constants';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { OAuthRequest } from '@/requests';
import { OauthService, OauthVersion, skipAuthOnOAuthCallback } from '@/oauth/oauth.service';
import { Logger } from '@n8n/backend-common';
import { ExternalHooks } from '@/external-hooks';
@RestController('/oauth2-credential')
export class OAuth2CredentialController extends AbstractOAuthController {
override oauthVersion = 2;
export class OAuth2CredentialController {
constructor(
private readonly oauthService: OauthService,
private readonly logger: Logger,
private readonly externalHooks: ExternalHooks,
) {}
/** Get Authorization url */
@Get('/auth')
async getAuthUri(req: OAuthRequest.OAuth2Credential.Auth): Promise<string> {
const credential = await this.getCredential(req);
const additionalData = await this.getAdditionalData();
const decryptedDataOriginal = await this.getDecryptedDataForAuthUri(credential, additionalData);
// At some point in the past we saved hidden scopes to credentials (but shouldn't)
// Delete scope before applying defaults to make sure new scopes are present on reconnect
// Generic Oauth2 API is an exception because it needs to save the scope
if (
decryptedDataOriginal?.scope &&
credential.type.includes('OAuth2') &&
!GENERIC_OAUTH2_CREDENTIALS_WITH_EDITABLE_SCOPE.includes(credential.type)
) {
delete decryptedDataOriginal.scope;
}
const oauthCredentials = await this.applyDefaultsAndOverwrites<OAuth2CredentialData>(
credential,
decryptedDataOriginal,
additionalData,
);
const credential = await this.oauthService.getCredential(req);
const oauthCredentials: OAuth2CredentialData =
await this.oauthService.getOAuthCredentials<OAuth2CredentialData>(credential);
const toUpdate: ICredentialDataDecryptedObject = {};
@@ -102,7 +89,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
authentication,
);
const registerPayload = {
redirect_uris: [`${this.baseUrl}/callback`],
redirect_uris: [`${this.oauthService.getBaseUrl(OauthVersion.V2)}/callback`],
token_endpoint_auth_method,
grant_types,
response_types: ['code'],
@@ -135,10 +122,10 @@ export class OAuth2CredentialController extends AbstractOAuthController {
}
// Generate a CSRF prevention token and send it as an OAuth2 state string
const [csrfSecret, state] = this.createCsrfState(
credential.id,
skipAuthOnOAuthCallback ? undefined : req.user.id,
);
const [csrfSecret, state] = this.oauthService.createCsrfState({
cid: credential.id,
userId: skipAuthOnOAuthCallback ? undefined : req.user.id,
});
const oAuthOptions = {
...this.convertCredentialToOptions(oauthCredentials),
@@ -162,7 +149,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
toUpdate.codeVerifier = code_verifier;
}
await this.encryptAndSaveData(credential, toUpdate);
await this.oauthService.encryptAndSaveData(credential, toUpdate);
const oAuthObj = new ClientOAuth2(oAuthOptions);
const returnUri = oAuthObj.code.getUri();
@@ -181,7 +168,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
try {
const { code, state: encodedState } = req.query;
if (!code || !encodedState) {
return this.renderCallbackError(
return this.oauthService.renderCallbackError(
res,
'Insufficient parameters for OAuth2 callback.',
`Received following query parameters: ${JSON.stringify(req.query)}`,
@@ -189,7 +176,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
}
const [credential, decryptedDataOriginal, oauthCredentials] =
await this.resolveCredential<OAuth2CredentialData>(req);
await this.oauthService.resolveCredential<OAuth2CredentialData>(req);
let options: Partial<ClientOAuth2Options> = {};
@@ -233,7 +220,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
...oauthToken.data,
};
await this.encryptAndSaveData(credential, { oauthTokenData }, ['csrfSecret']);
await this.oauthService.encryptAndSaveData(credential, { oauthTokenData }, ['csrfSecret']);
this.logger.debug('OAuth2 callback successful for credential', {
credentialId: credential.id,
@@ -242,7 +229,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
return res.render('oauth-callback');
} catch (e) {
const error = ensureError(e);
return this.renderCallbackError(
return this.oauthService.renderCallbackError(
res,
error.message,
'body' in error ? jsonStringify(error.body) : undefined,
@@ -257,7 +244,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
accessTokenUri: credential.accessTokenUrl ?? '',
authorizationUri: credential.authUrl ?? '',
authentication: credential.authentication ?? 'header',
redirectUri: `${this.baseUrl}/callback`,
redirectUri: `${this.oauthService.getBaseUrl(OauthVersion.V2)}/callback`,
scopes: split(credential.scope ?? 'openid', ','),
scopesSeparator: credential.scope?.includes(',') ? ',' : ' ',
ignoreSSLIssues: credential.ignoreSSLIssues ?? false,

View File

@@ -1,82 +1,31 @@
import { DismissBannerRequestDto, OwnerSetupRequestDto } from '@n8n/api-types';
import { Logger } from '@n8n/backend-common';
import {
AuthenticatedRequest,
GLOBAL_OWNER_ROLE,
SettingsRepository,
UserRepository,
} from '@n8n/db';
import { AuthenticatedRequest } from '@n8n/db';
import { Body, GlobalScope, Post, RestController } from '@n8n/decorators';
import { Response } from 'express';
import { AuthService } from '@/auth/auth.service';
import config from '@/config';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { EventService } from '@/events/event.service';
import { validateEntity } from '@/generic-helpers';
import { PostHogClient } from '@/posthog';
import { BannerService } from '@/services/banner.service';
import { PasswordUtility } from '@/services/password.utility';
import { UserService } from '@/services/user.service';
import { OwnershipService } from '@/services/ownership.service';
@RestController('/owner')
export class OwnerController {
constructor(
private readonly logger: Logger,
private readonly eventService: EventService,
private readonly settingsRepository: SettingsRepository,
private readonly authService: AuthService,
private readonly bannerService: BannerService,
private readonly userService: UserService,
private readonly passwordUtility: PasswordUtility,
private readonly postHog: PostHogClient,
private readonly userRepository: UserRepository,
private readonly ownershipService: OwnershipService,
) {}
/**
* Promote a shell into the owner of the n8n instance,
* and enable `isInstanceOwnerSetUp` setting.
* Promote a shell into the owner of the n8n instance
*/
@Post('/setup', { skipAuth: true })
async setupOwner(req: AuthenticatedRequest, res: Response, @Body payload: OwnerSetupRequestDto) {
const { email, firstName, lastName, password } = payload;
if (config.getEnv('userManagement.isInstanceOwnerSetUp')) {
this.logger.debug(
'Request to claim instance ownership failed because instance owner already exists',
);
throw new BadRequestError('Instance owner already setup');
}
let owner = await this.userRepository.findOneOrFail({
where: { role: { slug: GLOBAL_OWNER_ROLE.slug } },
relations: ['role'],
});
owner.email = email;
owner.firstName = firstName;
owner.lastName = lastName;
owner.password = await this.passwordUtility.hash(password);
// TODO: move XSS validation out into the DTO class
await validateEntity(owner);
owner = await this.userRepository.save(owner, { transaction: false });
this.logger.info('Owner was set up successfully');
await this.settingsRepository.update(
{ key: 'userManagement.isInstanceOwnerSetUp' },
{ value: JSON.stringify(true) },
);
config.set('userManagement.isInstanceOwnerSetUp', true);
this.logger.debug('Setting isInstanceOwnerSetUp updated successfully');
const owner = await this.ownershipService.setupOwner(payload);
this.authService.issueCookie(res, owner, req.authInfo?.usedMfa ?? false, req.browserId);
this.eventService.emit('instance-owner-setup', { userId: owner.id });
return await this.userService.toPublic(owner, { posthog: this.postHog, withScopes: true });
}

View File

@@ -1,14 +1,15 @@
import type { LicenseState } from '@n8n/backend-common';
import type { AuthenticatedRequest, SharedCredentialsRepository, CredentialsEntity } from '@n8n/db';
import { GLOBAL_OWNER_ROLE, GLOBAL_MEMBER_ROLE } from '@n8n/db';
import { mock } from 'jest-mock-extended';
import { createRawProjectData } from '@/__tests__/project.test-data';
import type { EventService } from '@/events/event.service';
import { createdCredentialsWithScopes, createNewCredentialsPayload } from './credentials.test-data';
import type { CredentialsFinderService } from '../credentials-finder.service';
import { CredentialsController } from '../credentials.controller';
import type { CredentialsService } from '../credentials.service';
import type { CredentialsFinderService } from '../credentials-finder.service';
import { createRawProjectData } from '@/__tests__/project.test-data';
import type { EventService } from '@/events/event.service';
import type { CredentialRequest } from '@/requests';
describe('CredentialsController', () => {
@@ -16,13 +17,14 @@ describe('CredentialsController', () => {
const credentialsService = mock<CredentialsService>();
const sharedCredentialsRepository = mock<SharedCredentialsRepository>();
const credentialsFinderService = mock<CredentialsFinderService>();
const licenseState = mock<LicenseState>();
const credentialsController = new CredentialsController(
mock(),
credentialsService,
mock(),
mock(),
mock(),
licenseState,
mock(),
mock(),
sharedCredentialsRepository,
@@ -126,7 +128,7 @@ describe('CredentialsController', () => {
] as any);
});
it('should allow owner to set isGlobal to true', async () => {
it('should not allow owner to set isGlobal to true if not licensed', async () => {
// ARRANGE
const ownerReq = {
user: { id: 'owner-id', role: GLOBAL_OWNER_ROLE },
@@ -139,6 +141,34 @@ describe('CredentialsController', () => {
},
} as unknown as CredentialRequest.Update;
licenseState.isSharingLicensed.mockReturnValue(false);
credentialsFinderService.findCredentialForUser.mockResolvedValue(existingCredential);
// ACT
await expect(credentialsController.updateCredentials(ownerReq)).rejects.toThrowError(
'You are not licensed for sharing credentials',
);
// ASSERT
expect(credentialsService.update).not.toHaveBeenCalled();
});
it('should allow owner to set isGlobal to true if licensed', async () => {
// ARRANGE
const ownerReq = {
user: { id: 'owner-id', role: GLOBAL_OWNER_ROLE },
params: { credentialId },
body: {
name: 'Updated Credential',
type: 'apiKey',
data: { apiKey: 'updated-key' },
isGlobal: true,
},
} as unknown as CredentialRequest.Update;
licenseState.isSharingLicensed.mockReturnValue(true);
credentialsFinderService.findCredentialForUser.mockResolvedValue(existingCredential);
credentialsService.update.mockResolvedValue({
...existingCredential,
@@ -163,7 +193,7 @@ describe('CredentialsController', () => {
});
});
it('should allow owner to set isGlobal to false', async () => {
it('should allow owner to set isGlobal to false if licensed', async () => {
// ARRANGE
const globalCredential = mock<CredentialsEntity>({
...existingCredential,
@@ -180,6 +210,8 @@ describe('CredentialsController', () => {
},
} as unknown as CredentialRequest.Update;
licenseState.isSharingLicensed.mockReturnValue(true);
credentialsFinderService.findCredentialForUser.mockResolvedValue(globalCredential);
credentialsService.update.mockResolvedValue({
...globalCredential,
@@ -198,7 +230,7 @@ describe('CredentialsController', () => {
);
});
it('should prevent non-owner from changing isGlobal', async () => {
it('should prevent non-owner from changing isGlobal if licensed', async () => {
// ARRANGE
const memberReq = {
user: { id: 'member-id', role: GLOBAL_MEMBER_ROLE },
@@ -211,6 +243,8 @@ describe('CredentialsController', () => {
},
} as unknown as CredentialRequest.Update;
licenseState.isSharingLicensed.mockReturnValue(true);
credentialsFinderService.findCredentialForUser.mockResolvedValue(existingCredential);
// ACT
@@ -235,6 +269,8 @@ describe('CredentialsController', () => {
},
} as unknown as CredentialRequest.Update;
licenseState.isSharingLicensed.mockReturnValue(true);
credentialsFinderService.findCredentialForUser.mockResolvedValue({
...existingCredential,
isGlobal: true,

View File

@@ -4,7 +4,7 @@ import {
CredentialsGetOneRequestQuery,
GenerateCredentialNameRequestQuery,
} from '@n8n/api-types';
import { Logger } from '@n8n/backend-common';
import { LicenseState, Logger } from '@n8n/backend-common';
import { GlobalConfig } from '@n8n/config';
import {
SharedCredentials,
@@ -40,7 +40,6 @@ import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { ForbiddenError } from '@/errors/response-errors/forbidden.error';
import { NotFoundError } from '@/errors/response-errors/not-found.error';
import { EventService } from '@/events/event.service';
import { License } from '@/license';
import { listQueryMiddleware } from '@/middlewares';
import { CredentialRequest } from '@/requests';
import { NamingService } from '@/services/naming.service';
@@ -54,7 +53,7 @@ export class CredentialsController {
private readonly credentialsService: CredentialsService,
private readonly enterpriseCredentialsService: EnterpriseCredentialsService,
private readonly namingService: NamingService,
private readonly license: License,
private readonly licenseState: LicenseState,
private readonly logger: Logger,
private readonly userManagementMailer: UserManagementMailer,
private readonly sharedCredentialsRepository: SharedCredentialsRepository,
@@ -114,7 +113,7 @@ export class CredentialsController {
@Param('credentialId') credentialId: string,
@Query query: CredentialsGetOneRequestQuery,
) {
const { shared, ...credential } = this.license.isSharingEnabled()
const { shared, ...credential } = this.licenseState.isSharingLicensed()
? await this.enterpriseCredentialsService.getOne(
req.user,
credentialId,
@@ -246,6 +245,10 @@ export class CredentialsController {
// Update isGlobal if provided in the payload and user has permission
const isGlobal = body.isGlobal;
if (isGlobal !== undefined && isGlobal !== credential.isGlobal) {
if (!this.licenseState.isSharingLicensed()) {
throw new ForbiddenError('You are not licensed for sharing credentials');
}
const canShareGlobally = hasGlobalScope(req.user, 'credential:shareGlobally');
if (!canShareGlobally) {
throw new ForbiddenError(

View File

@@ -0,0 +1,10 @@
import { UserError } from 'n8n-workflow';
export class SingleWebhookTriggerError extends UserError {
constructor(triggerName: string) {
super(
`Because of limitations in ${triggerName}, n8n can't listen for test executions at the same time as listening for production ones. Unpublish the workflow to execute.`,
{ extra: { triggerName } },
);
}
}

View File

@@ -1,16 +1,20 @@
import type { InstanceSettingsConfig } from '@n8n/config';
import type { GlobalConfig } from '@n8n/config';
import { mock } from 'jest-mock-extended';
import { SettingsFilePermissionsRule } from '../settings-file-permissions.rule';
describe('SettingsFilePermissionsRule', () => {
let rule: SettingsFilePermissionsRule;
const instanceSettingsConfig = mock<InstanceSettingsConfig>({});
const mockGlobalConfig = mock<GlobalConfig>({
deployment: { type: 'default' },
});
let originalEnvValue: string | undefined;
beforeEach(() => {
rule = new SettingsFilePermissionsRule(instanceSettingsConfig);
rule = new SettingsFilePermissionsRule(mockGlobalConfig);
originalEnvValue = process.env.N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS;
// Clear env var before each test
delete process.env.N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS;
});
afterEach(() => {
@@ -22,8 +26,21 @@ describe('SettingsFilePermissionsRule', () => {
});
describe('detect()', () => {
it('should not be affected when enforceSettingsFilePermissions is set to false', async () => {
instanceSettingsConfig.enforceSettingsFilePermissions = false;
it('should not be affected on cloud deployments', async () => {
const cloudGlobalConfig = mock<GlobalConfig>({
deployment: { type: 'cloud' },
});
const cloudRule = new SettingsFilePermissionsRule(cloudGlobalConfig);
const result = await cloudRule.detect();
expect(result.isAffected).toBe(false);
expect(result.instanceIssues).toHaveLength(0);
expect(result.recommendations).toHaveLength(0);
});
it('should not be affected when N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS is explicitly set to false', async () => {
process.env.N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS = 'false';
const result = await rule.detect();
@@ -32,8 +49,18 @@ describe('SettingsFilePermissionsRule', () => {
expect(result.recommendations).toHaveLength(0);
});
it('should be affected when enforceSettingsFilePermissions is not set to false', async () => {
instanceSettingsConfig.enforceSettingsFilePermissions = true;
it('should not be affected when N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS is explicitly set to true', async () => {
process.env.N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS = 'true';
const result = await rule.detect();
expect(result.isAffected).toBe(false);
expect(result.instanceIssues).toHaveLength(0);
expect(result.recommendations).toHaveLength(0);
});
it('should be affected when N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS is not set (default behavior change)', async () => {
// Env var is not set (cleared in beforeEach)
const result = await rule.detect();
expect(result.isAffected).toBe(true);

View File

@@ -1,10 +1,16 @@
import type { GlobalConfig } from '@n8n/config';
import { mock } from 'jest-mock-extended';
import { TaskRunnerDockerImageRule } from '../task-runner-docker-image.rule';
describe('TaskRunnerDockerImageRule', () => {
let rule: TaskRunnerDockerImageRule;
beforeEach(() => {
rule = new TaskRunnerDockerImageRule();
const mockGlobalConfig = mock<GlobalConfig>({
deployment: { type: 'default' },
});
rule = new TaskRunnerDockerImageRule(mockGlobalConfig);
});
describe('getMetadata()', () => {
@@ -18,6 +24,19 @@ describe('TaskRunnerDockerImageRule', () => {
});
describe('detect()', () => {
it('should not be affected on cloud deployments', async () => {
const mockGlobalConfig = mock<GlobalConfig>({
deployment: { type: 'cloud' },
});
const cloudRule = new TaskRunnerDockerImageRule(mockGlobalConfig);
const result = await cloudRule.detect();
expect(result.isAffected).toBe(false);
expect(result.instanceIssues).toHaveLength(0);
expect(result.recommendations).toHaveLength(0);
});
it('should always be affected (informational)', async () => {
const result = await rule.detect();

View File

@@ -1,12 +1,35 @@
import type { TaskRunnersConfig } from '@n8n/config';
import type { GlobalConfig, TaskRunnersConfig } from '@n8n/config';
import { mock } from 'jest-mock-extended';
import { TaskRunnersRule } from '../task-runners.rule';
describe('TaskRunnersRule', () => {
let mockGlobalConfig: GlobalConfig;
beforeEach(() => {
mockGlobalConfig = mock<GlobalConfig>({
deployment: { type: 'default' },
});
});
describe('detect()', () => {
it('should not be affected on cloud deployments', async () => {
const mockConfig = { enabled: false } as TaskRunnersConfig;
const cloudGlobalConfig = mock<GlobalConfig>({
deployment: { type: 'cloud' },
});
const rule = new TaskRunnersRule(mockConfig, cloudGlobalConfig);
const result = await rule.detect();
expect(result.isAffected).toBe(false);
expect(result.instanceIssues).toHaveLength(0);
expect(result.recommendations).toHaveLength(0);
});
it('should not be affected when runners are already enabled', async () => {
const mockConfig = { enabled: true } as TaskRunnersConfig;
const rule = new TaskRunnersRule(mockConfig);
const rule = new TaskRunnersRule(mockConfig, mockGlobalConfig);
const result = await rule.detect();
@@ -16,18 +39,19 @@ describe('TaskRunnersRule', () => {
it('should be affected when runners are not enabled', async () => {
const mockConfig = { enabled: false } as TaskRunnersConfig;
const rule = new TaskRunnersRule(mockConfig);
const rule = new TaskRunnersRule(mockConfig, mockGlobalConfig);
const result = await rule.detect();
expect(result.isAffected).toBe(true);
expect(result.instanceIssues).toHaveLength(1);
expect(result.instanceIssues[0].title).toBe('Task Runners will be enabled by default');
expect(result.recommendations).toHaveLength(3);
});
it('should be affected when runners are explicitly disabled', async () => {
const mockConfig = { enabled: false } as TaskRunnersConfig;
const rule = new TaskRunnersRule(mockConfig);
const rule = new TaskRunnersRule(mockConfig, mockGlobalConfig);
const result = await rule.detect();

View File

@@ -1,4 +1,4 @@
import { InstanceSettingsConfig } from '@n8n/config';
import { GlobalConfig } from '@n8n/config';
import { Service } from '@n8n/di';
import type {
@@ -10,7 +10,7 @@ import { BreakingChangeCategory } from '../../types';
@Service()
export class SettingsFilePermissionsRule implements IBreakingChangeInstanceRule {
constructor(private readonly instanceSettingsConfig: InstanceSettingsConfig) {}
constructor(private readonly globalConfig: GlobalConfig) {}
id: string = 'settings-file-permissions-v2';
@@ -28,9 +28,18 @@ export class SettingsFilePermissionsRule implements IBreakingChangeInstanceRule
}
async detect(): Promise<InstanceDetectionReport> {
// If enforceSettingsFilePermissions is explicitly set to 'false', users are not affected
// because they've configured the system to not enforce file permissions
if (!this.instanceSettingsConfig.enforceSettingsFilePermissions) {
// Not relevant for cloud deployments - cloud manages infrastructure and file permissions
if (this.globalConfig.deployment.type === 'cloud') {
return {
isAffected: false,
instanceIssues: [],
recommendations: [],
};
}
// If N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS is explicitly set to any value, users are not affected
// because they've already handled the configuration and are aware of this setting.
if (process.env.N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS) {
return {
isAffected: false,
instanceIssues: [],

View File

@@ -1,3 +1,4 @@
import { GlobalConfig } from '@n8n/config';
import { Service } from '@n8n/di';
import type {
@@ -9,6 +10,8 @@ import { BreakingChangeCategory } from '../../types';
@Service()
export class TaskRunnerDockerImageRule implements IBreakingChangeInstanceRule {
constructor(private readonly globalConfig: GlobalConfig) {}
id: string = 'task-runner-docker-image-v2';
getMetadata(): BreakingChangeRuleMetadata {
@@ -25,6 +28,15 @@ export class TaskRunnerDockerImageRule implements IBreakingChangeInstanceRule {
}
async detect(): Promise<InstanceDetectionReport> {
// Not relevant for cloud deployments - cloud manages Docker images
if (this.globalConfig.deployment.type === 'cloud') {
return {
isAffected: false,
instanceIssues: [],
recommendations: [],
};
}
const result: InstanceDetectionReport = {
isAffected: true,
instanceIssues: [

View File

@@ -1,4 +1,4 @@
import { TaskRunnersConfig } from '@n8n/config';
import { GlobalConfig, TaskRunnersConfig } from '@n8n/config';
import { Service } from '@n8n/di';
import type {
@@ -10,7 +10,10 @@ import { BreakingChangeCategory } from '../../types';
@Service()
export class TaskRunnersRule implements IBreakingChangeInstanceRule {
constructor(private readonly taskRunnersConfig: TaskRunnersConfig) {}
constructor(
private readonly taskRunnersConfig: TaskRunnersConfig,
private readonly globalConfig: GlobalConfig,
) {}
id: string = 'task-runners-v2';
@@ -27,6 +30,15 @@ export class TaskRunnersRule implements IBreakingChangeInstanceRule {
}
async detect(): Promise<InstanceDetectionReport> {
// Not relevant for cloud deployments - cloud manages task runner infrastructure
if (this.globalConfig.deployment.type === 'cloud') {
return {
isAffected: false,
instanceIssues: [],
recommendations: [],
};
}
const result: InstanceDetectionReport = {
isAffected: false,
instanceIssues: [],

Some files were not shown because too many files have changed in this diff Show More