Merge branch 'master' into ds-298-replace-old-tooltip-component

This commit is contained in:
Csaba Tuncsik
2025-12-05 22:37:09 +02:00
committed by GitHub
207 changed files with 7876 additions and 6032 deletions

View File

@@ -43,7 +43,7 @@ jobs:
pnpm add --global wrangler
- name: Deploy
uses: cloudflare/wrangler-action@da0e0dfe58b7a431659754fdf3f186c529afbe65
uses: cloudflare/wrangler-action@707f63750981584eb6abc365a50d441516fb04b8
id: cloudflare_deployment
with:
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}

View File

@@ -104,21 +104,29 @@ The Langsmith integration provides two key components:
#### 6. Pairwise Evaluation
Pairwise evaluation provides a simpler, criteria-based approach to workflow evaluation. Instead of using the complex multi-metric evaluation system, it evaluates workflows against a custom set of "do" and "don't" rules defined in the dataset.
Pairwise evaluation provides a criteria-based approach to workflow evaluation with hierarchical scoring and multi-judge consensus. It evaluates workflows against a custom set of "do" and "don't" rules defined in the dataset.
**Evaluator (`chains/pairwise-evaluator.ts`):**
- Evaluates workflows against a checklist of criteria (dos and don'ts)
- Uses an LLM to determine if each criterion passes or fails
- Requires evidence-based justification for each decision
- Calculates a simple pass/fail score (passes / total rules)
- Returns `primaryPass` (true only if ALL criteria pass) and `diagnosticScore` (ratio of passes)
**Runner (`langsmith/pairwise-runner.ts`):**
- Generates workflows from prompts in the dataset
- Applies pairwise evaluation to each generated workflow
- Reports three metrics to Langsmith:
- `pairwise_score`: Overall score (0-1)
- `pairwise_passed_count`: Number of criteria passed
- `pairwise_failed_count`: Number of criteria violated
- Runs multiple LLM judges in parallel for each evaluation (configurable via `--judges`)
- Aggregates judge results using majority vote
- Supports filtering by `notion_id` metadata for single-example runs
- Reports five metrics to Langsmith:
- `pairwise_primary`: Majority vote result (0 or 1)
- `pairwise_diagnostic`: Average diagnostic score across judges
- `pairwise_judges_passed`: Count of judges that passed
- `pairwise_total_violations`: Sum of all violations
- `pairwise_total_passes`: Sum of all passes
**Logger (`utils/logger.ts`):**
- Simple evaluation logger with verbose mode support
- Controls output verbosity via `--verbose` flag
**Dataset Format:**
The pairwise evaluation expects a Langsmith dataset with examples containing:
@@ -217,6 +225,9 @@ GENERATE_TEST_CASES=true pnpm eval
# With custom concurrency
EVALUATION_CONCURRENCY=10 pnpm eval
# With feature flags enabled
pnpm eval --multi-agent --template-examples
```
### Langsmith Evaluation
@@ -229,11 +240,59 @@ export LANGSMITH_DATASET_NAME=your_dataset_name
# Run evaluation
pnpm eval:langsmith
# With feature flags enabled
pnpm eval:langsmith --multi-agent
```
### Pairwise Evaluation
Pairwise evaluation uses a dataset with custom do/don't criteria for each prompt.
Pairwise evaluation uses a dataset with custom do/don't criteria for each prompt. It implements a hierarchical scoring system with multiple LLM judges per evaluation.
#### CLI Options
| Option | Description | Default |
|--------|-------------|---------|
| `--prompt <text>` | Run local evaluation with this prompt (no LangSmith required) | - |
| `--dos <rules>` | Newline-separated "do" rules for local evaluation | - |
| `--donts <rules>` | Newline-separated "don't" rules for local evaluation | - |
| `--notion-id <id>` | Filter to a single example by its `notion_id` metadata | (all examples) |
| `--max-examples <n>` | Limit number of examples to evaluate (useful for testing) | (no limit) |
| `--repetitions <n>` | Number of times to repeat the entire evaluation | 1 |
| `--generations <n>` | Number of workflow generations per prompt (for variance reduction) | 1 |
| `--judges <n>` | Number of LLM judges per evaluation | 3 |
| `--concurrency <n>` | Number of prompts to evaluate in parallel | 5 |
| `--name <name>` | Custom experiment name in LangSmith | `pairwise-evals` |
| `--output-dir <path>` | Save generated workflows and evaluation results to this directory | - |
| `--verbose`, `-v` | Enable verbose logging (shows judge details, violations, etc.) | false |
| `--multi-agent` | Enable multi-agent architecture (see [Feature Flags](#feature-flags)) | false |
| `--template-examples` | Enable template-based examples (see [Feature Flags](#feature-flags)) | false |
#### Local Mode (No LangSmith Required)
Run a single pairwise evaluation locally without needing a LangSmith account:
```bash
# Basic local evaluation
pnpm eval:pairwise --prompt "Create a workflow that sends Slack messages" --dos "Use Slack node"
# With don'ts and multiple judges
pnpm eval:pairwise \
--prompt "Create a workflow that fetches data from an API" \
--dos "Use HTTP Request node\nHandle errors" \
--donts "Don't hardcode URLs" \
--judges 5 \
--verbose
```
Local mode is useful for:
- Testing prompts before adding them to a dataset
- Quick iteration on evaluation criteria
- Running evaluations without LangSmith setup
#### LangSmith Mode
For dataset-based evaluation with experiment tracking:
```bash
# Set required environment variables
@@ -242,14 +301,104 @@ export LANGSMITH_API_KEY=your_api_key
# Run pairwise evaluation (uses default dataset: notion-pairwise-workflows)
pnpm eval:pairwise
# Run a single example by notion_id
pnpm eval:pairwise --notion-id 30d29454-b397-4a35-8e0b-74a2302fa81a
# Run with 3 repetitions and 5 judges, custom experiment name
pnpm eval:pairwise --repetitions 3 --judges 5 --name "my-experiment"
# Enable verbose logging to see all judge details
pnpm eval:pairwise --notion-id abc123 --verbose
# Use a custom dataset
LANGSMITH_DATASET_NAME=my-pairwise-dataset pnpm eval:pairwise
# Limit to specific number of examples (useful for testing)
EVAL_MAX_EXAMPLES=2 pnpm eval:pairwise
pnpm eval:pairwise --max-examples 2
```
# Run with multiple repetitions
pnpm eval:pairwise --repetitions 3
#### Multi-Generation Evaluation
The `--generations` flag enables multiple workflow generations per prompt, providing a **Generation Correctness** metric:
```bash
# Run 3 generations per prompt with 3 judges each
pnpm eval:pairwise --generations 3 --judges 3 --verbose
# Example output:
# Gen 1: 2/3 judges → ✓ PASS (diag=85%)
# Gen 2: 1/3 judges → ✗ FAIL (diag=60%)
# Gen 3: 3/3 judges → ✓ PASS (diag=95%)
# 📊 [#1] 2/3 gens → PASS (gen_corr=0.67, diag=80%)
```
**Generation Correctness** = (# passing generations) / total generations:
- With `--generations 3`: Values are 0, 0.33, 0.67, or 1
- With `--generations 5`: Values are 0, 0.2, 0.4, 0.6, 0.8, or 1
#### Hierarchical Scoring System
The pairwise evaluation uses a multi-level scoring hierarchy:
| Level | Primary Score | Secondary Score |
|-------|--------------|-----------------|
| Individual do/don't | Binary (true/false) | 0 or 1 |
| 1 LLM judge | false if ANY criterion fails | Average of criteria scores |
| N judges on 1 generation | Majority vote (≥50% pass) | Average diagnostic across judges |
| N generations on 1 prompt | (# passing gens) / N | Average diagnostic across generations |
| Full dataset | Average across prompts | Average diagnostic across all |
This approach reduces variance from LLM non-determinism by using multiple judges and generations.
#### Saving Artifacts with --output-dir
The `--output-dir` flag saves all generated workflows and evaluation results to disk:
```bash
# Save artifacts to ./eval-output directory
pnpm eval:pairwise --generations 3 --output-dir ./eval-output --verbose
```
**Output structure:**
```
eval-output/
├── prompt-1/
│ ├── prompt.txt # Original prompt text
│ ├── criteria.json # dos/donts criteria
│ ├── gen-1/
│ │ ├── workflow.json # Importable n8n workflow
│ │ └── evaluation.json # Judge results for this generation
│ ├── gen-2/
│ │ ├── workflow.json
│ │ └── evaluation.json
│ └── gen-3/
│ ├── workflow.json
│ └── evaluation.json
├── prompt-2/
│ └── ...
└── summary.json # Overall results summary
```
**workflow.json**: Directly importable into n8n (File → Import from file)
**evaluation.json**: Contains per-judge results including violations and passes:
```json
{
"generationIndex": 1,
"majorityPass": false,
"primaryPasses": 1,
"numJudges": 3,
"diagnosticScore": 0.35,
"judges": [
{
"judgeIndex": 1,
"primaryPass": false,
"diagnosticScore": 0.30,
"violations": [{"rule": "...", "justification": "..."}],
"passes": [{"rule": "...", "justification": "..."}]
}
]
}
```
## Configuration
@@ -282,10 +431,77 @@ The evaluation will fail with a clear error message if `nodes.json` is missing.
- `USE_LANGSMITH_EVAL` - Set to "true" to use Langsmith mode
- `USE_PAIRWISE_EVAL` - Set to "true" to use pairwise evaluation mode
- `LANGSMITH_DATASET_NAME` - Override default dataset name
- `EVAL_MAX_EXAMPLES` - Limit number of examples to evaluate (useful for testing)
- `EVALUATION_CONCURRENCY` - Number of parallel test executions (default: 5)
- `GENERATE_TEST_CASES` - Set to "true" to generate additional test cases
- `LLM_MODEL` - Model identifier for metadata tracking
- `EVAL_FEATURE_MULTI_AGENT` - Set to "true" to enable multi-agent mode
- `EVAL_FEATURE_TEMPLATE_EXAMPLES` - Set to "true" to enable template examples
### Feature Flags
Feature flags control experimental or optional behaviors in the AI Workflow Builder agent during evaluations. They can be set via environment variables or CLI arguments.
#### Available Flags
| Flag | Description | Default |
|------|-------------|---------|
| `multiAgent` | Enables multi-agent architecture with specialized sub-agents (supervisor, builder, configurator, discovery) | `false` |
| `templateExamples` | Enables template-based examples in agent prompts | `false` |
#### Setting Feature Flags
**Via Environment Variables:**
```bash
# Enable multi-agent mode
EVAL_FEATURE_MULTI_AGENT=true pnpm eval
# Enable template examples
EVAL_FEATURE_TEMPLATE_EXAMPLES=true pnpm eval:pairwise
# Enable both
EVAL_FEATURE_MULTI_AGENT=true EVAL_FEATURE_TEMPLATE_EXAMPLES=true pnpm eval:langsmith
```
**Via CLI Arguments:**
```bash
# Enable multi-agent mode
pnpm eval --multi-agent
# Enable template examples
pnpm eval:pairwise --template-examples
# Enable both
pnpm eval:langsmith --multi-agent --template-examples
```
#### Usage Across Evaluation Modes
Feature flags work consistently across all evaluation modes:
**CLI Evaluation:**
```bash
pnpm eval --multi-agent --template-examples
```
**Langsmith Evaluation:**
```bash
pnpm eval:langsmith --multi-agent
```
**Pairwise Evaluation (LangSmith mode):**
```bash
pnpm eval:pairwise --multi-agent --template-examples
```
**Pairwise Evaluation (Local mode):**
```bash
pnpm eval:pairwise --prompt "Create a Slack workflow" --dos "Use Slack node" --multi-agent
```
When feature flags are enabled, they are logged at the start of the evaluation:
```
➔ Feature flags enabled: multiAgent, templateExamples
```
## Output
@@ -304,14 +520,22 @@ The evaluation will fail with a clear error message if `nodes.json` is missing.
### Pairwise Evaluation Output
- Results are stored in Langsmith dashboard
- Experiment name format: `pairwise-evals-[uuid]`
- Metrics reported:
- `pairwise_score`: Overall pass rate (0-1)
- `pairwise_passed_count`: Number of criteria that passed
- `pairwise_failed_count`: Number of criteria that were violated
- Experiment name format: `<name>-[uuid]` (default: `pairwise-evals-[uuid]`)
- Metrics reported (single generation mode):
- `pairwise_primary`: Binary pass/fail based on majority vote (0 or 1)
- `pairwise_diagnostic`: Average diagnostic score across judges (0-1)
- `pairwise_judges_passed`: Number of judges that returned primaryPass=true
- `pairwise_total_violations`: Sum of violations across all judges
- `pairwise_total_passes`: Sum of passes across all judges
- Additional metrics reported (multi-generation mode with `--generations N`):
- `pairwise_generation_correctness`: (# passing generations) / N (0, 0.33, 0.67, 1 for N=3)
- `pairwise_aggregated_diagnostic`: Average diagnostic score across all generations
- `pairwise_generations_passed`: Count of generations that passed majority vote
- `pairwise_total_judge_calls`: Total judge invocations (generations × judges)
- Each result includes detailed comments with:
- List of violations with justifications
- List of passes with justifications
- Majority vote summary
- List of violations with justifications (per judge)
- List of passes (per judge)
## Adding New Test Cases

View File

@@ -52,7 +52,8 @@ describe('evaluateWorkflowPairwise', () => {
expect(result).toEqual({
...mockResult,
score: 1,
primaryPass: true,
diagnosticScore: 1,
});
expect(baseEvaluator.createEvaluatorChain).toHaveBeenCalledWith(
mockLlm,
@@ -69,7 +70,7 @@ describe('evaluateWorkflowPairwise', () => {
);
});
it('should calculate score correctly with violations', async () => {
it('should calculate diagnosticScore correctly with violations', async () => {
const mockResult = {
violations: [{ rule: "Don't do that", justification: 'Did it' }],
passes: [{ rule: 'Do this', justification: 'Done' }],
@@ -79,10 +80,11 @@ describe('evaluateWorkflowPairwise', () => {
const result = await evaluateWorkflowPairwise(mockLlm, input);
expect(result.score).toBe(0.5);
expect(result.primaryPass).toBe(false);
expect(result.diagnosticScore).toBe(0.5);
});
it('should return score 0 when no rules evaluated', async () => {
it('should return diagnosticScore 0 when no rules evaluated', async () => {
const mockResult = {
violations: [],
passes: [],
@@ -92,6 +94,7 @@ describe('evaluateWorkflowPairwise', () => {
const result = await evaluateWorkflowPairwise(mockLlm, input);
expect(result.score).toBe(0);
expect(result.primaryPass).toBe(true);
expect(result.diagnosticScore).toBe(0);
});
});

View File

@@ -32,7 +32,10 @@ const pairwiseEvaluationLLMResultSchema = z.object({
});
export type PairwiseEvaluationResult = z.infer<typeof pairwiseEvaluationLLMResultSchema> & {
score: number;
/** True only if ALL criteria passed (no violations) */
primaryPass: boolean;
/** Ratio of passed criteria to total criteria (0-1) */
diagnosticScore: number;
};
const EVALUATOR_SYSTEM_PROMPT = `You are an expert n8n workflow auditor. Your task is to strictly evaluate a candidate workflow against a provided set of requirements.
@@ -96,10 +99,12 @@ export async function evaluateWorkflowPairwise(
});
const totalRules = result.passes.length + result.violations.length;
const score = totalRules > 0 ? result.passes.length / totalRules : 0;
const diagnosticScore = totalRules > 0 ? result.passes.length / totalRules : 0;
const primaryPass = result.violations.length === 0;
return {
...result,
score,
primaryPass,
diagnosticScore,
};
}

View File

@@ -2,6 +2,7 @@ import pLimit from 'p-limit';
import pc from 'picocolors';
import { createProgressBar, updateProgress, displayResults, displayError } from './display.js';
import type { BuilderFeatureFlags } from '../../src/workflow-builder-agent.js';
import { basicTestCases, generateTestCases } from '../chains/test-case-generator.js';
import {
setupTestEnvironment,
@@ -25,6 +26,7 @@ type CliEvaluationOptions = {
testCaseFilter?: string; // Optional test case ID to run only a specific test
testCases?: TestCase[]; // Optional array of test cases to run (if not provided, uses defaults and generation)
repetitions?: number; // Number of times to run each test (e.g. for cache warming analysis)
featureFlags?: BuilderFeatureFlags; // Optional feature flags to pass to the agent (e.g. templateExamples, multiAgent)
};
/**
@@ -32,12 +34,20 @@ type CliEvaluationOptions = {
* Supports concurrency control via EVALUATION_CONCURRENCY environment variable
*/
export async function runCliEvaluation(options: CliEvaluationOptions = {}): Promise<void> {
const { repetitions = 1, testCaseFilter } = options;
const { repetitions = 1, testCaseFilter, featureFlags } = options;
console.log(formatHeader('AI Workflow Builder Full Evaluation', 70));
if (repetitions > 1) {
console.log(pc.yellow(`➔ Each test will be run ${repetitions} times for cache analysis`));
}
if (featureFlags) {
const enabledFlags = Object.entries(featureFlags)
.filter(([, v]) => v === true)
.map(([k]) => k);
if (enabledFlags.length > 0) {
console.log(pc.green(`➔ Feature flags enabled: ${enabledFlags.join(', ')}`));
}
}
console.log();
try {
// Setup test environment
@@ -105,7 +115,9 @@ export async function runCliEvaluation(options: CliEvaluationOptions = {}): Prom
// Create a dedicated agent for this test to avoid state conflicts
const testAgent = createAgent(parsedNodeTypes, llm, tracer);
const result = await runSingleTest(testAgent, llm, testCase, parsedNodeTypes);
const result = await runSingleTest(testAgent, llm, testCase, parsedNodeTypes, {
featureFlags,
});
testResults[testCase.id] = result.error ? 'fail' : 'pass';
completed++;

View File

@@ -5,6 +5,7 @@ import { Client } from 'langsmith/client';
import type { INodeTypeDescription } from 'n8n-workflow';
import { anthropicClaudeSonnet45 } from '../../src/llm-config.js';
import type { BuilderFeatureFlags } from '../../src/workflow-builder-agent.js';
import { WorkflowBuilderAgent } from '../../src/workflow-builder-agent.js';
import { loadNodesFromFile } from '../load-nodes.js';
@@ -76,12 +77,14 @@ export async function setupTestEnvironment(): Promise<TestEnvironment> {
* @param parsedNodeTypes - Array of parsed node type descriptions
* @param llm - Language model instance
* @param tracer - Optional LangChain tracer
* @param featureFlags - Optional feature flags
* @returns Configured WorkflowBuilderAgent
*/
export function createAgent(
parsedNodeTypes: INodeTypeDescription[],
llm: BaseChatModel,
tracer?: LangChainTracer,
featureFlags?: BuilderFeatureFlags,
): WorkflowBuilderAgent {
return new WorkflowBuilderAgent({
parsedNodeTypes,
@@ -89,6 +92,7 @@ export function createAgent(
llmComplexTask: llm,
checkpointer: new MemorySaver(),
tracer,
featureFlags,
});
}

View File

@@ -1,7 +1,7 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { INodeTypeDescription } from 'n8n-workflow';
import type { WorkflowBuilderAgent } from '../../src/workflow-builder-agent';
import type { BuilderFeatureFlags, WorkflowBuilderAgent } from '../../src/workflow-builder-agent';
import { evaluateWorkflow } from '../chains/workflow-evaluator';
import { programmaticEvaluation } from '../programmatic/programmatic-evaluation';
import type { EvaluationInput, TestCase } from '../types/evaluation';
@@ -69,12 +69,22 @@ export function createErrorResult(testCase: TestCase, error: unknown): TestResul
};
}
/**
 * Bundled inputs for running one evaluation test case.
 *
 * NOTE(review): `runSingleTest` in this file still takes positional parameters
 * plus a small `opts` object (`userId`, `featureFlags`); this interface does not
 * appear to be consumed by that signature — confirm it is wired to a caller or
 * remove it.
 */
export interface RunSingleTestOptions {
	/** Workflow builder agent used to generate the workflow under test */
	agent: WorkflowBuilderAgent;
	/** Language model used for the evaluation step */
	llm: BaseChatModel;
	/** Test case (prompt + id) to execute */
	testCase: TestCase;
	/** Node type descriptions available to the generated workflow */
	nodeTypes: INodeTypeDescription[];
	/** Session user ID; optional */
	userId?: string;
	/** Optional feature flags forwarded to the agent */
	featureFlags?: BuilderFeatureFlags;
}
/**
* Runs a single test case by generating a workflow and evaluating it
* @param agent - The workflow builder agent to use
* @param llm - Language model for evaluation
* @param testCase - Test case to execute
* @param userId - User ID for the session
* @param nodeTypes - Array of node type descriptions
* @param opts - Options object: `userId` (session user ID, defaults to `'test-user'`) and `featureFlags` (optional feature flags to pass to the agent)
* @returns Test result with generated workflow and evaluation
*/
export async function runSingleTest(
@@ -82,12 +92,15 @@ export async function runSingleTest(
llm: BaseChatModel,
testCase: TestCase,
nodeTypes: INodeTypeDescription[],
userId: string = 'test-user',
opts?: { userId?: string; featureFlags?: BuilderFeatureFlags },
): Promise<TestResult> {
const userId = opts?.userId ?? 'test-user';
try {
// Generate workflow
const startTime = Date.now();
await consumeGenerator(agent.chat(getChatPayload(testCase.prompt, testCase.id), userId));
await consumeGenerator(
agent.chat(getChatPayload(testCase.prompt, testCase.id, opts?.featureFlags), userId),
);
const generationTime = Date.now() - startTime;
// Get generated workflow with validation

View File

@@ -1,15 +1,54 @@
import type { BuilderFeatureFlags } from '@/workflow-builder-agent';
import { runCliEvaluation } from './cli/runner.js';
import { runPairwiseLangsmithEvaluation } from './langsmith/pairwise-runner.js';
import {
runLocalPairwiseEvaluation,
runPairwiseLangsmithEvaluation,
} from './langsmith/pairwise-runner.js';
import { runLangsmithEvaluation } from './langsmith/runner.js';
import { loadTestCasesFromCsv } from './utils/csv-prompt-loader.js';
// Re-export for external use if needed
export { runCliEvaluation } from './cli/runner.js';
export { runLangsmithEvaluation } from './langsmith/runner.js';
export { runPairwiseLangsmithEvaluation } from './langsmith/pairwise-runner.js';
export {
runLocalPairwiseEvaluation,
runPairwiseLangsmithEvaluation,
} from './langsmith/pairwise-runner.js';
export { runSingleTest } from './core/test-runner.js';
export { setupTestEnvironment, createAgent } from './core/environment.js';
/**
 * Parse an integer-valued CLI flag.
 *
 * Falls back to `defaultValue` when the flag is absent, not a number, or below 1.
 * When a truthy `max` is given, the parsed value is clamped to it.
 */
function getIntFlag(flag: string, defaultValue: number, max?: number): number {
	const raw = getFlagValue(flag);
	if (!raw) {
		return defaultValue;
	}
	const value = parseInt(raw, 10);
	if (Number.isNaN(value) || value < 1) {
		return defaultValue;
	}
	// Preserve truthiness check: a `max` of 0/undefined means "no clamp".
	return max ? Math.min(value, max) : value;
}
/** Read every supported CLI option into a single plain options object. */
function parseCliArgs() {
	const argv = process.argv;
	const testCasePos = argv.indexOf('--test-case');
	return {
		// `--test-case` takes its value from the next positional argument.
		testCaseId: testCasePos === -1 ? undefined : argv[testCasePos + 1],
		promptsCsvPath: getFlagValue('--prompts-csv') ?? process.env.PROMPTS_CSV_FILE,
		repetitions: getIntFlag('--repetitions', 1),
		notionId: getFlagValue('--notion-id'),
		numJudges: getIntFlag('--judges', 3),
		numGenerations: getIntFlag('--generations', 1, 10),
		concurrency: getIntFlag('--concurrency', 5),
		maxExamples: getIntFlag('--max-examples', 0), // 0 is treated as "no limit"
		verbose: argv.includes('--verbose') || argv.includes('-v'),
		experimentName: getFlagValue('--name'),
		outputDir: getFlagValue('--output-dir'),
		prompt: getFlagValue('--prompt'),
		dos: getFlagValue('--dos'),
		donts: getFlagValue('--donts'),
	};
}
/**
* Main entry point for evaluation
* Determines which evaluation mode to run based on environment variables
@@ -17,32 +56,54 @@ export { setupTestEnvironment, createAgent } from './core/environment.js';
async function main(): Promise<void> {
const useLangsmith = process.env.USE_LANGSMITH_EVAL === 'true';
const usePairwiseEval = process.env.USE_PAIRWISE_EVAL === 'true';
const args = parseCliArgs();
// Parse command line arguments for single test case
const testCaseId = process.argv.includes('--test-case')
? process.argv[process.argv.indexOf('--test-case') + 1]
: undefined;
// Parse command line argument for CSV prompts file path
const promptsCsvPath = getFlagValue('--prompts-csv') ?? process.env.PROMPTS_CSV_FILE;
if (promptsCsvPath && (useLangsmith || usePairwiseEval)) {
if (args.promptsCsvPath && (useLangsmith || usePairwiseEval)) {
console.warn('CSV-driven evaluations are only supported in CLI mode. Ignoring --prompts-csv.');
}
// Parse command line arguments for a number of repetitions (applies to both modes)
const repetitionsArg = process.argv.includes('--repetitions')
? parseInt(process.argv[process.argv.indexOf('--repetitions') + 1], 10)
: 1;
const repetitions = Number.isNaN(repetitionsArg) ? 1 : repetitionsArg;
// Parse feature flags from environment variables or CLI arguments
const featureFlags = parseFeatureFlags();
if (usePairwiseEval) {
await runPairwiseLangsmithEvaluation(repetitions);
if (args.prompt) {
// Local mode - run single evaluation without LangSmith
await runLocalPairwiseEvaluation({
prompt: args.prompt,
criteria: { dos: args.dos ?? '', donts: args.donts ?? '' },
numJudges: args.numJudges,
numGenerations: args.numGenerations,
verbose: args.verbose,
outputDir: args.outputDir,
featureFlags,
});
} else {
// LangSmith mode
await runPairwiseLangsmithEvaluation({
repetitions: args.repetitions,
notionId: args.notionId,
numJudges: args.numJudges,
numGenerations: args.numGenerations,
verbose: args.verbose,
experimentName: args.experimentName,
outputDir: args.outputDir,
concurrency: args.concurrency,
maxExamples: args.maxExamples || undefined,
featureFlags,
});
}
} else if (useLangsmith) {
await runLangsmithEvaluation(repetitions);
await runLangsmithEvaluation(args.repetitions, featureFlags);
} else {
const csvTestCases = promptsCsvPath ? loadTestCasesFromCsv(promptsCsvPath) : undefined;
await runCliEvaluation({ testCases: csvTestCases, testCaseFilter: testCaseId, repetitions });
const csvTestCases = args.promptsCsvPath
? loadTestCasesFromCsv(args.promptsCsvPath)
: undefined;
await runCliEvaluation({
testCases: csvTestCases,
testCaseFilter: args.testCaseId,
repetitions: args.repetitions,
featureFlags,
});
}
}
@@ -68,6 +129,36 @@ function getFlagValue(flag: string): string | undefined {
return undefined;
}
/**
 * Resolve builder feature flags from the environment and the command line.
 *
 * Environment variables:
 * - EVAL_FEATURE_TEMPLATE_EXAMPLES=true - Enable template examples feature
 * - EVAL_FEATURE_MULTI_AGENT=true - Enable multi-agent feature
 * CLI arguments:
 * - --template-examples - Enable template examples feature
 * - --multi-agent - Enable multi-agent feature
 *
 * Returns `undefined` when no flag is enabled, so callers can distinguish
 * "nothing requested" from an explicit flags object.
 */
function parseFeatureFlags(): BuilderFeatureFlags | undefined {
	const argv = process.argv;
	const templateExamples =
		process.env.EVAL_FEATURE_TEMPLATE_EXAMPLES === 'true' || argv.includes('--template-examples');
	const multiAgent =
		process.env.EVAL_FEATURE_MULTI_AGENT === 'true' || argv.includes('--multi-agent');
	if (!templateExamples && !multiAgent) {
		return undefined;
	}
	// Disabled flags are emitted as `undefined` rather than `false`,
	// matching the original behavior of `flag || undefined`.
	return {
		templateExamples: templateExamples ? true : undefined,
		multiAgent: multiAgent ? true : undefined,
	};
}
// Run if called directly
if (require.main === module) {
main().catch(console.error);

View File

@@ -5,6 +5,7 @@ import type { INodeTypeDescription } from 'n8n-workflow';
import pc from 'picocolors';
import { createLangsmithEvaluator } from './evaluator';
import type { BuilderFeatureFlags } from '../../src/workflow-builder-agent';
import type { WorkflowState } from '../../src/workflow-state';
import { setupTestEnvironment, createAgent } from '../core/environment';
import {
@@ -20,12 +21,14 @@ import { consumeGenerator, formatHeader, getChatPayload } from '../utils/evaluat
* @param parsedNodeTypes - Node types
* @param llm - Language model
* @param tracer - Optional tracer
* @param featureFlags - Optional feature flags to pass to the agent
* @returns Function that generates workflows from inputs
*/
function createWorkflowGenerator(
parsedNodeTypes: INodeTypeDescription[],
llm: BaseChatModel,
tracer?: LangChainTracer,
featureFlags?: BuilderFeatureFlags,
) {
return async (inputs: typeof WorkflowState.State) => {
// Generate a unique ID for this evaluation run
@@ -43,7 +46,7 @@ function createWorkflowGenerator(
// Create agent for this run
const agent = createAgent(parsedNodeTypes, llm, tracer);
await consumeGenerator(
agent.chat(getChatPayload(messageContent, runId), 'langsmith-eval-user'),
agent.chat(getChatPayload(messageContent, runId, featureFlags), 'langsmith-eval-user'),
);
// Get generated workflow with validation
@@ -75,12 +78,24 @@ function createWorkflowGenerator(
/**
* Runs evaluation using Langsmith
* @param repetitions - Number of times to run each example (default: 1)
* @param featureFlags - Optional feature flags to pass to the agent
*/
export async function runLangsmithEvaluation(repetitions: number = 1): Promise<void> {
export async function runLangsmithEvaluation(
repetitions: number = 1,
featureFlags?: BuilderFeatureFlags,
): Promise<void> {
console.log(formatHeader('AI Workflow Builder Langsmith Evaluation', 70));
if (repetitions > 1) {
console.log(pc.yellow(`➔ Each example will be run ${repetitions} times`));
}
if (featureFlags) {
const enabledFlags = Object.entries(featureFlags)
.filter(([, v]) => v === true)
.map(([k]) => k);
if (enabledFlags.length > 0) {
console.log(pc.green(`➔ Feature flags enabled: ${enabledFlags.join(', ')}`));
}
}
console.log();
// Check for Langsmith API key
@@ -123,7 +138,7 @@ export async function runLangsmithEvaluation(repetitions: number = 1): Promise<v
const startTime = Date.now();
// Create workflow generation function
const generateWorkflow = createWorkflowGenerator(parsedNodeTypes, llm, tracer);
const generateWorkflow = createWorkflowGenerator(parsedNodeTypes, llm, tracer, featureFlags);
// Create evaluator with both LLM-based and programmatic evaluation
const evaluator = createLangsmithEvaluator(llm, parsedNodeTypes);

View File

@@ -8,7 +8,7 @@ import { join } from 'path';
import pc from 'picocolors';
import { anthropicClaudeSonnet45 } from '../../src/llm-config';
import type { ChatPayload } from '../../src/workflow-builder-agent';
import type { BuilderFeatureFlags, ChatPayload } from '../../src/workflow-builder-agent';
import { WorkflowBuilderAgent } from '../../src/workflow-builder-agent';
import type { Violation } from '../types/evaluation';
import type { TestResult } from '../types/test-result';
@@ -277,8 +277,16 @@ export async function consumeGenerator<T>(gen: AsyncGenerator<T>) {
}
}
export function getChatPayload(message: string, id: string): ChatPayload {
export function getChatPayload(
message: string,
id: string,
featureFlags?: BuilderFeatureFlags,
): ChatPayload {
return {
featureFlags: featureFlags ?? {
multiAgent: true,
templateExamples: false,
},
message,
workflowContext: {
currentWorkflow: { id, nodes: [], connections: {} },

View File

@@ -0,0 +1,41 @@
import pc from 'picocolors';
/**
* Simple evaluation logger with verbose mode support.
*
* Usage:
* const log = createLogger(isVerbose);
* log.info('Always shown');
* log.verbose('Only shown in verbose mode');
*/
/** Logger interface used by evaluation runners; created via `createLogger`. */
export interface EvalLogger {
	/** Always shown — important, user-facing progress info */
	info: (message: string) => void;
	/** Only printed when verbose mode is enabled — debug-level details */
	verbose: (message: string) => void;
	/** Success messages, rendered in green */
	success: (message: string) => void;
	/** Warning messages, rendered in yellow */
	warn: (message: string) => void;
	/** Error messages, rendered in red */
	error: (message: string) => void;
	/** Dimmed text for secondary / low-priority info */
	dim: (message: string) => void;
	/** Whether verbose mode was enabled at creation time */
	isVerbose: boolean;
}
/**
 * Build an `EvalLogger` that writes colorized lines to the console.
 *
 * @param verbose - When true, `verbose()` messages are printed (dimmed);
 *                  otherwise they are silently dropped.
 */
export function createLogger(verbose: boolean = false): EvalLogger {
	// Shared factory: returns a log function that colorizes then prints.
	const colored = (paint: (s: string) => string) => (message: string) => {
		console.log(paint(message));
	};
	return {
		isVerbose: verbose,
		info: colored(pc.blue),
		verbose: (message: string) => {
			if (verbose) console.log(pc.dim(message));
		},
		success: colored(pc.green),
		warn: colored(pc.yellow),
		error: colored(pc.red),
		dim: colored(pc.dim),
	};
}

View File

@@ -3,57 +3,20 @@ import type { AIMessage, BaseMessage } from '@langchain/core/messages';
import { HumanMessage } from '@langchain/core/messages';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { buildResponderPrompt } from '@/prompts/agents/responder.prompt';
import type { CoordinationLogEntry } from '../types/coordination';
import type { DiscoveryContext } from '../types/discovery-types';
import type { SimpleWorkflow } from '../types/workflow';
import { getErrorEntry, getBuilderOutput, getConfiguratorOutput } from '../utils/coordination-log';
/**
* Responder Agent Prompt
*
* Synthesizes final user-facing responses from workflow building context.
* Also handles conversational queries.
*/
const RESPONDER_PROMPT = `You are a helpful AI assistant for n8n workflow automation.
You have access to context about what has been built, including:
- Discovery results (nodes found)
- Builder output (workflow structure)
- Configuration summary (setup instructions)
FOR WORKFLOW COMPLETION RESPONSES:
When you receive [Internal Context], synthesize a clean user-facing response:
1. Summarize what was built in a friendly way
2. Explain the workflow structure briefly
3. Include setup instructions if provided
4. Ask if user wants adjustments
Example response structure:
"I've created your [workflow type] workflow! Here's what it does:
[Brief explanation of the flow]
**Setup Required:**
[List any configuration steps from the context]
Let me know if you'd like to adjust anything."
FOR QUESTIONS/CONVERSATIONS:
- Be friendly and concise
- Explain n8n capabilities when asked
- Provide practical examples when helpful
RESPONSE STYLE:
- Keep responses focused and not overly long
- Use markdown formatting for readability
- Be conversational and helpful`;
const systemPrompt = ChatPromptTemplate.fromMessages([
[
'system',
[
{
type: 'text',
text: RESPONDER_PROMPT,
text: buildResponderPrompt(),
cache_control: { type: 'ephemeral' },
},
],

View File

@@ -4,63 +4,23 @@ import { HumanMessage } from '@langchain/core/messages';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { z } from 'zod';
import {
buildSupervisorPrompt,
SUPERVISOR_PROMPT_SUFFIX,
} from '@/prompts/agents/supervisor.prompt';
import type { CoordinationLogEntry } from '../types/coordination';
import type { SimpleWorkflow } from '../types/workflow';
import { buildWorkflowSummary } from '../utils/context-builders';
import { summarizeCoordinationLog } from '../utils/coordination-log';
/**
* Supervisor Agent Prompt
*
* Handles INITIAL routing based on user intent.
* After initial routing, deterministic routing takes over based on coordination log.
*/
const SUPERVISOR_PROMPT = `You are a Supervisor that routes user requests to specialist agents.
AVAILABLE AGENTS:
- discovery: Find n8n nodes for building/modifying workflows
- builder: Create nodes and connections (requires discovery first for new node types)
- configurator: Set parameters on EXISTING nodes (no structural changes)
- responder: Answer questions, confirm completion (TERMINAL)
ROUTING DECISION TREE:
1. Is user asking a question or chatting? → responder
Examples: "what does this do?", "explain the workflow", "thanks"
2. Does the request involve NEW or DIFFERENT node types? → discovery
Examples:
- "Build a workflow that..." (new workflow)
- "Use [ServiceB] instead of [ServiceA]" (replacing node type)
- "Add [some integration]" (new integration)
- "Switch from [ServiceA] to [ServiceB]" (swapping services)
3. Is the request about connecting/disconnecting existing nodes? → builder
Examples: "Connect node A to node B", "Remove the connection to X"
4. Is the request about changing VALUES in existing nodes? → configurator
Examples:
- "Change the URL to https://..."
- "Set the timeout to 30 seconds"
- "Update the email subject to..."
KEY DISTINCTION:
- "Use [ServiceB] instead of [ServiceA]" = REPLACEMENT = discovery (new node type needed)
- "Change the [ServiceA] API key" = CONFIGURATION = configurator (same node, different value)
OUTPUT:
- reasoning: One sentence explaining your routing decision
- next: Agent name`;
const systemPrompt = ChatPromptTemplate.fromMessages([
[
'system',
[
{
type: 'text',
text:
SUPERVISOR_PROMPT +
'\n\nGiven the conversation above, which agent should act next? Provide your reasoning and selection.',
text: buildSupervisorPrompt() + SUPERVISOR_PROMPT_SUFFIX,
cache_control: { type: 'ephemeral' },
},
],

View File

@@ -1,22 +1,9 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { BaseMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage } from '@langchain/core/messages';
import { PromptTemplate } from '@langchain/core/prompts';
import z from 'zod';
const compactPromptTemplate = PromptTemplate.fromTemplate(
`Please summarize the following conversation between a user and an AI assistant building an n8n workflow:
<previous_summary>
{previousSummary}
</previous_summary>
<conversation>
{conversationText}
</conversation>
Provide a structured summary that captures the key points, decisions made, current state of the workflow, and suggested next steps.`,
);
import { compactPromptTemplate } from '@/prompts/chains/compact.prompt';
export async function conversationCompactChain(
llm: BaseChatModel,

View File

@@ -4,10 +4,13 @@ import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/
import type { Logger } from 'n8n-workflow';
import { z } from 'zod';
import {
instanceUrlPrompt,
ParameterUpdatePromptBuilder,
} from '@/prompts/chains/parameter-updater';
import { LLMServiceError } from '../errors';
import type { ParameterUpdaterOptions } from '../types/config';
import { instanceUrlPrompt } from './prompts/instance-url';
import { ParameterUpdatePromptBuilder } from './prompts/prompt-builder';
export const parametersSchema = z
.object({

View File

@@ -1,96 +1,11 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { PromptTemplate } from '@langchain/core/prompts';
import { z } from 'zod';
import {
WorkflowTechnique,
TechniqueDescription,
type PromptCategorization,
} from '@/types/categorization';
const examplePrompts = [
{
prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
techniques: [
WorkflowTechnique.MONITORING,
WorkflowTechnique.CHATBOT,
WorkflowTechnique.CONTENT_GENERATION,
],
},
{
prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
techniques: [
WorkflowTechnique.FORM_INPUT,
WorkflowTechnique.HUMAN_IN_THE_LOOP,
WorkflowTechnique.NOTIFICATION,
],
},
{
prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
techniques: [
WorkflowTechnique.SCHEDULING,
WorkflowTechnique.SCRAPING_AND_RESEARCH,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_ANALYSIS,
],
},
{
prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
techniques: [
WorkflowTechnique.DOCUMENT_PROCESSING,
WorkflowTechnique.DATA_EXTRACTION,
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.ENRICHMENT,
],
},
{
prompt: 'Build a searchable internal knowledge base from past support tickets',
techniques: [
WorkflowTechnique.DATA_TRANSFORMATION,
WorkflowTechnique.DATA_ANALYSIS,
WorkflowTechnique.KNOWLEDGE_BASE,
],
},
];
function formatExamplePrompts() {
return examplePrompts
.map((example) => `- ${example.prompt}${example.techniques.join(',')}`)
.join('\n');
}
const promptCategorizationTemplate = PromptTemplate.fromTemplate(
`Analyze the following user prompt and identify the workflow techniques required to fulfill the request.
Be specific and identify all relevant techniques.
<user_prompt>
{userPrompt}
</user_prompt>
<workflow_techniques>
{techniques}
</workflow_techniques>
The following prompt categorization examples show a prompt → techniques involved to provide a sense
of how the categorization should be carried out.
<example_categorization>
${formatExamplePrompts()}
</example_categorization>
Select a maximum of 5 techniques that you believe are applicable, but only select them if you are
confident that they are applicable. If the prompt is ambigious or does not provide an obvious workflow
do not provide any techniques - if confidence is low avoid providing techniques.
Select ALL techniques that apply to this workflow. Most workflows use multiple techniques.
Rate your confidence in this categorization from 0.0 to 1.0.
`,
);
function formatTechniqueList(): string {
return Object.entries(TechniqueDescription)
.map(([key, description]) => `- **${key}**: ${description}`)
.join('\n');
}
formatTechniqueList,
promptCategorizationTemplate,
} from '@/prompts/chains/categorization.prompt';
import { WorkflowTechnique, type PromptCategorization } from '@/types/categorization';
export async function promptCategorizationChain(
llm: BaseChatModel,

View File

@@ -1,17 +1,7 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { PromptTemplate } from '@langchain/core/prompts';
import z from 'zod';
const workflowNamingPromptTemplate = PromptTemplate.fromTemplate(
`Based on the initial user prompt, please generate a name for the workflow that captures its essence and purpose.
<initial_prompt>
{initialPrompt}
</initial_prompt>
This name should be concise, descriptive, and suitable for a workflow that automates tasks related to the given prompt. The name should be in a format that is easy to read and understand. Do not include the word "workflow" in the name.
`,
);
import { workflowNamingPromptTemplate } from '@/prompts/chains/workflow-name.prompt';
export async function workflowNameChain(llm: BaseChatModel, initialPrompt: string) {
// Use structured output for the workflow name to ensure it meets the required format and length

View File

@@ -0,0 +1,204 @@
# AI Workflow Builder Prompts
Centralized prompts for the n8n AI Workflow Builder. This directory contains all prompts used by agents and chains.
## Directory Structure
```
src/prompts/
├── index.ts # Central exports
├── README.md # This file
├── legacy-agent.prompt.ts # Single-agent mode (~650 lines)
├── agents/ # Multi-agent system prompts
│ ├── supervisor.prompt.ts # Routes requests to specialists
│ ├── discovery.prompt.ts # Finds nodes & categorizes techniques
│ ├── builder.prompt.ts # Creates workflow structure
│ ├── configurator.prompt.ts # Sets node parameters
│ └── responder.prompt.ts # Generates user responses
└── chains/ # Chain-level prompts
├── categorization.prompt.ts # Workflow technique classification
├── compact.prompt.ts # Conversation summarization
├── workflow-name.prompt.ts # Workflow name generation
└── parameter-updater/ # Dynamic prompt building for node updates
├── index.ts # Exports
├── prompt-builder.ts # ParameterUpdatePromptBuilder class
├── prompt-config.ts # Node detection config
├── instance-url.ts # Instance URL template
├── base/ # Core instructions
├── node-types/ # Node-specific guides
├── parameter-types/ # Parameter-specific guides
└── examples/ # Few-shot examples
```
## Multi-Agent Prompts
### Supervisor (`agents/supervisor.prompt.ts`)
Routes user requests to the appropriate specialist agent.
| Export | Description |
|--------|-------------|
| `buildSupervisorPrompt()` | Builds the supervisor system prompt |
| `SUPERVISOR_PROMPT_SUFFIX` | Suffix asking "which agent should act next?" |
**Routing targets:** discovery, builder, configurator, responder
### Discovery (`agents/discovery.prompt.ts`)
Identifies relevant n8n nodes and categorizes workflow techniques.
| Export | Description |
|--------|-------------|
| `buildDiscoveryPrompt(options)` | Builds prompt with optional examples phase |
| `formatTechniqueList()` | Formats available techniques as bullet list |
| `formatExampleCategorizations()` | Formats few-shot examples |
| `exampleCategorizations` | 14 few-shot classification examples |
| `DiscoveryPromptOptions` | Type: `{ includeExamples: boolean }` |
**Input variables:** `{techniques}`, `{exampleCategorizations}`
### Builder (`agents/builder.prompt.ts`)
Constructs workflow structure by creating nodes and connections.
| Export | Description |
|--------|-------------|
| `buildBuilderPrompt()` | Builds the builder system prompt |
**Key sections:** Node creation rules, connection parameters, AI connections, RAG patterns
### Configurator (`agents/configurator.prompt.ts`)
Sets up node parameters using natural language instructions.
| Export | Description |
|--------|-------------|
| `buildConfiguratorPrompt()` | Builds the configurator system prompt |
| `INSTANCE_URL_PROMPT` | Template with `{instanceUrl}` variable |
**Input variables:** `{instanceUrl}`
### Responder (`agents/responder.prompt.ts`)
Generates user-facing responses and handles conversational queries.
| Export | Description |
|--------|-------------|
| `buildResponderPrompt()` | Builds the responder system prompt |
## Legacy Agent Prompt
### `legacy-agent.prompt.ts`
Comprehensive monolithic prompt for single-agent mode. Contains all workflow building logic.
| Export | Description |
|--------|-------------|
| `createMainAgentPrompt(options?)` | Creates ChatPromptTemplate with options |
| `mainAgentPrompt` | Default prompt instance |
| `MainAgentPromptOptions` | Type: `{ includeExamplesPhase?: boolean }` |
**Input variables:** `{instanceUrl}`, `{previousSummary}`, `{messages}`
**Phases:**
1. Categorization (mandatory)
2. Examples (optional, feature-flagged)
3. Discovery (parallel)
4. Analysis (parallel)
5. Creation (parallel)
6. Connection (parallel)
7. Configuration (mandatory)
8. Validation (mandatory)
## Chain Prompts
### Categorization (`chains/categorization.prompt.ts`)
Analyzes user prompts to identify workflow techniques.
| Export | Description |
|--------|-------------|
| `promptCategorizationTemplate` | PromptTemplate for classification |
| `examplePrompts` | 5 few-shot examples |
| `formatExamplePrompts()` | Formats examples as "prompt → techniques" |
| `formatTechniqueList()` | Formats technique descriptions |
**Input variables:** `{userPrompt}`, `{techniques}`
### Compact (`chains/compact.prompt.ts`)
Summarizes multi-turn conversations for context management.
| Export | Description |
|--------|-------------|
| `compactPromptTemplate` | PromptTemplate for summarization |
**Input variables:** `{previousSummary}`, `{conversationText}`
**Output:** Structured summary with key_decisions, current_state, next_steps
### Workflow Name (`chains/workflow-name.prompt.ts`)
Generates descriptive workflow names.
| Export | Description |
|--------|-------------|
| `workflowNamingPromptTemplate` | PromptTemplate for naming |
**Input variables:** `{initialPrompt}`
## Parameter Updater System
A modular system for building context-aware prompts for node parameter updates.
### ParameterUpdatePromptBuilder (`chains/parameter-updater/prompt-builder.ts`)
Dynamically assembles prompts based on node context.
```typescript
import { ParameterUpdatePromptBuilder } from '@/prompts';
const prompt = ParameterUpdatePromptBuilder.buildSystemPrompt({
nodeType: 'n8n-nodes-base.set',
nodeDefinition: nodeTypeDescription,
requestedChanges: ['set name to John'],
hasResourceLocatorParams: false,
});
```
**Build logic:**
1. Always: CORE_INSTRUCTIONS + EXPRESSION_RULES
2. Node-type guide (Set, IF, Switch, HTTP, Tool)
3. Parameter-type guides if applicable
4. COMMON_PATTERNS
5. Relevant examples
6. OUTPUT_FORMAT
### Base Prompts (`chains/parameter-updater/base/`)
| File | Export | Description |
|------|--------|-------------|
| `core-instructions.ts` | `CORE_INSTRUCTIONS` | Parameter update guidelines |
| `expression-rules.ts` | `EXPRESSION_RULES` | n8n expression syntax rules |
| `common-patterns.ts` | `COMMON_PATTERNS` | HTTP Request patterns |
| `output-format.ts` | `OUTPUT_FORMAT` | Expected output structure |
### Node Type Guides (`chains/parameter-updater/node-types/`)
| File | Export | Description |
|------|--------|-------------|
| `set-node.ts` | `SET_NODE_GUIDE` | Assignment structure & types |
| `if-node.ts` | `IF_NODE_GUIDE` | Filter conditions & operators |
| `switch-node.ts` | `SWITCH_NODE_GUIDE` | Rules and routing patterns |
| `http-request.ts` | `HTTP_REQUEST_GUIDE` | URL, headers, body, auth |
| `tool-nodes.ts` | `TOOL_NODES_GUIDE` | $fromAI expressions |
### Parameter Type Guides (`chains/parameter-updater/parameter-types/`)
| File | Export | Description |
|------|--------|-------------|
| `resource-locator.ts` | `RESOURCE_LOCATOR_GUIDE` | `__rl` structure & modes |
| `system-message.ts` | `SYSTEM_MESSAGE_GUIDE` | AI node message separation |
| `text-fields.ts` | `TEXT_FIELDS_GUIDE` | Expression embedding |

View File

@@ -0,0 +1,253 @@
/**
* Builder Agent Prompt
*
* Constructs workflow structure by creating nodes and connections based on Discovery results.
* Does NOT configure node parameters - that's the Configurator Agent's job.
*/
// Opening role statement for the Builder Agent system prompt.
const BUILDER_ROLE = 'You are a Builder Agent specialized in constructing n8n workflows.';

// Step-by-step procedure the agent must follow (create → connect →
// validate → respond); validation is explicitly capped at 3 attempts.
const EXECUTION_SEQUENCE = `MANDATORY EXECUTION SEQUENCE:
You MUST follow these steps IN ORDER. Do not skip any step.
STEP 1: CREATE NODES
- Call add_nodes for EVERY node needed based on discovery results
- Create multiple nodes in PARALLEL for efficiency
- Do NOT respond with text - START BUILDING immediately
STEP 2: CONNECT NODES
- Call connect_nodes for ALL required connections
- Connect multiple node pairs in PARALLEL
STEP 3: VALIDATE (REQUIRED)
- After ALL nodes and connections are created, call validate_structure
- This step is MANDATORY - you cannot finish without it
- If validation finds issues (missing trigger, invalid connections), fix them and validate again
- MAXIMUM 3 VALIDATION ATTEMPTS: After 3 calls to validate_structure, proceed to respond regardless of remaining issues
STEP 4: RESPOND TO USER
- Only after validation passes, provide your brief summary
⚠️ NEVER respond to the user without calling validate_structure first ⚠️`;

// Required arguments for each add_nodes tool call. The doubled braces
// ({{}}) look like prompt-template escapes — NOTE(review): confirm this
// string is passed through a LangChain template so {{ renders as {.
const NODE_CREATION = `NODE CREATION:
Each add_nodes call creates ONE node. You must provide:
- nodeType: The exact type from discovery (e.g., "n8n-nodes-base.httpRequest")
- name: Descriptive name (e.g., "Fetch Weather Data")
- connectionParametersReasoning: Explain your thinking about connection parameters
- connectionParameters: Parameters that affect connections (or {{}} if none needed)`;
// Instructs the agent to insert a "Workflow Configuration" Set node
// right after the trigger in every workflow.
const WORKFLOW_CONFIG_NODE = `<workflow_configuration_node>
Always include a Workflow Configuration node at the start of every workflow.
The Workflow Configuration node (n8n-nodes-base.set) should be placed immediately after the trigger node and before all other processing nodes.
Placement rules:
- Add between trigger and first processing node
- Connect: Trigger → Workflow Configuration → First processing node
- Name it "Workflow Configuration"
</workflow_configuration_node>`;

// Parsing-node selection guidance: prefer Structured Output Parser /
// Extract From File over Code nodes.
const DATA_PARSING = `<data_parsing_strategy>
For AI-generated structured data, prefer Structured Output Parser nodes over Code nodes.
For binary file data, use Extract From File node to extract content from files before processing.
Use Code nodes only for custom business logic beyond parsing.
STRUCTURED OUTPUT PARSER RULE:
When Discovery results include Structured Output Parser:
1. Create the Structured Output Parser node
2. Set AI Agent's hasOutputParser: true in connectionParameters
3. Connect: Structured Output Parser → AI Agent (ai_outputParser connection)
</data_parsing_strategy>`;

// Encourages anticipating common workflow needs; forbids Split In Batches.
const PROACTIVE_DESIGN = `<proactive_design>
Anticipate workflow needs:
- IF nodes for conditional logic when multiple outcomes exist
- Set nodes for data transformation between incompatible formats
- Schedule Triggers for recurring tasks
- Error handling for external service calls
NEVER use Split In Batches nodes.
</proactive_design>`;

// Warns that default parameter values can hide connection inputs/outputs.
const NODE_DEFAULTS = `<node_defaults_warning>
CRITICAL: NEVER RELY ON DEFAULT PARAMETER VALUES FOR CONNECTIONS
Default values often hide connection inputs/outputs. You MUST explicitly configure parameters that affect connections:
- Vector Store: Mode parameter affects available connections - always set explicitly (e.g., mode: "insert", "retrieve", "retrieve-as-tool")
- AI Agent: hasOutputParser default may not match your workflow needs
- Document Loader: textSplittingMode affects whether it accepts a text splitter input
ALWAYS check node details and set connectionParameters explicitly.
</node_defaults_warning>`;

// Worked examples of connectionParametersReasoning / connectionParameters
// pairs. Doubled braces ({{ }}) are kept as-is — presumably template
// escapes for a downstream prompt template; confirm before changing.
const CONNECTION_PARAMETERS = `CONNECTION PARAMETERS EXAMPLES:
- Static nodes (HTTP Request, Set, Code): reasoning="Static inputs/outputs", parameters={{}}
- AI Agent with structured output: reasoning="hasOutputParser enables ai_outputParser input for Structured Output Parser", parameters={{ hasOutputParser: true }}
- Vector Store insert: reasoning="Insert mode requires document input", parameters={{ mode: "insert" }}
- Document Loader custom: reasoning="Custom mode enables text splitter input", parameters={{ textSplittingMode: "custom" }}
- Switch with routing rules: reasoning="Switch needs N outputs, creating N rules.values entries with outputKeys", parameters={{ mode: "rules", rules: {{ values: [...] }} }} - see <switch_node_pattern> for full structure`;

// When to set hasOutputParser: true on the AI Agent node.
const STRUCTURED_OUTPUT_PARSER = `<structured_output_parser_guidance>
WHEN TO SET hasOutputParser: true on AI Agent:
- Discovery found Structured Output Parser node → MUST set hasOutputParser: true
- AI output will be used in conditions (IF/Switch nodes checking $json.field)
- AI output will be formatted/displayed (HTML emails, reports with specific sections)
- AI output will be stored in database/data tables with specific fields
- AI is classifying, scoring, or extracting specific data fields
</structured_output_parser_guidance>`;
/** AI sub-nodes are SOURCES (they "provide" capabilities), so arrows point FROM sub-node TO parent */
const AI_CONNECTIONS = `<node_connections_understanding>
n8n connections flow from SOURCE (output) to TARGET (input).
Regular data flow: Source node output → Target node input
Example: HTTP Request → Set (HTTP Request is source, Set is target)
AI sub-nodes PROVIDE capabilities, making them the SOURCE:
- OpenAI Chat Model → AI Agent [ai_languageModel]
- Calculator Tool → AI Agent [ai_tool]
- Window Buffer Memory → AI Agent [ai_memory]
- Token Splitter → Default Data Loader [ai_textSplitter]
- Default Data Loader → Vector Store [ai_document]
- Embeddings OpenAI → Vector Store [ai_embedding]
</node_connections_understanding>`;

// Disambiguates the AI Agent main node from the AI Agent Tool sub-node.
const AGENT_NODE_DISTINCTION = `<agent_node_distinction>
Distinguish between two different agent node types:
1. **AI Agent** (@n8n/n8n-nodes-langchain.agent)
- Main workflow node that orchestrates AI tasks
- Use for: Primary AI logic, chatbots, autonomous workflows
2. **AI Agent Tool** (@n8n/n8n-nodes-langchain.agentTool)
- Sub-node that acts as a tool for another AI Agent
- Use for: Multi-agent systems where one agent calls another
Default assumption: When discovery results include "agent", use AI Agent
unless explicitly specified as "agent tool" or "sub-agent".
</agent_node_distinction>`;

// Connection layout for RAG workflows: main flow into Vector Store,
// capability sub-nodes attached via ai_* connections.
const RAG_PATTERN = `<rag_workflow_pattern>
For RAG (Retrieval-Augmented Generation) workflows:
Main data flow:
- Data source (e.g., HTTP Request) → Vector Store [main connection]
AI capability connections:
- Document Loader → Vector Store [ai_document]
- Embeddings → Vector Store [ai_embedding]
- Text Splitter → Document Loader [ai_textSplitter]
Common mistake to avoid:
- NEVER connect Document Loader to main data outputs
- Document Loader is an AI sub-node that gives Vector Store document processing capability
</rag_workflow_pattern>`;
// Placeholder rules.values[] scaffold for Switch nodes: one entry per
// output branch; the Configurator fills in condition values later.
// Doubled braces ({{ }}) are kept literal — presumably template escapes;
// confirm this string passes through a prompt template before editing.
const SWITCH_NODE_PATTERN = `<switch_node_pattern>
For Switch nodes with multiple routing paths:
- The number of outputs is determined by the number of entries in rules.values[]
- You MUST create the rules.values[] array with placeholder entries for each output branch
- Each entry needs: conditions structure (with empty leftValue/rightValue) + renameOutput: true + descriptive outputKey
- Configurator will fill in the actual condition values later
- Use descriptive node names like "Route by Amount" or "Route by Status"
Example connectionParameters for 3-way routing:
{{
"mode": "rules",
"rules": {{
"values": [
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 1 Name"
}},
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 2 Name"
}},
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 3 Name"
}}
]
}}
}}
</switch_node_pattern>`;

// Reference list of every connection type with a concrete example each.
const CONNECTION_TYPES = `<connection_type_examples>
**Main Connections** (regular data flow):
- Trigger → HTTP Request → Set → Email
**AI Language Model Connections** (ai_languageModel):
- OpenAI Chat Model → AI Agent
**AI Tool Connections** (ai_tool):
- Calculator Tool → AI Agent
- AI Agent Tool → AI Agent (for multi-agent systems)
**AI Document Connections** (ai_document):
- Document Loader → Vector Store
**AI Embedding Connections** (ai_embedding):
- OpenAI Embeddings → Vector Store
**AI Text Splitter Connections** (ai_textSplitter):
- Token Text Splitter → Document Loader
**AI Memory Connections** (ai_memory):
- Window Buffer Memory → AI Agent
**AI Vector Store in retrieve-as-tool mode** (ai_tool):
- Vector Store → AI Agent
</connection_type_examples>`;

// Hard prohibitions that keep the Builder inside its lane (no parameter
// configuration, no node discovery, no premature responses).
const RESTRICTIONS = `DO NOT:
- Respond before calling validate_structure
- Skip validation even if you think structure is correct
- Add commentary between tool calls - execute tools silently
- Configure node parameters (that's the Configurator Agent's job)
- Search for nodes (that's the Discovery Agent's job)
- Make assumptions about node types - use exactly what Discovery found`;

// Shape of the single post-validation summary message.
const RESPONSE_FORMAT = `RESPONSE FORMAT (only after validation):
Provide ONE brief text message summarizing:
- What nodes were added
- How they're connected
Example: "Created 4 nodes: Trigger → Weather → Image Generation → Email"`;
/**
 * Assembles the complete Builder Agent system prompt.
 *
 * Joins every prompt section in a fixed order — role statement first,
 * response-format instructions last — separated by blank lines.
 *
 * @returns The full system prompt string for the Builder Agent.
 */
export function buildBuilderPrompt(): string {
	const sections: string[] = [
		BUILDER_ROLE,
		EXECUTION_SEQUENCE,
		NODE_CREATION,
		WORKFLOW_CONFIG_NODE,
		DATA_PARSING,
		PROACTIVE_DESIGN,
		NODE_DEFAULTS,
		CONNECTION_PARAMETERS,
		STRUCTURED_OUTPUT_PARSER,
		AI_CONNECTIONS,
		AGENT_NODE_DISTINCTION,
		RAG_PATTERN,
		SWITCH_NODE_PATTERN,
		CONNECTION_TYPES,
		RESTRICTIONS,
		RESPONSE_FORMAT,
	];
	return sections.join('\n\n');
}

View File

@@ -0,0 +1,137 @@
/**
* Configurator Agent Prompt
*
* Sets up node parameters after the Builder Agent has created the workflow structure.
* Uses natural language instructions to configure each node's settings.
*/
// Opening role statement for the Configurator Agent system prompt.
const CONFIGURATOR_ROLE =
	'You are a Configurator Agent specialized in setting up n8n node parameters.';

// Required procedure: configure every node, validate (max 3 attempts),
// then respond.
const EXECUTION_SEQUENCE = `MANDATORY EXECUTION SEQUENCE:
You MUST follow these steps IN ORDER. Do not skip any step.
STEP 1: CONFIGURE ALL NODES
- Call update_node_parameters for EVERY node in the workflow
- Configure multiple nodes in PARALLEL for efficiency
- Do NOT respond with text - START CONFIGURING immediately
STEP 2: VALIDATE (REQUIRED)
- After ALL configurations complete, call validate_configuration
- This step is MANDATORY - you cannot finish without it
- If validation finds issues, fix them and validate again
- MAXIMUM 3 VALIDATION ATTEMPTS: After 3 calls to validate_configuration, proceed to respond regardless of remaining issues
STEP 3: RESPOND TO USER
- Only after validation passes, provide your response
NEVER respond to the user without calling validate_configuration first`;

// Tells the agent to read <current_workflow_json> from its context and
// act on every node found there.
const WORKFLOW_JSON_DETECTION = `WORKFLOW JSON DETECTION:
- You receive <current_workflow_json> in your context
- If you see nodes in the workflow JSON, you MUST configure them IMMEDIATELY
- Look at the workflow JSON, identify each node, and call update_node_parameters for ALL of them`;

// Examples of the natural-language instruction format accepted by
// update_node_parameters.
const PARAMETER_CONFIGURATION = `PARAMETER CONFIGURATION:
Use update_node_parameters with natural language instructions:
- "Set URL to https://api.example.com/weather"
- "Add header Authorization: Bearer token"
- "Set method to POST"
- "Add field 'status' with value 'processed'"`;

// $fromAI expression usage, restricted to tool nodes (types ending in
// "Tool"). Doubled braces are kept literal — presumably template escapes.
const TOOL_NODE_EXPRESSIONS = `SPECIAL EXPRESSIONS FOR TOOL NODES:
Tool nodes (types ending in "Tool") support $fromAI expressions:
- "Set sendTo to ={{ $fromAI('to') }}"
- "Set subject to ={{ $fromAI('subject') }}"
- "Set message to ={{ $fromAI('message_html') }}"
- "Set timeMin to ={{ $fromAI('After', '', 'string') }}"
$fromAI syntax: ={{ $fromAI('key', 'description', 'type', defaultValue) }}
- ONLY use in tool nodes (check node type ends with "Tool")
- Use for dynamic values that AI determines at runtime
- For regular nodes, use static values or standard expressions`;
// Per-node-type checklist of parameters that must always be set explicitly.
const CRITICAL_PARAMETERS = `CRITICAL PARAMETERS TO ALWAYS SET:
- HTTP Request: URL, method, headers (if auth needed)
- Set node: Fields to set with values
- Code node: The actual code to execute
- IF node: Conditions to check
- Switch node: Configure rules.values[] with conditions for each output branch (uses same filter structure as IF node)
- Document Loader: dataType parameter ('binary' for files like PDF, 'json' for JSON data)
- AI nodes: Prompts, models, configurations
- Tool nodes: Use $fromAI for dynamic recipient/subject/message fields`;

// Examples of defaults that cause runtime failures when left unchanged.
const DEFAULT_VALUES_WARNING = `NEVER RELY ON DEFAULT VALUES:
Defaults are traps that cause runtime failures. Examples:
- Document Loader defaults to 'json' but MUST be 'binary' when processing files
- HTTP Request defaults to GET but APIs often need POST
- Vector Store mode affects available connections - set explicitly (retrieve-as-tool when using with AI Agent)`;

// Per-rule structure for Switch node rules.values[] entries. The doubled
// and quadrupled braces are kept literal — presumably template escapes so
// {{{{ renders as {{ (an n8n expression) downstream; confirm before editing.
const SWITCH_NODE_CONFIGURATION = `<switch_node_configuration>
Switch nodes require configuring rules.values[] array - each entry creates one output:
Structure per rule:
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [
{{
"leftValue": "={{{{ $json.fieldName }}}}",
"rightValue": <value>,
"operator": {{ "type": "number|string", "operation": "lt|gt|equals|etc" }}
}}
],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Descriptive Label"
}}
For numeric ranges (e.g., $100-$1000):
- Use TWO conditions with combinator: "and"
- First: gte (greater than or equal)
- Second: lte (less than or equal)
Always set renameOutput: true and provide descriptive outputKey labels.
</switch_node_configuration>`;
// Shape of the post-validation summary; notes that the output is consumed
// by other agents, not shown directly to end users.
const RESPONSE_FORMAT = `<response_format>
After validation passes, provide a concise summary:
- List any placeholders requiring user configuration (e.g., "URL placeholder needs actual endpoint")
- Note which nodes were configured and key settings applied
- Keep it brief - this output is used for coordination with other LLM agents, not displayed directly to users
</response_format>`;

// Hard prohibitions that keep the Configurator from responding early or
// skipping validation.
const RESTRICTIONS = `DO NOT:
- Respond before calling validate_configuration
- Skip validation even if you think configuration is correct
- Add commentary between tool calls - execute tools silently`;

/** Uses {instanceUrl} as a LangChain template variable */
export const INSTANCE_URL_PROMPT = `
<instance_url>
The n8n instance base URL is: {instanceUrl}
This URL is essential for webhook nodes and chat triggers as it provides the base URL for:
- Webhook URLs that external services need to call
- Chat trigger URLs for conversational interfaces
- Any node that requires the full instance URL to generate proper callback URLs
When working with webhook or chat trigger nodes, use this URL as the base for constructing proper endpoint URLs.
</instance_url>
`;
/**
 * Assembles the complete Configurator Agent system prompt.
 *
 * Joins the section constants in a fixed order — role statement first,
 * restrictions last — separated by blank lines. The instance-URL template
 * (INSTANCE_URL_PROMPT) is intentionally not included here; it carries a
 * template variable and is applied separately.
 *
 * @returns The full system prompt string for the Configurator Agent.
 */
export function buildConfiguratorPrompt(): string {
	const sections: string[] = [
		CONFIGURATOR_ROLE,
		EXECUTION_SEQUENCE,
		WORKFLOW_JSON_DETECTION,
		PARAMETER_CONFIGURATION,
		TOOL_NODE_EXPRESSIONS,
		CRITICAL_PARAMETERS,
		DEFAULT_VALUES_WARNING,
		SWITCH_NODE_CONFIGURATION,
		RESPONSE_FORMAT,
		RESTRICTIONS,
	];
	return sections.join('\n\n');
}

View File

@@ -0,0 +1,315 @@
/**
* Discovery Agent Prompt
*
* Identifies relevant n8n nodes and their connection-changing parameters based on
* the user's request. Categorizes the workflow by technique and searches for appropriate nodes.
*/
import {
TechniqueDescription,
WorkflowTechnique,
type WorkflowTechniqueType,
} from '@/types/categorization';
/**
 * Few-shot examples for technique classification.
 * Each entry pairs a realistic user prompt with the WorkflowTechnique values a
 * correct categorization should produce; rendered into the discovery prompt by
 * formatExampleCategorizations().
 */
export const exampleCategorizations: Array<{
  prompt: string;
  techniques: WorkflowTechniqueType[];
}> = [
  {
    prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
    techniques: [
      WorkflowTechnique.MONITORING,
      WorkflowTechnique.CHATBOT,
      WorkflowTechnique.CONTENT_GENERATION,
    ],
  },
  {
    prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
    techniques: [
      WorkflowTechnique.FORM_INPUT,
      WorkflowTechnique.HUMAN_IN_THE_LOOP,
      WorkflowTechnique.NOTIFICATION,
    ],
  },
  {
    prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
    techniques: [
      WorkflowTechnique.SCHEDULING,
      WorkflowTechnique.SCRAPING_AND_RESEARCH,
      WorkflowTechnique.DATA_EXTRACTION,
      WorkflowTechnique.DATA_ANALYSIS,
    ],
  },
  {
    prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
    techniques: [
      WorkflowTechnique.DOCUMENT_PROCESSING,
      WorkflowTechnique.DATA_EXTRACTION,
      WorkflowTechnique.DATA_TRANSFORMATION,
      WorkflowTechnique.ENRICHMENT,
    ],
  },
  {
    prompt: 'Build a searchable internal knowledge base from past support tickets',
    techniques: [
      WorkflowTechnique.DATA_TRANSFORMATION,
      WorkflowTechnique.DATA_ANALYSIS,
      WorkflowTechnique.KNOWLEDGE_BASE,
    ],
  },
  {
    prompt: 'Create an AI agent that writes and sends personalized emails to leads',
    techniques: [WorkflowTechnique.CONTENT_GENERATION, WorkflowTechnique.NOTIFICATION],
  },
  {
    prompt:
      'Fetch trending topics from Google Trends and Reddit, select the best ones, and create social posts',
    techniques: [
      WorkflowTechnique.SCRAPING_AND_RESEARCH,
      WorkflowTechnique.TRIAGE,
      WorkflowTechnique.CONTENT_GENERATION,
    ],
  },
  {
    prompt:
      'Trigger when a new contact is created in HubSpot and enrich their profile with LinkedIn data',
    techniques: [WorkflowTechnique.MONITORING, WorkflowTechnique.ENRICHMENT],
  },
  {
    prompt: 'Get stock prices from financial APIs and analyze volatility patterns',
    techniques: [WorkflowTechnique.SCRAPING_AND_RESEARCH, WorkflowTechnique.DATA_ANALYSIS],
  },
  {
    prompt: 'Generate video reels from templates and auto-post to social media on schedule',
    techniques: [
      WorkflowTechnique.SCHEDULING,
      WorkflowTechnique.DOCUMENT_PROCESSING,
      WorkflowTechnique.CONTENT_GENERATION,
    ],
  },
  {
    prompt: 'Receive news from Telegram channels, filter relevant ones, and forward to my channel',
    techniques: [
      WorkflowTechnique.MONITORING,
      WorkflowTechnique.TRIAGE,
      WorkflowTechnique.NOTIFICATION,
    ],
  },
  {
    prompt: 'Analyze YouTube video performance data and generate a weekly report',
    techniques: [
      WorkflowTechnique.SCRAPING_AND_RESEARCH,
      WorkflowTechnique.DATA_ANALYSIS,
      WorkflowTechnique.DATA_TRANSFORMATION,
    ],
  },
  {
    prompt:
      'Create a chatbot that answers questions using data from a Google Sheet as knowledge base',
    techniques: [WorkflowTechnique.CHATBOT, WorkflowTechnique.KNOWLEDGE_BASE],
  },
  {
    prompt: 'Form submission with file upload triggers document extraction and approval workflow',
    techniques: [
      WorkflowTechnique.FORM_INPUT,
      WorkflowTechnique.DOCUMENT_PROCESSING,
      WorkflowTechnique.HUMAN_IN_THE_LOOP,
    ],
  },
];
/**
 * Renders every entry of TechniqueDescription as a markdown bullet
 * ("- **KEY**: description"), one per line.
 */
export function formatTechniqueList(): string {
  const bullets: string[] = [];
  for (const [key, description] of Object.entries(TechniqueDescription)) {
    bullets.push(`- **${key}**: ${description}`);
  }
  return bullets.join('\n');
}
/**
 * Formats the few-shot examples as "prompt → technique list" bullets.
 * The explicit " → " separator keeps the prompt text from running straight
 * into the technique names (previously they were concatenated with no
 * separator, producing e.g. "...messagesMONITORING, CHATBOT").
 */
export function formatExampleCategorizations(): string {
  return exampleCategorizations
    .map((example) => `- ${example.prompt}${example.techniques.join(', ')}`)
    .join('\n');
}
/** Options controlling which tools and process steps the discovery prompt advertises. */
export interface DiscoveryPromptOptions {
  /** When true, get_workflow_examples is listed as a tool and scheduled as a process step. */
  includeExamples: boolean;
}
/** Role statement for the discovery agent. */
const DISCOVERY_ROLE = `You are a Discovery Agent for n8n AI Workflow Builder.
YOUR ROLE: Identify relevant n8n nodes and their connection-changing parameters.`;

/**
 * Technique-selection guidance. Contains {techniques} and
 * {exampleCategorizations} placeholders — presumably substituted by the
 * prompt-template machinery at build time; confirm at the call site.
 */
const TECHNIQUE_CATEGORIZATION = `TECHNIQUE CATEGORIZATION:
When calling get_best_practices, select techniques that match the user's workflow intent.
<available_techniques>
{techniques}
</available_techniques>
<example_categorizations>
{exampleCategorizations}
</example_categorizations>`;

/** Disambiguation rules for commonly confused techniques, plus selection limits. */
const TECHNIQUE_CLARIFICATIONS = `<technique_clarifications>
Common distinctions to get right:
- **NOTIFICATION vs CHATBOT**: Use NOTIFICATION when SENDING emails/messages/alerts (including to Telegram CHANNELS which are broadcast-only). Use CHATBOT only when RECEIVING and REPLYING to direct messages in a conversation.
- **MONITORING**: Use when workflow TRIGGERS on external events (new record created, status changed, incoming webhook, new message in channel). NOT just scheduled runs.
- **SCRAPING_AND_RESEARCH vs DATA_EXTRACTION**: Use SCRAPING when fetching from EXTERNAL sources (APIs, websites, social media). Use DATA_EXTRACTION for parsing INTERNAL data you already have.
- **TRIAGE**: Use when SELECTING, PRIORITIZING, ROUTING, or QUALIFYING items (e.g., "pick the best", "route to correct team", "qualify leads").
- **DOCUMENT_PROCESSING**: Use for ANY file handling - PDFs, images, videos, Excel, Google Sheets, audio files, file uploads in forms.
- **HUMAN_IN_THE_LOOP**: Use when workflow PAUSES for human approval, review, signing documents, responding to polls, or any manual input before continuing.
- **DATA_ANALYSIS**: Use when ANALYZING, CLASSIFYING, IDENTIFYING PATTERNS, or UNDERSTANDING data (e.g., "analyze outcomes", "learn from previous", "classify by type", "identify trends").
- **KNOWLEDGE_BASE**: Use when storing/retrieving from a DATA SOURCE for Q&A - includes vector DBs, spreadsheets used as databases, document collections.
- **DATA_TRANSFORMATION**: Use when CONVERTING data format, creating REPORTS/SUMMARIES from analyzed data, or restructuring output.
</technique_clarifications>
Technique selection rules:
- Select ALL techniques that apply (most workflows use 2-4)
- Maximum 5 techniques
- Only select techniques you're confident apply`;
/** Rules for deciding whether a node parameter counts as "connection-changing". */
const CONNECTION_PARAMETERS = `CONNECTION-CHANGING PARAMETERS - CRITICAL RULES:
A parameter is connection-changing ONLY IF it appears in <input> or <output> expressions within <node_details>.
**How to identify:**
1. Look at the <connections> section in node details
2. Check if <input> or <output> uses expressions like: ={{...parameterName...}}
3. If a parameter is referenced in these expressions, it IS connection-changing
4. If a parameter is NOT in <input>/<output> expressions, it is NOT connection-changing
**Example from AI Agent:**
\`\`\`xml
<input>={{...hasOutputParser, needsFallback...}}</input>
\`\`\`
→ hasOutputParser and needsFallback ARE connection-changing (they control which inputs appear)
**Counter-example:**
\`\`\`xml
<properties>
<property name="promptType">...</property> <!-- NOT in <input>/<output> -->
<property name="systemMessage">...</property> <!-- NOT in <input>/<output> -->
</properties>
\`\`\`
→ promptType and systemMessage are NOT connection-changing (they don't affect connections)
**Common connection-changing parameters:**
- Vector Store: mode (appears in <input>/<output> expressions)
- AI Agent: hasOutputParser, needsFallback (appears in <input> expression)
- Merge: numberInputs (appears in <input> expression)
- Webhook: responseMode (appears in <output> expression)`;
/** Nodes whose input/output count depends on parameter values (Switch, Merge). */
const DYNAMIC_OUTPUT_NODES = `<dynamic_output_nodes>
Some nodes have DYNAMIC outputs that depend on parameter values:
**Switch Node** (n8n-nodes-base.switch):
- When mode is "rules", the number of outputs equals the number of routing rules
- Connection parameter: mode: "rules" - CRITICAL for enabling rule-based routing
- Each rule in rules.values[] creates one output
- The rules parameter uses the same filter structure as IF node conditions
- ALWAYS flag mode as connection-changing with possibleValues: ["rules", "expression"]
**Merge Node** (n8n-nodes-base.merge):
- numberInputs parameter controls how many inputs the node accepts
When you find these nodes, ALWAYS flag mode/numberInputs as connection-changing parameters with possibleValues.
</dynamic_output_nodes>`;

/** AI nodes whose required sub-nodes must be searched for alongside them. */
const SUB_NODES_SEARCHES = `SUB-NODES SEARCHES:
When searching for AI nodes, ALSO search for their required sub-nodes:
- "AI Agent" → also search for "Chat Model", "Memory", "Output Parser"
- "Basic LLM Chain" → also search for "Chat Model", "Output Parser"
- "Vector Store" → also search for "Embeddings", "Document Loader"`;

/** Heuristics for when a Structured Output Parser node should be included. */
const STRUCTURED_OUTPUT_PARSER = `STRUCTURED OUTPUT PARSER - WHEN TO INCLUDE:
Search for "Structured Output Parser" (@n8n/n8n-nodes-langchain.outputParserStructured) when:
- AI output will be used programmatically (conditions, formatting, database storage, API calls)
- AI needs to extract specific fields (e.g., score, category, priority, action items)
- AI needs to classify/categorize data into defined categories
- Downstream nodes need to access specific fields from AI response (e.g., $json.score, $json.category)
- Output will be displayed in a formatted way (e.g., HTML email with specific sections)
- Data needs validation against a schema before processing
- Always use search_nodes to find the exact node names and versions - NEVER guess versions`;
/**
 * Hard rules the discovery agent must follow on every run.
 * Fixes the "RELVANT node" typo in the prompt text ("RELEVANT nodes").
 */
const CRITICAL_RULES = `CRITICAL RULES:
- NEVER ask clarifying questions
- ALWAYS call get_best_practices first
- THEN Call search_nodes to learn about available nodes and their inputs and outputs
- FINALLY call get_node_details IN PARALLEL for speed to get more details about RELEVANT nodes
- ALWAYS extract version number from <version> tag in node details
- NEVER guess node versions - always use search_nodes to find exact versions
- ONLY flag connectionChangingParameters if they appear in <input> or <output> expressions
- If no parameters appear in connection expressions, return empty array []
- Output ONLY: nodesFound with {{ nodeName, version, reasoning, connectionChangingParameters }}`;
/** Behaviors the discovery agent must never exhibit. */
const RESTRICTIONS = `DO NOT:
- Output text commentary between tool calls
- Include bestPractices or categorization in submit_discovery_results
- Flag parameters that don't affect connections
- Stop without calling submit_discovery_results`;
/**
 * Builds the AVAILABLE TOOLS section of the discovery prompt.
 * get_workflow_examples is only advertised when examples are enabled;
 * submit_discovery_results is always last.
 */
function generateAvailableToolsList(options: DiscoveryPromptOptions): string {
  return [
    '- get_best_practices: Retrieve best practices (internal context)',
    '- search_nodes: Find n8n nodes by keyword',
    '- get_node_details: Get complete node information including <connections>',
    ...(options.includeExamples
      ? ['- get_workflow_examples: Search for workflow examples as reference']
      : []),
    '- submit_discovery_results: Submit final results',
  ].join('\n');
}
/**
 * Builds the numbered PROCESS section of the discovery prompt.
 * When examples are enabled, a get_workflow_examples step is inserted as step 3
 * and the "identify components" step additionally mentions examples.
 */
function generateProcessSteps(options: DiscoveryPromptOptions): string {
  const examplesContext = options.includeExamples ? ', and examples' : '';
  const exampleSteps = options.includeExamples
    ? ['**Call get_workflow_examples** with search queries for mentioned services/models']
    : [];
  const steps = [
    '**Analyze user prompt** - Extract services, models, and technologies mentioned',
    '**Call get_best_practices** with identified techniques (internal context)',
    ...exampleSteps,
    `**Identify workflow components** from user request, best practices${examplesContext}`,
    '**Call search_nodes IN PARALLEL** for all components (e.g., "Gmail", "OpenAI", "Schedule")',
    '**Call get_node_details IN PARALLEL** for ALL promising nodes (batch multiple calls)',
    `**Extract node information** from each node_details response:
- Node name from <name> tag
- Version number from <version> tag (required - extract the number)
- Connection-changing parameters from <connections> section`,
    '**Call submit_discovery_results** with complete nodesFound array',
  ];
  return steps.map((step, i) => `${i + 1}. ${step}`).join('\n');
}
/**
 * Assembles the Discovery Agent system prompt. The tool list and process
 * steps vary with the provided options; every other section is static.
 */
export function buildDiscoveryPrompt(options: DiscoveryPromptOptions): string {
  const sections: string[] = [
    DISCOVERY_ROLE,
    `AVAILABLE TOOLS:\n${generateAvailableToolsList(options)}`,
    `PROCESS:\n${generateProcessSteps(options)}`,
    TECHNIQUE_CATEGORIZATION,
    TECHNIQUE_CLARIFICATIONS,
    CONNECTION_PARAMETERS,
    DYNAMIC_OUTPUT_NODES,
    SUB_NODES_SEARCHES,
    STRUCTURED_OUTPUT_PARSER,
    CRITICAL_RULES,
    RESTRICTIONS,
  ];
  return sections.join('\n\n');
}

View File

@@ -0,0 +1,45 @@
/**
* Responder Agent Prompt
*
* Synthesizes final user-facing responses from workflow building context.
* Also handles conversational queries and explanations.
*/
/** Role statement and available context for the responder agent. */
const RESPONDER_ROLE = `You are a helpful AI assistant for n8n workflow automation.
You have access to context about what has been built, including:
- Discovery results (nodes found)
- Builder output (workflow structure)
- Configuration summary (setup instructions)`;

/** How to synthesize the final user-facing message after a workflow build completes. */
const WORKFLOW_COMPLETION = `FOR WORKFLOW COMPLETION RESPONSES:
When you receive [Internal Context], synthesize a clean user-facing response:
1. Summarize what was built in a friendly way
2. Explain the workflow structure briefly
3. Include setup instructions if provided
4. Ask if user wants adjustments
Example response structure:
"I've created your [workflow type] workflow! Here's what it does:
[Brief explanation of the flow]
**Setup Required:**
[List any configuration steps from the context]
Let me know if you'd like to adjust anything."`;

/** Tone guidance for plain question/answer turns. */
const CONVERSATIONAL_RESPONSES = `FOR QUESTIONS/CONVERSATIONS:
- Be friendly and concise
- Explain n8n capabilities when asked
- Provide practical examples when helpful`;

/** General style rules applied to every responder message. */
const RESPONSE_STYLE = `RESPONSE STYLE:
- Keep responses focused and not overly long
- Use markdown formatting for readability
- Be conversational and helpful`;
/** Assembles the Responder Agent system prompt from its static sections. */
export function buildResponderPrompt(): string {
  const sections = [RESPONDER_ROLE, WORKFLOW_COMPLETION, CONVERSATIONAL_RESPONSES, RESPONSE_STYLE];
  return sections.join('\n\n');
}

View File

@@ -0,0 +1,57 @@
/**
* Supervisor Agent Prompt
*
* Handles INITIAL routing based on user intent.
* After initial routing, deterministic routing takes over based on coordination log.
*/
/** One-line role statement for the supervisor. */
const SUPERVISOR_ROLE = 'You are a Supervisor that routes user requests to specialist agents.';

/** The agents the supervisor may route to; responder ends the loop. */
const AVAILABLE_AGENTS = `AVAILABLE AGENTS:
- discovery: Find n8n nodes for building/modifying workflows
- builder: Create nodes and connections (requires discovery first for new node types)
- configurator: Set parameters on EXISTING nodes (no structural changes)
- responder: Answer questions, confirm completion (TERMINAL)`;

/** Ordered intent checks the supervisor walks through when routing. */
const ROUTING_DECISION_TREE = `ROUTING DECISION TREE:
1. Is user asking a question or chatting? → responder
Examples: "what does this do?", "explain the workflow", "thanks"
2. Does the request involve NEW or DIFFERENT node types? → discovery
Examples:
- "Build a workflow that..." (new workflow)
- "Use [ServiceB] instead of [ServiceA]" (replacing node type)
- "Add [some integration]" (new integration)
- "Switch from [ServiceA] to [ServiceB]" (swapping services)
3. Is the request about connecting/disconnecting existing nodes? → builder
Examples: "Connect node A to node B", "Remove the connection to X"
4. Is the request about changing VALUES in existing nodes? → configurator
Examples:
- "Change the URL to https://..."
- "Set the timeout to 30 seconds"
- "Update the email subject to..."`;

/** Clarifies replacement (discovery) vs configuration (configurator) - a common confusion point. */
const KEY_DISTINCTION = `KEY DISTINCTION:
- "Use [ServiceB] instead of [ServiceA]" = REPLACEMENT = discovery (new node type needed)
- "Change the [ServiceA] API key" = CONFIGURATION = configurator (same node, different value)`;

/** Structured output fields expected from the routing decision. */
const OUTPUT_FORMAT = `OUTPUT:
- reasoning: One sentence explaining your routing decision
- next: Agent name`;
/**
 * Assembles the Supervisor system prompt used for the INITIAL routing turn
 * (subsequent routing is deterministic, per the file header).
 */
export function buildSupervisorPrompt(): string {
  const sections = [
    SUPERVISOR_ROLE,
    AVAILABLE_AGENTS,
    ROUTING_DECISION_TREE,
    KEY_DISTINCTION,
    OUTPUT_FORMAT,
  ];
  return sections.join('\n\n');
}

/** Appended after the conversation when asking the supervisor for its next-agent decision. */
export const SUPERVISOR_PROMPT_SUFFIX =
  '\n\nGiven the conversation above, which agent should act next? Provide your reasoning and selection.';

View File

@@ -0,0 +1,91 @@
import { PromptTemplate } from '@langchain/core/prompts';
import { WorkflowTechnique, TechniqueDescription } from '@/types/categorization';
/**
 * Few-shot examples for prompt categorization - helps LLM understand expected output format.
 * Each entry pairs a user prompt with the techniques a correct categorization
 * should yield; rendered into the template via formatExamplePrompts().
 */
export const examplePrompts = [
  {
    prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
    techniques: [
      WorkflowTechnique.MONITORING,
      WorkflowTechnique.CHATBOT,
      WorkflowTechnique.CONTENT_GENERATION,
    ],
  },
  {
    prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
    techniques: [
      WorkflowTechnique.FORM_INPUT,
      WorkflowTechnique.HUMAN_IN_THE_LOOP,
      WorkflowTechnique.NOTIFICATION,
    ],
  },
  {
    prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
    techniques: [
      WorkflowTechnique.SCHEDULING,
      WorkflowTechnique.SCRAPING_AND_RESEARCH,
      WorkflowTechnique.DATA_EXTRACTION,
      WorkflowTechnique.DATA_ANALYSIS,
    ],
  },
  {
    prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
    techniques: [
      WorkflowTechnique.DOCUMENT_PROCESSING,
      WorkflowTechnique.DATA_EXTRACTION,
      WorkflowTechnique.DATA_TRANSFORMATION,
      WorkflowTechnique.ENRICHMENT,
    ],
  },
  {
    prompt: 'Build a searchable internal knowledge base from past support tickets',
    techniques: [
      WorkflowTechnique.DATA_TRANSFORMATION,
      WorkflowTechnique.DATA_ANALYSIS,
      WorkflowTechnique.KNOWLEDGE_BASE,
    ],
  },
];
/**
 * Formats example prompts as "prompt → techniques" lines for few-shot learning.
 * Adds the " → " separator described by the template text below (previously the
 * prompt ran straight into the technique names) and a space after each comma,
 * matching formatExampleCategorizations in the discovery prompt.
 */
export function formatExamplePrompts(): string {
  return examplePrompts
    .map((example) => `- ${example.prompt}${example.techniques.join(', ')}`)
    .join('\n');
}
/** Generates a markdown bullet list of every technique with its description. */
export function formatTechniqueList(): string {
  const bullets: string[] = [];
  for (const [key, description] of Object.entries(TechniqueDescription)) {
    bullets.push(`- **${key}**: ${description}`);
  }
  return bullets.join('\n');
}
/**
 * Template for analyzing user prompts and identifying workflow techniques.
 * LangChain input variables: {userPrompt} and {techniques}. The few-shot
 * examples are baked in once at module load via the ${'{'}...{'}'}-style JS
 * interpolation of formatExamplePrompts() below.
 */
export const promptCategorizationTemplate = PromptTemplate.fromTemplate(
  `Analyze the following user prompt and identify the workflow techniques required to fulfill the request.
Be specific and identify all relevant techniques.
<user_prompt>
{userPrompt}
</user_prompt>
<workflow_techniques>
{techniques}
</workflow_techniques>
The following prompt categorization examples show a prompt → techniques involved to provide a sense
of how the categorization should be carried out.
<example_categorization>
${formatExamplePrompts()}
</example_categorization>
Select a maximum of 5 techniques that you believe are applicable, but only select them if you are
confident that they are applicable. If the prompt is ambiguous or does not provide an obvious workflow
do not provide any techniques - if confidence is low avoid providing techniques.
Select ALL techniques that apply to this workflow. Most workflows use multiple techniques.
Rate your confidence in this categorization from 0.0 to 1.0.
`,
);

View File

@@ -0,0 +1,16 @@
import { PromptTemplate } from '@langchain/core/prompts';
/**
 * Template for summarizing multi-turn conversations into a structured format.
 * LangChain input variables: {previousSummary} and {conversationText}.
 */
export const compactPromptTemplate = PromptTemplate.fromTemplate(
  `Please summarize the following conversation between a user and an AI assistant building an n8n workflow:
<previous_summary>
{previousSummary}
</previous_summary>
<conversation>
{conversationText}
</conversation>
Provide a structured summary that captures the key points, decisions made, current state of the workflow, and suggested next steps.`,
);

View File

@@ -0,0 +1,26 @@
/**
 * Barrel exports for the parameter-updater prompt building blocks:
 * the prompt builder, its configuration helpers, and the base /
 * node-type / parameter-type guide strings.
 */
export { ParameterUpdatePromptBuilder } from './prompt-builder';
export { instanceUrlPrompt } from './instance-url';
export {
  DEFAULT_PROMPT_CONFIG,
  getNodeTypeCategory,
  mentionsResourceKeywords,
  mentionsTextKeywords,
} from './prompt-config';

// Base prompts shared by every node type
export { CORE_INSTRUCTIONS } from './base/core-instructions';
export { EXPRESSION_RULES } from './base/expression-rules';
export { COMMON_PATTERNS } from './base/common-patterns';
export { OUTPUT_FORMAT } from './base/output-format';

// Node type guides
export { SET_NODE_GUIDE } from './node-types/set-node';
export { IF_NODE_GUIDE } from './node-types/if-node';
export { SWITCH_NODE_GUIDE } from './node-types/switch-node';
export { HTTP_REQUEST_GUIDE } from './node-types/http-request';
export { TOOL_NODES_GUIDE } from './node-types/tool-nodes';

// Parameter type guides
export { RESOURCE_LOCATOR_GUIDE } from './parameter-types/resource-locator';
export { SYSTEM_MESSAGE_GUIDE } from './parameter-types/system-message';
export { TEXT_FIELDS_GUIDE } from './parameter-types/text-fields';

View File

@@ -1,5 +1,7 @@
import type { INodeTypeDescription, INodeProperties } from 'n8n-workflow';
import type { PromptBuilderContext } from '@/types/config';
import { COMMON_PATTERNS } from './base/common-patterns';
import { CORE_INSTRUCTIONS } from './base/core-instructions';
import { EXPRESSION_RULES } from './base/expression-rules';
@@ -23,7 +25,6 @@ import {
getNodeTypeCategory,
mentionsResourceKeywords,
} from './prompt-config';
import type { PromptBuilderContext } from '../../types/config';
export class ParameterUpdatePromptBuilder {
/**

View File

@@ -1,4 +1,4 @@
import type { NodePromptConfig } from '../../types/config';
import type { NodePromptConfig } from '@/types/config';
export const DEFAULT_PROMPT_CONFIG: NodePromptConfig = {
nodeTypePatterns: {

View File

@@ -0,0 +1,13 @@
import { PromptTemplate } from '@langchain/core/prompts';
/**
 * Template for generating descriptive workflow names from user prompts.
 * LangChain input variable: {initialPrompt}. The instructions explicitly
 * forbid the word "workflow" in the generated name.
 */
export const workflowNamingPromptTemplate = PromptTemplate.fromTemplate(
  `Based on the initial user prompt, please generate a name for the workflow that captures its essence and purpose.
<initial_prompt>
{initialPrompt}
</initial_prompt>
This name should be concise, descriptive, and suitable for a workflow that automates tasks related to the given prompt. The name should be in a format that is easy to read and understand. Do not include the word "workflow" in the name.
`,
);

View File

@@ -0,0 +1,56 @@
/**
* Centralized prompts for AI Workflow Builder
*
* This directory contains all prompts used by the AI workflow builder agents and chains.
* Organization:
* - agents/ - Multi-agent system prompts (builder, configurator, discovery, etc.)
* - chains/ - Chain-level prompts (categorization, compact, workflow-name, parameter-updater)
* - legacy-agent.prompt.ts - Legacy single-agent mode prompt
*/
// Agent prompts (multi-agent system): one build function per specialist agent
export { buildBuilderPrompt } from './agents/builder.prompt';
export {
  buildDiscoveryPrompt,
  formatTechniqueList,
  formatExampleCategorizations,
  type DiscoveryPromptOptions,
} from './agents/discovery.prompt';
export { buildConfiguratorPrompt, INSTANCE_URL_PROMPT } from './agents/configurator.prompt';
export { buildSupervisorPrompt, SUPERVISOR_PROMPT_SUFFIX } from './agents/supervisor.prompt';
export { buildResponderPrompt } from './agents/responder.prompt';

// Legacy agent prompt (single-agent mode)
export {
  createMainAgentPrompt,
  mainAgentPrompt,
  type MainAgentPromptOptions,
} from './legacy-agent.prompt';

// Chain prompts (categorization, conversation compaction, workflow naming)
export {
  promptCategorizationTemplate,
  examplePrompts,
  formatExamplePrompts,
  // renamed to avoid clashing with the discovery agent's formatTechniqueList above
  formatTechniqueList as formatCategorizationTechniqueList,
} from './chains/categorization.prompt';
export { compactPromptTemplate } from './chains/compact.prompt';
export { workflowNamingPromptTemplate } from './chains/workflow-name.prompt';

// Parameter updater prompts (builder plus the individual guide strings)
export {
  ParameterUpdatePromptBuilder,
  instanceUrlPrompt,
  CORE_INSTRUCTIONS,
  EXPRESSION_RULES,
  COMMON_PATTERNS,
  OUTPUT_FORMAT,
  SET_NODE_GUIDE,
  IF_NODE_GUIDE,
  SWITCH_NODE_GUIDE,
  HTTP_REQUEST_GUIDE,
  TOOL_NODES_GUIDE,
  RESOURCE_LOCATOR_GUIDE,
  SYSTEM_MESSAGE_GUIDE,
  TEXT_FIELDS_GUIDE,
} from './chains/parameter-updater';

View File

@@ -1,6 +1,6 @@
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { instanceUrlPrompt } from '../../chains/prompts/instance-url';
import { instanceUrlPrompt } from './chains/parameter-updater/instance-url';
/**
* Phase configuration for the workflow creation sequence

View File

@@ -7,6 +7,7 @@ import type { Logger } from '@n8n/backend-common';
import type { INodeTypeDescription } from 'n8n-workflow';
import { LLMServiceError } from '@/errors';
import { buildBuilderPrompt } from '@/prompts/agents/builder.prompt';
import type { ChatPayload } from '@/workflow-builder-agent';
import { BaseSubgraph } from './subgraph-interface';
@@ -34,234 +35,6 @@ import {
createStandardShouldContinue,
} from '../utils/subgraph-helpers';
/**
* Builder Agent Prompt
*/
const BUILDER_PROMPT = `You are a Builder Agent specialized in constructing n8n workflows.
MANDATORY EXECUTION SEQUENCE:
You MUST follow these steps IN ORDER. Do not skip any step.
STEP 1: CREATE NODES
- Call add_nodes for EVERY node needed based on discovery results
- Create multiple nodes in PARALLEL for efficiency
- Do NOT respond with text - START BUILDING immediately
STEP 2: CONNECT NODES
- Call connect_nodes for ALL required connections
- Connect multiple node pairs in PARALLEL
STEP 3: VALIDATE (REQUIRED)
- After ALL nodes and connections are created, call validate_structure
- This step is MANDATORY - you cannot finish without it
- If validation finds issues (missing trigger, invalid connections), fix them and validate again
- MAXIMUM 3 VALIDATION ATTEMPTS: After 3 calls to validate_structure, proceed to respond regardless of remaining issues
STEP 4: RESPOND TO USER
- Only after validation passes, provide your brief summary
⚠️ NEVER respond to the user without calling validate_structure first ⚠️
NODE CREATION:
Each add_nodes call creates ONE node. You must provide:
- nodeType: The exact type from discovery (e.g., "n8n-nodes-base.httpRequest")
- name: Descriptive name (e.g., "Fetch Weather Data")
- connectionParametersReasoning: Explain your thinking about connection parameters
- connectionParameters: Parameters that affect connections (or {{}} if none needed)
<workflow_configuration_node>
Always include a Workflow Configuration node at the start of every workflow.
The Workflow Configuration node (n8n-nodes-base.set) should be placed immediately after the trigger node and before all other processing nodes.
Placement rules:
- Add between trigger and first processing node
- Connect: Trigger → Workflow Configuration → First processing node
- Name it "Workflow Configuration"
</workflow_configuration_node>
<data_parsing_strategy>
For AI-generated structured data, prefer Structured Output Parser nodes over Code nodes.
For binary file data, use Extract From File node to extract content from files before processing.
Use Code nodes only for custom business logic beyond parsing.
STRUCTURED OUTPUT PARSER RULE:
When Discovery results include Structured Output Parser:
1. Create the Structured Output Parser node
2. Set AI Agent's hasOutputParser: true in connectionParameters
3. Connect: Structured Output Parser → AI Agent (ai_outputParser connection)
</data_parsing_strategy>
<proactive_design>
Anticipate workflow needs:
- IF nodes for conditional logic when multiple outcomes exist
- Set nodes for data transformation between incompatible formats
- Schedule Triggers for recurring tasks
- Error handling for external service calls
NEVER use Split In Batches nodes.
</proactive_design>
<node_defaults_warning>
CRITICAL: NEVER RELY ON DEFAULT PARAMETER VALUES FOR CONNECTIONS
Default values often hide connection inputs/outputs. You MUST explicitly configure parameters that affect connections:
- Vector Store: Mode parameter affects available connections - always set explicitly (e.g., mode: "insert", "retrieve", "retrieve-as-tool")
- AI Agent: hasOutputParser default may not match your workflow needs
- Document Loader: textSplittingMode affects whether it accepts a text splitter input
ALWAYS check node details and set connectionParameters explicitly.
</node_defaults_warning>
CONNECTION PARAMETERS EXAMPLES:
- Static nodes (HTTP Request, Set, Code): reasoning="Static inputs/outputs", parameters={{}}
- AI Agent with structured output: reasoning="hasOutputParser enables ai_outputParser input for Structured Output Parser", parameters={{ hasOutputParser: true }}
- Vector Store insert: reasoning="Insert mode requires document input", parameters={{ mode: "insert" }}
- Document Loader custom: reasoning="Custom mode enables text splitter input", parameters={{ textSplittingMode: "custom" }}
- Switch with routing rules: reasoning="Switch needs N outputs, creating N rules.values entries with outputKeys", parameters={{ mode: "rules", rules: {{ values: [...] }} }} - see <switch_node_pattern> for full structure
<structured_output_parser_guidance>
WHEN TO SET hasOutputParser: true on AI Agent:
- Discovery found Structured Output Parser node → MUST set hasOutputParser: true
- AI output will be used in conditions (IF/Switch nodes checking $json.field)
- AI output will be formatted/displayed (HTML emails, reports with specific sections)
- AI output will be stored in database/data tables with specific fields
- AI is classifying, scoring, or extracting specific data fields
</structured_output_parser_guidance>
<node_connections_understanding>
n8n connections flow from SOURCE (output) to TARGET (input).
Regular data flow: Source node output → Target node input
Example: HTTP Request → Set (HTTP Request is source, Set is target)
AI sub-nodes PROVIDE capabilities, making them the SOURCE:
- OpenAI Chat Model → AI Agent [ai_languageModel]
- Calculator Tool → AI Agent [ai_tool]
- Window Buffer Memory → AI Agent [ai_memory]
- Token Splitter → Default Data Loader [ai_textSplitter]
- Default Data Loader → Vector Store [ai_document]
- Embeddings OpenAI → Vector Store [ai_embedding]
</node_connections_understanding>
<agent_node_distinction>
Distinguish between two different agent node types:
1. **AI Agent** (@n8n/n8n-nodes-langchain.agent)
- Main workflow node that orchestrates AI tasks
- Use for: Primary AI logic, chatbots, autonomous workflows
2. **AI Agent Tool** (@n8n/n8n-nodes-langchain.agentTool)
- Sub-node that acts as a tool for another AI Agent
- Use for: Multi-agent systems where one agent calls another
Default assumption: When discovery results include "agent", use AI Agent
unless explicitly specified as "agent tool" or "sub-agent".
</agent_node_distinction>
<rag_workflow_pattern>
For RAG (Retrieval-Augmented Generation) workflows:
Main data flow:
- Data source (e.g., HTTP Request) → Vector Store [main connection]
AI capability connections:
- Document Loader → Vector Store [ai_document]
- Embeddings → Vector Store [ai_embedding]
- Text Splitter → Document Loader [ai_textSplitter]
Common mistake to avoid:
- NEVER connect Document Loader to main data outputs
- Document Loader is an AI sub-node that gives Vector Store document processing capability
</rag_workflow_pattern>
<switch_node_pattern>
For Switch nodes with multiple routing paths:
- The number of outputs is determined by the number of entries in rules.values[]
- You MUST create the rules.values[] array with placeholder entries for each output branch
- Each entry needs: conditions structure (with empty leftValue/rightValue) + renameOutput: true + descriptive outputKey
- Configurator will fill in the actual condition values later
- Use descriptive node names like "Route by Amount" or "Route by Status"
Example connectionParameters for 3-way routing:
{{
"mode": "rules",
"rules": {{
"values": [
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 1 Name"
}},
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 2 Name"
}},
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [{{ "leftValue": "", "rightValue": "", "operator": {{ "type": "string", "operation": "equals" }} }}],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Output 3 Name"
}}
]
}}
}}
</switch_node_pattern>
<connection_type_examples>
**Main Connections** (regular data flow):
- Trigger → HTTP Request → Set → Email
**AI Language Model Connections** (ai_languageModel):
- OpenAI Chat Model → AI Agent
**AI Tool Connections** (ai_tool):
- Calculator Tool → AI Agent
- AI Agent Tool → AI Agent (for multi-agent systems)
**AI Document Connections** (ai_document):
- Document Loader → Vector Store
**AI Embedding Connections** (ai_embedding):
- OpenAI Embeddings → Vector Store
**AI Text Splitter Connections** (ai_textSplitter):
- Token Text Splitter → Document Loader
**AI Memory Connections** (ai_memory):
- Window Buffer Memory → AI Agent
**AI Vector Store in retrieve-as-tool mode** (ai_tool):
- Vector Store → AI Agent
</connection_type_examples>
DO NOT:
- Respond before calling validate_structure
- Skip validation even if you think structure is correct
- Add commentary between tool calls - execute tools silently
- Configure node parameters (that's the Configurator Agent's job)
- Search for nodes (that's the Discovery Agent's job)
- Make assumptions about node types - use exactly what Discovery found
RESPONSE FORMAT (only after validation):
Provide ONE brief text message summarizing:
- What nodes were added
- How they're connected
Example: "Created 4 nodes: Trigger → Weather → Image Generation → Email"`;
/**
* Builder Subgraph State
*/
@@ -337,7 +110,7 @@ export class BuilderSubgraph extends BaseSubgraph<
[
{
type: 'text',
text: BUILDER_PROMPT,
text: buildBuilderPrompt(),
cache_control: { type: 'ephemeral' },
},
],

View File

@@ -8,6 +8,7 @@ import type { Logger } from '@n8n/backend-common';
import type { INodeTypeDescription } from 'n8n-workflow';
import { LLMServiceError } from '@/errors';
import { buildConfiguratorPrompt, INSTANCE_URL_PROMPT } from '@/prompts/agents/configurator.prompt';
import { BaseSubgraph } from './subgraph-interface';
import type { ParentGraphState } from '../parent-graph-state';
@@ -33,126 +34,6 @@ import {
} from '../utils/subgraph-helpers';
import type { ChatPayload } from '../workflow-builder-agent';
/**
 * Configurator Agent system prompt.
 *
 * Instructs the LLM to (1) call update_node_parameters for every node,
 * (2) validate via validate_configuration (max 3 attempts), and only then
 * (3) respond with a brief summary.
 *
 * NOTE(review): the doubled braces ({{ }}) and quadrupled braces ({{{{ }}}})
 * in the JSON examples appear to be escape sequences for the prompt
 * templating layer so literal braces survive substitution — confirm against
 * the ChatPromptTemplate usage before "fixing" them to single braces.
 */
const CONFIGURATOR_PROMPT = `You are a Configurator Agent specialized in setting up n8n node parameters.
MANDATORY EXECUTION SEQUENCE:
You MUST follow these steps IN ORDER. Do not skip any step.
STEP 1: CONFIGURE ALL NODES
- Call update_node_parameters for EVERY node in the workflow
- Configure multiple nodes in PARALLEL for efficiency
- Do NOT respond with text - START CONFIGURING immediately
STEP 2: VALIDATE (REQUIRED)
- After ALL configurations complete, call validate_configuration
- This step is MANDATORY - you cannot finish without it
- If validation finds issues, fix them and validate again
- MAXIMUM 3 VALIDATION ATTEMPTS: After 3 calls to validate_configuration, proceed to respond regardless of remaining issues
STEP 3: RESPOND TO USER
- Only after validation passes, provide your response
NEVER respond to the user without calling validate_configuration first
WORKFLOW JSON DETECTION:
- You receive <current_workflow_json> in your context
- If you see nodes in the workflow JSON, you MUST configure them IMMEDIATELY
- Look at the workflow JSON, identify each node, and call update_node_parameters for ALL of them
PARAMETER CONFIGURATION:
Use update_node_parameters with natural language instructions:
- "Set URL to https://api.example.com/weather"
- "Add header Authorization: Bearer token"
- "Set method to POST"
- "Add field 'status' with value 'processed'"
SPECIAL EXPRESSIONS FOR TOOL NODES:
Tool nodes (types ending in "Tool") support $fromAI expressions:
- "Set sendTo to ={{ $fromAI('to') }}"
- "Set subject to ={{ $fromAI('subject') }}"
- "Set message to ={{ $fromAI('message_html') }}"
- "Set timeMin to ={{ $fromAI('After', '', 'string') }}"
$fromAI syntax: ={{ $fromAI('key', 'description', 'type', defaultValue) }}
- ONLY use in tool nodes (check node type ends with "Tool")
- Use for dynamic values that AI determines at runtime
- For regular nodes, use static values or standard expressions
CRITICAL PARAMETERS TO ALWAYS SET:
- HTTP Request: URL, method, headers (if auth needed)
- Set node: Fields to set with values
- Code node: The actual code to execute
- IF node: Conditions to check
- Switch node: Configure rules.values[] with conditions for each output branch (uses same filter structure as IF node)
- Document Loader: dataType parameter ('binary' for files like PDF, 'json' for JSON data)
- AI nodes: Prompts, models, configurations
- Tool nodes: Use $fromAI for dynamic recipient/subject/message fields
NEVER RELY ON DEFAULT VALUES:
Defaults are traps that cause runtime failures. Examples:
- Document Loader defaults to 'json' but MUST be 'binary' when processing files
- HTTP Request defaults to GET but APIs often need POST
- Vector Store mode affects available connections - set explicitly (retrieve-as-tool when using with AI Agent)
<switch_node_configuration>
Switch nodes require configuring rules.values[] array - each entry creates one output:
Structure per rule:
{{
"conditions": {{
"options": {{ "caseSensitive": true, "leftValue": "", "typeValidation": "strict" }},
"conditions": [
{{
"leftValue": "={{{{ $json.fieldName }}}}",
"rightValue": <value>,
"operator": {{ "type": "number|string", "operation": "lt|gt|equals|etc" }}
}}
],
"combinator": "and"
}},
"renameOutput": true,
"outputKey": "Descriptive Label"
}}
For numeric ranges (e.g., $100-$1000):
- Use TWO conditions with combinator: "and"
- First: gte (greater than or equal)
- Second: lte (less than or equal)
Always set renameOutput: true and provide descriptive outputKey labels.
</switch_node_configuration>
<response_format>
After validation passes, provide a concise summary:
- List any placeholders requiring user configuration (e.g., "URL placeholder needs actual endpoint")
- Note which nodes were configured and key settings applied
- Keep it brief - this output is used for coordination with other LLM agents, not displayed directly to users
</response_format>
DO NOT:
- Respond before calling validate_configuration
- Skip validation even if you think configuration is correct
- Add commentary between tool calls - execute tools silently`;
/**
 * Instance URL prompt template.
 *
 * Appended to the configurator system prompt so webhook and chat-trigger
 * nodes can construct correct callback URLs. The `{instanceUrl}`
 * placeholder is presumably substituted by the prompt templating layer —
 * confirm at the call site that builds the system message.
 */
const INSTANCE_URL_PROMPT = `
<instance_url>
The n8n instance base URL is: {instanceUrl}
This URL is essential for webhook nodes and chat triggers as it provides the base URL for:
- Webhook URLs that external services need to call
- Chat trigger URLs for conversational interfaces
- Any node that requires the full instance URL to generate proper callback URLs
When working with webhook or chat trigger nodes, use this URL as the base for constructing proper endpoint URLs.
</instance_url>
`;
/**
* Configurator Subgraph State
*/
@@ -243,7 +124,7 @@ export class ConfiguratorSubgraph extends BaseSubgraph<
[
{
type: 'text',
text: CONFIGURATOR_PROMPT,
text: buildConfiguratorPrompt(),
},
{
type: 'text',

View File

@@ -11,10 +11,10 @@ import { z } from 'zod';
import { LLMServiceError } from '@/errors';
import {
TechniqueDescription,
WorkflowTechnique,
type WorkflowTechniqueType,
} from '@/types/categorization';
buildDiscoveryPrompt,
formatTechniqueList,
formatExampleCategorizations,
} from '@/prompts/agents/discovery.prompt';
import type { BuilderFeatureFlags } from '@/workflow-builder-agent';
import { BaseSubgraph } from './subgraph-interface';
@@ -31,136 +31,6 @@ import { buildWorkflowSummary, createContextMessage } from '../utils/context-bui
import { appendArrayReducer, nodeConfigurationsReducer } from '../utils/state-reducers';
import { executeSubgraphTools, extractUserRequest } from '../utils/subgraph-helpers';
/**
 * Example categorizations to guide technique selection.
 * Expanded with diverse examples to improve accuracy.
 *
 * Each entry pairs a realistic user prompt with the set of
 * WorkflowTechnique labels the Discovery agent is expected to choose.
 * The list is rendered into the system prompt by
 * formatExampleCategorizations() as few-shot guidance.
 */
const exampleCategorizations: Array<{
	prompt: string;
	techniques: WorkflowTechniqueType[];
}> = [
	{
		prompt: 'Monitor social channels for product mentions and auto-respond with campaign messages',
		techniques: [
			WorkflowTechnique.MONITORING,
			WorkflowTechnique.CHATBOT,
			WorkflowTechnique.CONTENT_GENERATION,
		],
	},
	{
		prompt: 'Collect partner referral submissions and verify client instances via BigQuery',
		techniques: [
			WorkflowTechnique.FORM_INPUT,
			WorkflowTechnique.HUMAN_IN_THE_LOOP,
			WorkflowTechnique.NOTIFICATION,
		],
	},
	{
		prompt: 'Scrape competitor pricing pages weekly and generate a summary report of changes',
		techniques: [
			WorkflowTechnique.SCHEDULING,
			WorkflowTechnique.SCRAPING_AND_RESEARCH,
			WorkflowTechnique.DATA_EXTRACTION,
			WorkflowTechnique.DATA_ANALYSIS,
		],
	},
	{
		prompt: 'Process uploaded PDF contracts to extract client details and update CRM records',
		techniques: [
			WorkflowTechnique.DOCUMENT_PROCESSING,
			WorkflowTechnique.DATA_EXTRACTION,
			WorkflowTechnique.DATA_TRANSFORMATION,
			WorkflowTechnique.ENRICHMENT,
		],
	},
	{
		prompt: 'Build a searchable internal knowledge base from past support tickets',
		techniques: [
			WorkflowTechnique.DATA_TRANSFORMATION,
			WorkflowTechnique.DATA_ANALYSIS,
			WorkflowTechnique.KNOWLEDGE_BASE,
		],
	},
	// Additional examples to address common misclassifications
	// (e.g. NOTIFICATION vs CHATBOT, SCRAPING vs DATA_EXTRACTION).
	{
		prompt: 'Create an AI agent that writes and sends personalized emails to leads',
		techniques: [WorkflowTechnique.CONTENT_GENERATION, WorkflowTechnique.NOTIFICATION],
	},
	{
		prompt:
			'Fetch trending topics from Google Trends and Reddit, select the best ones, and create social posts',
		techniques: [
			WorkflowTechnique.SCRAPING_AND_RESEARCH,
			WorkflowTechnique.TRIAGE,
			WorkflowTechnique.CONTENT_GENERATION,
		],
	},
	{
		prompt:
			'Trigger when a new contact is created in HubSpot and enrich their profile with LinkedIn data',
		techniques: [WorkflowTechnique.MONITORING, WorkflowTechnique.ENRICHMENT],
	},
	{
		prompt: 'Get stock prices from financial APIs and analyze volatility patterns',
		techniques: [WorkflowTechnique.SCRAPING_AND_RESEARCH, WorkflowTechnique.DATA_ANALYSIS],
	},
	{
		prompt: 'Generate video reels from templates and auto-post to social media on schedule',
		techniques: [
			WorkflowTechnique.SCHEDULING,
			WorkflowTechnique.DOCUMENT_PROCESSING,
			WorkflowTechnique.CONTENT_GENERATION,
		],
	},
	{
		prompt: 'Receive news from Telegram channels, filter relevant ones, and forward to my channel',
		techniques: [
			WorkflowTechnique.MONITORING,
			WorkflowTechnique.TRIAGE,
			WorkflowTechnique.NOTIFICATION,
		],
	},
	{
		prompt: 'Analyze YouTube video performance data and generate a weekly report',
		techniques: [
			WorkflowTechnique.SCRAPING_AND_RESEARCH,
			WorkflowTechnique.DATA_ANALYSIS,
			WorkflowTechnique.DATA_TRANSFORMATION,
		],
	},
	{
		prompt:
			'Create a chatbot that answers questions using data from a Google Sheet as knowledge base',
		techniques: [WorkflowTechnique.CHATBOT, WorkflowTechnique.KNOWLEDGE_BASE],
	},
	{
		prompt: 'Form submission with file upload triggers document extraction and approval workflow',
		techniques: [
			WorkflowTechnique.FORM_INPUT,
			WorkflowTechnique.DOCUMENT_PROCESSING,
			WorkflowTechnique.HUMAN_IN_THE_LOOP,
		],
	},
];
/**
 * Render the technique catalogue as a markdown bullet list for prompt injection.
 * One line per technique in the form `- **KEY**: description`.
 */
function formatTechniqueList(): string {
	const bullets: string[] = [];
	for (const [name, blurb] of Object.entries(TechniqueDescription)) {
		bullets.push(`- **${name}**: ${blurb}`);
	}
	return bullets.join('\n');
}
/**
 * Render the example categorizations as markdown bullets for prompt injection.
 *
 * One line per example: `- <user prompt> → <comma-separated techniques>`.
 *
 * FIX: the previous template concatenated the prompt text and the technique
 * list with no separator, producing a run-on line in which the model cannot
 * tell where the prompt ends and the expected labels begin (this looks like
 * a dropped separator glyph). An explicit " → " restores the intended
 * "prompt → techniques" shape used elsewhere in the prompt.
 */
function formatExampleCategorizations(): string {
	return exampleCategorizations
		.map((example) => `- ${example.prompt} → ${example.techniques.join(', ')}`)
		.join('\n');
}
/**
* Strict Output Schema for Discovery
* Simplified to reduce token usage while maintaining utility for downstream subgraphs
@@ -193,191 +63,6 @@ const discoveryOutputSchema = z.object({
.describe('List of n8n nodes identified as necessary for the workflow'),
});
/**
 * Options controlling which optional sections appear in the Discovery prompt.
 */
interface DiscoveryPromptOptions {
	// When true, the get_workflow_examples tool and its process step are included.
	includeExamples: boolean;
}
/**
 * Build the numbered PROCESS section of the Discovery prompt.
 *
 * The get_workflow_examples step — and the matching ", and examples"
 * mention in the "identify components" step — appears only when examples
 * are enabled, so the numbering shifts accordingly.
 */
function generateProcessSteps(options: { includeExamples: boolean }): string {
	const exampleSuffix = options.includeExamples ? ', and examples' : '';

	const steps = [
		'**Analyze user prompt** - Extract services, models, and technologies mentioned',
		'**Call get_best_practices** with identified techniques (internal context)',
		...(options.includeExamples
			? ['**Call get_workflow_examples** with search queries for mentioned services/models']
			: []),
		`**Identify workflow components** from user request, best practices${exampleSuffix}`,
		'**Call search_nodes IN PARALLEL** for all components (e.g., "Gmail", "OpenAI", "Schedule")',
		'**Call get_node_details IN PARALLEL** for ALL promising nodes (batch multiple calls)',
		`**Extract node information** from each node_details response:
- Node name from <name> tag
- Version number from <version> tag (required - extract the number)
- Connection-changing parameters from <connections> section`,
		'**Call submit_discovery_results** with complete nodesFound array',
	];

	const numbered: string[] = [];
	for (let i = 0; i < steps.length; i++) {
		numbered.push(`${i + 1}. ${steps[i]}`);
	}
	return numbered.join('\n');
}
/**
 * Build the AVAILABLE TOOLS section of the Discovery prompt.
 *
 * get_workflow_examples is advertised only when enabled via feature flags;
 * submit_discovery_results is always listed last.
 */
function generateAvailableToolsList(options: { includeExamples: boolean }): string {
	const exampleTool = options.includeExamples
		? ['- get_workflow_examples: Search for workflow examples as reference']
		: [];

	return [
		'- get_best_practices: Retrieve best practices (internal context)',
		'- search_nodes: Find n8n nodes by keyword',
		'- get_node_details: Get complete node information including <connections>',
		...exampleTool,
		'- submit_discovery_results: Submit final results',
	].join('\n');
}
/**
 * Build the Discovery Agent system prompt.
 *
 * @param options - Prompt options; `includeExamples` toggles the
 *   get_workflow_examples tool and its process step in the generated text.
 * @returns The full system prompt. `{techniques}` and
 *   `{exampleCategorizations}` are left as placeholders — presumably
 *   substituted by the prompt templating layer (confirm at the call site).
 *
 * FIX: corrected the "RELVANT" typo to "RELEVANT" in the CRITICAL RULES
 * section so the instruction reads correctly to the model.
 */
function generateDiscoveryPrompt(options: DiscoveryPromptOptions): string {
	const availableTools = generateAvailableToolsList(options);
	const processSteps = generateProcessSteps(options);
	return `You are a Discovery Agent for n8n AI Workflow Builder.
YOUR ROLE: Identify relevant n8n nodes and their connection-changing parameters.
AVAILABLE TOOLS:
${availableTools}
PROCESS:
${processSteps}
TECHNIQUE CATEGORIZATION:
When calling get_best_practices, select techniques that match the user's workflow intent.
<available_techniques>
{techniques}
</available_techniques>
<example_categorizations>
{exampleCategorizations}
</example_categorizations>
<technique_clarifications>
Common distinctions to get right:
- **NOTIFICATION vs CHATBOT**: Use NOTIFICATION when SENDING emails/messages/alerts (including to Telegram CHANNELS which are broadcast-only). Use CHATBOT only when RECEIVING and REPLYING to direct messages in a conversation.
- **MONITORING**: Use when workflow TRIGGERS on external events (new record created, status changed, incoming webhook, new message in channel). NOT just scheduled runs.
- **SCRAPING_AND_RESEARCH vs DATA_EXTRACTION**: Use SCRAPING when fetching from EXTERNAL sources (APIs, websites, social media). Use DATA_EXTRACTION for parsing INTERNAL data you already have.
- **TRIAGE**: Use when SELECTING, PRIORITIZING, ROUTING, or QUALIFYING items (e.g., "pick the best", "route to correct team", "qualify leads").
- **DOCUMENT_PROCESSING**: Use for ANY file handling - PDFs, images, videos, Excel, Google Sheets, audio files, file uploads in forms.
- **HUMAN_IN_THE_LOOP**: Use when workflow PAUSES for human approval, review, signing documents, responding to polls, or any manual input before continuing.
- **DATA_ANALYSIS**: Use when ANALYZING, CLASSIFYING, IDENTIFYING PATTERNS, or UNDERSTANDING data (e.g., "analyze outcomes", "learn from previous", "classify by type", "identify trends").
- **KNOWLEDGE_BASE**: Use when storing/retrieving from a DATA SOURCE for Q&A - includes vector DBs, spreadsheets used as databases, document collections.
- **DATA_TRANSFORMATION**: Use when CONVERTING data format, creating REPORTS/SUMMARIES from analyzed data, or restructuring output.
</technique_clarifications>
Technique selection rules:
- Select ALL techniques that apply (most workflows use 2-4)
- Maximum 5 techniques
- Only select techniques you're confident apply
CONNECTION-CHANGING PARAMETERS - CRITICAL RULES:
A parameter is connection-changing ONLY IF it appears in <input> or <output> expressions within <node_details>.
**How to identify:**
1. Look at the <connections> section in node details
2. Check if <input> or <output> uses expressions like: ={{...parameterName...}}
3. If a parameter is referenced in these expressions, it IS connection-changing
4. If a parameter is NOT in <input>/<output> expressions, it is NOT connection-changing
**Example from AI Agent:**
\`\`\`xml
<input>={{...hasOutputParser, needsFallback...}}</input>
\`\`\`
→ hasOutputParser and needsFallback ARE connection-changing (they control which inputs appear)
**Counter-example:**
\`\`\`xml
<properties>
<property name="promptType">...</property> <!-- NOT in <input>/<output> -->
<property name="systemMessage">...</property> <!-- NOT in <input>/<output> -->
</properties>
\`\`\`
→ promptType and systemMessage are NOT connection-changing (they don't affect connections)
**Common connection-changing parameters:**
- Vector Store: mode (appears in <input>/<output> expressions)
- AI Agent: hasOutputParser, needsFallback (appears in <input> expression)
- Merge: numberInputs (appears in <input> expression)
- Webhook: responseMode (appears in <output> expression)
<dynamic_output_nodes>
Some nodes have DYNAMIC outputs that depend on parameter values:
**Switch Node** (n8n-nodes-base.switch):
- When mode is "rules", the number of outputs equals the number of routing rules
- Connection parameter: mode: "rules" - CRITICAL for enabling rule-based routing
- Each rule in rules.values[] creates one output
- The rules parameter uses the same filter structure as IF node conditions
- ALWAYS flag mode as connection-changing with possibleValues: ["rules", "expression"]
**Merge Node** (n8n-nodes-base.merge):
- numberInputs parameter controls how many inputs the node accepts
When you find these nodes, ALWAYS flag mode/numberInputs as connection-changing parameters with possibleValues.
</dynamic_output_nodes>
SUB-NODES SEARCHES:
When searching for AI nodes, ALSO search for their required sub-nodes:
- "AI Agent" → also search for "Chat Model", "Memory", "Output Parser"
- "Basic LLM Chain" → also search for "Chat Model", "Output Parser"
- "Vector Store" → also search for "Embeddings", "Document Loader"
STRUCTURED OUTPUT PARSER - WHEN TO INCLUDE:
Search for "Structured Output Parser" (@n8n/n8n-nodes-langchain.outputParserStructured) when:
- AI output will be used programmatically (conditions, formatting, database storage, API calls)
- AI needs to extract specific fields (e.g., score, category, priority, action items)
- AI needs to classify/categorize data into defined categories
- Downstream nodes need to access specific fields from AI response (e.g., $json.score, $json.category)
- Output will be displayed in a formatted way (e.g., HTML email with specific sections)
- Data needs validation against a schema before processing
- Always use search_nodes to find the exact node names and versions - NEVER guess versions
CRITICAL RULES:
- NEVER ask clarifying questions
- ALWAYS call get_best_practices first
- THEN Call search_nodes to learn about available nodes and their inputs and outputs
- FINALLY call get_node_details IN PARALLEL for speed to get more details about RELEVANT node
- ALWAYS extract version number from <version> tag in node details
- NEVER guess node versions - always use search_nodes to find exact versions
- ONLY flag connectionChangingParameters if they appear in <input> or <output> expressions
- If no parameters appear in connection expressions, return empty array []
- Output ONLY: nodesFound with {{ nodeName, version, reasoning, connectionChangingParameters }}
DO NOT:
- Output text commentary between tool calls
- Include bestPractices or categorization in submit_discovery_results
- Flag parameters that don't affect connections
- Stop without calling submit_discovery_results
`;
}
/**
* Discovery Subgraph State
*/
@@ -476,7 +161,7 @@ export class DiscoverySubgraph extends BaseSubgraph<
});
// Generate prompt based on feature flags
const discoveryPrompt = generateDiscoveryPrompt({ includeExamples });
const discoveryPrompt = buildDiscoveryPrompt({ includeExamples });
// Create agent with tools bound (including submit tool)
const systemPrompt = ChatPromptTemplate.fromMessages([

View File

@@ -31,7 +31,7 @@ jest.mock('@/tools/update-node-parameters.tool', () => ({
jest.mock('@/tools/get-node-parameter.tool', () => ({
createGetNodeParameterTool: jest.fn().mockReturnValue({ tool: { name: 'get_node_parameter' } }),
}));
jest.mock('@/tools/prompts/main-agent.prompt', () => ({
jest.mock('@/prompts/legacy-agent.prompt', () => ({
mainAgentPrompt: {
invoke: jest.fn().mockResolvedValue('mocked prompt'),
},
@@ -67,7 +67,7 @@ Object.defineProperty(global, 'crypto', {
import { MAX_AI_BUILDER_PROMPT_LENGTH } from '@/constants';
import { ValidationError } from '@/errors';
import { createMainAgentPrompt } from '@/tools/prompts/main-agent.prompt';
import { createMainAgentPrompt } from '@/prompts/legacy-agent.prompt';
import type { StreamOutput } from '@/types/streaming';
import { createStreamProcessor } from '@/utils/stream-processor';
import {

View File

@@ -1,6 +1,6 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { ToolMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage, RemoveMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage, isAIMessage, RemoveMessage } from '@langchain/core/messages';
import type { RunnableConfig } from '@langchain/core/runnables';
import type { LangChainTracer } from '@langchain/core/tracers/tracer_langchain';
import type { MemorySaver, StateSnapshot } from '@langchain/langgraph';
@@ -19,6 +19,7 @@ import {
MAX_AI_BUILDER_PROMPT_LENGTH,
MAX_INPUT_TOKENS,
} from '@/constants';
import { createMainAgentPrompt } from '@/prompts/legacy-agent.prompt';
import { trimWorkflowJSON } from '@/utils/trim-workflow-context';
import { conversationCompactChain } from './chains/conversation-compact';
@@ -27,7 +28,6 @@ import { LLMServiceError, ValidationError, WorkflowStateError } from './errors';
import { createMultiAgentWorkflowWithSubgraphs } from './multi-agent-workflow-subgraphs';
import { SessionManagerService } from './session-manager.service';
import { getBuilderTools } from './tools/builder-tools';
import { createMainAgentPrompt } from './tools/prompts/main-agent.prompt';
import type { SimpleWorkflow } from './types/workflow';
import {
applyCacheControlMarkers,
@@ -143,6 +143,8 @@ export interface WorkflowBuilderAgentConfig {
onGenerationSuccess?: () => Promise<void>;
/** Metadata to include in LangSmith traces */
runMetadata?: Record<string, unknown>;
/** Feature flags for enabling/disabling features */
featureFlags?: BuilderFeatureFlags;
}
export interface ExpressionValue {
@@ -287,7 +289,7 @@ export class WorkflowBuilderAgent {
const shouldContinue = ({ messages }: typeof WorkflowState.State) => {
const lastMessage = messages[messages.length - 1];
if (!(lastMessage instanceof AIMessage)) {
if (!lastMessage || !isAIMessage(lastMessage)) {
throw new WorkflowStateError('Expected last message to be generated by the AI agent');
}

View File

@@ -292,7 +292,9 @@ describe('McpClientTool', () => {
it('should successfully execute a tool', async () => {
jest.spyOn(Client.prototype, 'connect').mockResolvedValue();
jest.spyOn(Client.prototype, 'callTool').mockResolvedValue({ content: 'Sunny' });
jest
.spyOn(Client.prototype, 'callTool')
.mockResolvedValue({ toolResult: 'Sunny', content: [] });
jest.spyOn(Client.prototype, 'listTools').mockResolvedValue({
tools: [
{
@@ -326,9 +328,11 @@ describe('McpClientTool', () => {
it('should handle tool errors', async () => {
jest.spyOn(Client.prototype, 'connect').mockResolvedValue();
jest
.spyOn(Client.prototype, 'callTool')
.mockResolvedValue({ isError: true, content: [{ text: 'Weather unknown at location' }] });
jest.spyOn(Client.prototype, 'callTool').mockResolvedValue({
isError: true,
toolResult: 'Weather unknown at location',
content: [{ text: 'Weather unknown at location' }],
});
jest.spyOn(Client.prototype, 'listTools').mockResolvedValue({
tools: [
{

View File

@@ -114,7 +114,7 @@ export async function connectMcpClient({
return createResultError({ type: 'invalid_url', error: endpoint.error });
}
const client = new Client({ name, version: version.toString() }, { capabilities: { tools: {} } });
const client = new Client({ name, version: version.toString() }, { capabilities: {} });
if (serverTransport === 'httpStreamable') {
try {

View File

@@ -173,9 +173,13 @@ export class WorkflowToolService {
return processedResponse;
}
// If manualLogging is false we've been called by the engine and need
// the structured response.
if (metadata && 'setMetadata' in context) {
void context.setMetadata(metadata);
}
return responseData;
} catch (error) {
// Check if error is due to cancellation

View File

@@ -203,7 +203,7 @@
"@langchain/redis": "1.0.1",
"@langchain/textsplitters": "1.0.1",
"@langchain/weaviate": "1.0.1",
"@modelcontextprotocol/sdk": "1.20.0",
"@modelcontextprotocol/sdk": "1.24.0",
"@mozilla/readability": "0.6.0",
"@n8n/client-oauth2": "workspace:*",
"@n8n/config": "workspace:*",

View File

@@ -54,7 +54,15 @@ void (async function start() {
});
sentry = Container.get(TaskRunnerSentry);
await sentry.initIfEnabled();
try {
await sentry.initIfEnabled();
} catch (error) {
console.error(
'FAILED TO INITIALIZE SENTRY. ERROR REPORTING WILL BE DISABLED. THIS IS LIKELY A CONFIGURATION OR ENVIRONMENT ISSUE.',
error,
);
sentry = undefined;
}
runner = new JsTaskRunner(config);
runner.on('runner:reached-idle-timeout', () => {

View File

@@ -6,6 +6,7 @@ import type {
InvalidAuthTokenRepository,
UserRepository,
} from '@n8n/db';
import { GLOBAL_OWNER_ROLE } from '@n8n/db';
import type { NextFunction, Response } from 'express';
import { mock } from 'jest-mock-extended';
import jwt from 'jsonwebtoken';
@@ -15,6 +16,7 @@ import { AUTH_COOKIE_NAME } from '@/constants';
import type { MfaService } from '@/mfa/mfa.service';
import { JwtService } from '@/services/jwt.service';
import type { UrlService } from '@/services/url.service';
import type { License } from '@/license';
describe('AuthService', () => {
const browserId = 'test-browser-id';
@@ -35,10 +37,11 @@ describe('AuthService', () => {
const userRepository = mock<UserRepository>();
const invalidAuthTokenRepository = mock<InvalidAuthTokenRepository>();
const mfaService = mock<MfaService>();
const license = mock<License>();
const authService = new AuthService(
globalConfig,
mock(),
mock(),
license,
jwtService,
urlService,
userRepository,
@@ -61,6 +64,7 @@ describe('AuthService', () => {
globalConfig.userManagement.jwtSessionDurationHours = 168;
globalConfig.userManagement.jwtRefreshTimeoutHours = 0;
globalConfig.auth.cookie = { secure: true, samesite: 'lax' };
license.isWithinUsersLimit.mockReturnValue(true);
});
describe('createJWTHash', () => {
@@ -520,6 +524,29 @@ describe('AuthService', () => {
});
});
describe('when user limit is reached', () => {
it('should block issuance if the user is not the global owner', async () => {
license.isWithinUsersLimit.mockReturnValue(false);
expect(() => {
authService.issueCookie(res, user, false, browserId);
}).toThrowError('Maximum number of users reached');
});
it('should allow issuance if the user is the global owner', async () => {
license.isWithinUsersLimit.mockReturnValue(false);
user.role = GLOBAL_OWNER_ROLE;
expect(() => {
authService.issueCookie(res, user, false, browserId);
}).not.toThrowError('Maximum number of users reached');
expect(res.cookie).toHaveBeenCalledWith('n8n-auth', validToken, {
httpOnly: true,
maxAge: 604800000,
sameSite: 'lax',
secure: true,
});
});
});
it('should issue a cookie with the correct options, when 2FA was used', () => {
authService.issueCookie(res, user, true, browserId);

View File

@@ -10,7 +10,6 @@ import type { NextFunction, Response } from 'express';
import { JsonWebTokenError, TokenExpiredError } from 'jsonwebtoken';
import type { StringValue as TimeUnitValue } from 'ms';
import config from '@/config';
import { AuthError } from '@/errors/response-errors/auth.error';
import { ForbiddenError } from '@/errors/response-errors/forbidden.error';
import { License } from '@/license';
@@ -171,11 +170,7 @@ export class AuthService {
// TODO: move this check to the login endpoint in AuthController
// If the instance has exceeded its user quota, prevent non-owners from logging in
const isWithinUsersLimit = this.license.isWithinUsersLimit();
if (
config.getEnv('userManagement.isInstanceOwnerSetUp') &&
user.role.slug !== GLOBAL_OWNER_ROLE.slug &&
!isWithinUsersLimit
) {
if (user.role.slug !== GLOBAL_OWNER_ROLE.slug && !isWithinUsersLimit) {
throw new ForbiddenError(RESPONSE_ERROR_MESSAGES.USERS_QUOTA_REACHED);
}

View File

@@ -3,7 +3,6 @@ import {
User,
CredentialsRepository,
ProjectRepository,
SettingsRepository,
SharedCredentialsRepository,
SharedWorkflowRepository,
UserRepository,
@@ -19,6 +18,7 @@ const defaultUserProps = {
lastName: null,
email: null,
password: null,
lastActiveAt: null,
role: 'global:owner',
};
@@ -53,11 +53,6 @@ export class Reset extends BaseCommand {
);
await Container.get(SharedCredentialsRepository).save(newSharedCredentials);
await Container.get(SettingsRepository).update(
{ key: 'userManagement.isInstanceOwnerSetUp' },
{ value: 'false' },
);
this.logger.info('Successfully reset the database to default user state.');
}

View File

@@ -7,10 +7,12 @@ import { Container } from '@n8n/di';
export const schema = {
userManagement: {
/**
* @important Do not remove until after cloud hooks are updated to stop using convict config.
* @important Do not remove isInstanceOwnerSetUp until after cloud hooks (user-management) are updated to stop using
* this property
* @deprecated
*/
isInstanceOwnerSetUp: {
// n8n loads this setting from DB on startup
// n8n loads this setting from SettingsRepository (DB) on startup
doc: "Whether the instance owner's account has been set up",
format: Boolean,
default: false,

View File

@@ -76,7 +76,6 @@ type ToReturnType<T extends ConfigOptionPath> = T extends NumericPath
type ExceptionPaths = {
'queue.bull.redis': RedisOptions;
processedDataManager: IProcessedDataConfig;
'userManagement.isInstanceOwnerSetUp': boolean;
'ui.banners.dismissed': string[] | undefined;
easyAIWorkflowOnboarded: boolean | undefined;
};

View File

@@ -22,6 +22,7 @@ import { ForbiddenError } from '@/errors/response-errors/forbidden.error';
import config from '@/config';
import type { AuthlessRequest } from '@/requests';
import { v4 as uuidv4 } from 'uuid';
import { OwnershipService } from '@/services/ownership.service';
describe('InvitationController', () => {
const logger: Logger = mockInstance(Logger);
@@ -33,22 +34,29 @@ describe('InvitationController', () => {
const userRepository: UserRepository = mockInstance(UserRepository);
const postHog: PostHogClient = mockInstance(PostHogClient);
const eventService: EventService = mockInstance(EventService);
const ownershipService: OwnershipService = mockInstance(OwnershipService);
function defaultInvitationController() {
return new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
ownershipService,
);
}
describe('inviteUser', () => {
it('throws a BadRequestError if SSO is enabled', async () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(true);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const user = mock<User>({
id: '123',
@@ -77,18 +85,9 @@ describe('InvitationController', () => {
it('throws a ForbiddenError if the user limit quota has been reached', async () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(false);
jest.spyOn(license, 'isWithinUsersLimit').mockReturnValue(false);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const user = mock<User>({
id: '123',
@@ -112,18 +111,9 @@ describe('InvitationController', () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(false);
jest.spyOn(license, 'isWithinUsersLimit').mockReturnValue(true);
jest.spyOn(config, 'getEnv').mockReturnValue(false);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(false));
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const user = mock<User>({
id: '123',
@@ -148,18 +138,9 @@ describe('InvitationController', () => {
jest.spyOn(license, 'isWithinUsersLimit').mockReturnValue(true);
jest.spyOn(config, 'getEnv').mockReturnValue(true);
jest.spyOn(license, 'isAdvancedPermissionsLicensed').mockReturnValue(false);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const user = mock<User>({
id: '123',
@@ -209,17 +190,9 @@ describe('InvitationController', () => {
jest.spyOn(config, 'getEnv').mockReturnValue(true);
jest.spyOn(license, 'isAdvancedPermissionsLicensed').mockReturnValue(true);
jest.spyOn(userService, 'inviteUsers').mockResolvedValue(inviteUsersResult);
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const invitationController = defaultInvitationController();
const user = mock<User>({
id: '123',
@@ -255,19 +228,11 @@ describe('InvitationController', () => {
describe('acceptInvitation', () => {
it('throws a BadRequestError if SSO is enabled', async () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(true);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const id = uuidv4();
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const payload = new AcceptInvitationRequestDto({
inviterId: id,
@@ -291,19 +256,11 @@ describe('InvitationController', () => {
it('throws a BadRequestError if the inviter ID and invitee ID are not found in the database', async () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(false);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const id = uuidv4();
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const payload = new AcceptInvitationRequestDto({
inviterId: id,
@@ -332,6 +289,8 @@ describe('InvitationController', () => {
it('throws a BadRequestError if the invitee already has a password', async () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(false);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const invitee = mock<User>({
id: '123',
email: 'valid@email.com',
@@ -346,17 +305,7 @@ describe('InvitationController', () => {
jest.spyOn(userRepository, 'find').mockResolvedValue([inviter, invitee]);
const id = uuidv4();
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const payload = new AcceptInvitationRequestDto({
inviterId: id,
@@ -379,6 +328,8 @@ describe('InvitationController', () => {
it('accepts the invitation successfully', async () => {
jest.spyOn(ssoHelpers, 'isSsoCurrentAuthenticationMethod').mockReturnValue(false);
jest.spyOn(ownershipService, 'hasInstanceOwner').mockReturnValue(Promise.resolve(true));
const id = uuidv4();
const inviter = mock<User>({
id: '124',
@@ -400,17 +351,7 @@ describe('InvitationController', () => {
jest.spyOn(userService, 'toPublic').mockResolvedValue(invitee as unknown as PublicUser);
jest.spyOn(externalHooks, 'run').mockResolvedValue(invitee as never);
const invitationController = new InvitationController(
logger,
externalHooks,
authService,
userService,
license,
passwordUtility,
userRepository,
postHog,
eventService,
);
const invitationController = defaultInvitationController();
const payload = new AcceptInvitationRequestDto({
inviterId: id,

View File

@@ -1,103 +1,40 @@
import type { DismissBannerRequestDto, OwnerSetupRequestDto } from '@n8n/api-types';
import type { Logger } from '@n8n/backend-common';
import {
type AuthenticatedRequest,
type User,
type PublicUser,
type SettingsRepository,
type UserRepository,
GLOBAL_OWNER_ROLE,
} from '@n8n/db';
import type { Response } from 'express';
import type { DismissBannerRequestDto } from '@n8n/api-types';
import { mock } from 'jest-mock-extended';
import type { AuthService } from '@/auth/auth.service';
import config from '@/config';
import { OwnerController } from '@/controllers/owner.controller';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import type { EventService } from '@/events/event.service';
import type { BannerService } from '@/services/banner.service';
import type { PasswordUtility } from '@/services/password.utility';
import type { UserService } from '@/services/user.service';
import type { OwnershipService } from '@/services/ownership.service';
import type { PostHogClient } from '@/posthog';
describe('OwnerController', () => {
const configGetSpy = jest.spyOn(config, 'getEnv');
const configSetSpy = jest.spyOn(config, 'set');
const logger = mock<Logger>();
const eventService = mock<EventService>();
const authService = mock<AuthService>();
const bannerService = mock<BannerService>();
const userService = mock<UserService>();
const userRepository = mock<UserRepository>();
const settingsRepository = mock<SettingsRepository>();
const passwordUtility = mock<PasswordUtility>();
const ownershipService = mock<OwnershipService>();
const postHogClient = mock<PostHogClient>();
const controller = new OwnerController(
logger,
eventService,
settingsRepository,
authService,
bannerService,
userService,
passwordUtility,
mock(),
userRepository,
postHogClient,
ownershipService,
);
describe('setupOwner', () => {
it('should throw a BadRequestError if the instance owner is already setup', async () => {
configGetSpy.mockReturnValue(true);
it('should pass on errors from the service', async () => {
jest
.spyOn(ownershipService, 'setupOwner')
.mockRejectedValueOnce(new BadRequestError('Instance owner already setup'));
await expect(controller.setupOwner(mock(), mock(), mock())).rejects.toThrowError(
new BadRequestError('Instance owner already setup'),
);
expect(userRepository.findOneOrFail).not.toHaveBeenCalled();
expect(userRepository.save).not.toHaveBeenCalled();
expect(authService.issueCookie).not.toHaveBeenCalled();
expect(settingsRepository.update).not.toHaveBeenCalled();
expect(configSetSpy).not.toHaveBeenCalled();
expect(eventService.emit).not.toHaveBeenCalled();
expect(logger.debug).toHaveBeenCalledWith(
'Request to claim instance ownership failed because instance owner already exists',
);
});
it('should setup the instance owner successfully', async () => {
const user = mock<User>({
id: 'userId',
role: GLOBAL_OWNER_ROLE,
authIdentities: [],
});
const browserId = 'test-browser-id';
const req = mock<AuthenticatedRequest>({ user, browserId, authInfo: { usedMfa: false } });
const res = mock<Response>();
const payload = mock<OwnerSetupRequestDto>({
email: 'valid@email.com',
password: 'NewPassword123',
firstName: 'Jane',
lastName: 'Doe',
});
configGetSpy.mockReturnValue(false);
userRepository.findOneOrFail.mockResolvedValue(user);
userRepository.save.mockResolvedValue(user);
userService.toPublic.mockResolvedValue(mock<PublicUser>({ id: 'newUserId' }));
const result = await controller.setupOwner(req, res, payload);
expect(userRepository.findOneOrFail).toHaveBeenCalledWith({
where: { role: { slug: GLOBAL_OWNER_ROLE.slug } },
relations: ['role'],
});
expect(userRepository.save).toHaveBeenCalledWith(user, { transaction: false });
expect(authService.issueCookie).toHaveBeenCalledWith(res, user, false, browserId);
expect(settingsRepository.update).toHaveBeenCalledWith(
{ key: 'userManagement.isInstanceOwnerSetUp' },
{ value: JSON.stringify(true) },
);
expect(configSetSpy).toHaveBeenCalledWith('userManagement.isInstanceOwnerSetUp', true);
expect(eventService.emit).toHaveBeenCalledWith('instance-owner-setup', { userId: 'userId' });
expect(result.id).toEqual('newUserId');
});
});

View File

@@ -16,7 +16,6 @@ import { Request } from 'express';
import { v4 as uuid } from 'uuid';
import { ActiveWorkflowManager } from '@/active-workflow-manager';
import config from '@/config';
import { inE2ETests } from '@/constants';
import { MessageEventBus } from '@/eventbus/message-event-bus/message-event-bus';
import type { FeatureReturnType } from '@/license';
@@ -223,8 +222,7 @@ export class E2EController {
@Get('/env-feature-flags', { skipAuth: true })
async getEnvFeatureFlags() {
const currentFlags = this.frontendService.getSettings().envFeatureFlags;
return currentFlags;
return (await this.frontendService.getSettings()).envFeatureFlags;
}
@Patch('/env-feature-flags', { skipAuth: true })
@@ -254,7 +252,7 @@ export class E2EController {
}
// Return the current environment feature flags
const currentFlags = this.frontendService.getSettings().envFeatureFlags;
const currentFlags = (await this.frontendService.getSettings()).envFeatureFlags;
return {
success: true,
message: 'Environment feature flags updated',
@@ -364,13 +362,6 @@ export class E2EController {
mfaRecoveryCodes: encryptedRecoveryCodes,
});
}
await this.settingsRepo.update(
{ key: 'userManagement.isInstanceOwnerSetUp' },
{ value: 'true' },
);
config.set('userManagement.isInstanceOwnerSetUp', true);
}
private async resetCache() {

View File

@@ -6,7 +6,6 @@ import { Post, GlobalScope, RestController, Body, Param } from '@n8n/decorators'
import { Response } from 'express';
import { AuthService } from '@/auth/auth.service';
import config from '@/config';
import { RESPONSE_ERROR_MESSAGES } from '@/constants';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { ForbiddenError } from '@/errors/response-errors/forbidden.error';
@@ -17,6 +16,7 @@ import { PostHogClient } from '@/posthog';
import { AuthlessRequest } from '@/requests';
import { PasswordUtility } from '@/services/password.utility';
import { UserService } from '@/services/user.service';
import { OwnershipService } from '@/services/ownership.service';
import { isSsoCurrentAuthenticationMethod } from '@/sso.ee/sso-helpers';
@RestController('/invitations')
@@ -31,6 +31,7 @@ export class InvitationController {
private readonly userRepository: UserRepository,
private readonly postHog: PostHogClient,
private readonly eventService: EventService,
private readonly ownershipService: OwnershipService,
) {}
/**
@@ -64,7 +65,7 @@ export class InvitationController {
throw new ForbiddenError(RESPONSE_ERROR_MESSAGES.USERS_QUOTA_REACHED);
}
if (!config.getEnv('userManagement.isInstanceOwnerSetUp')) {
if (!(await this.ownershipService.hasInstanceOwner())) {
this.logger.debug(
'Request to send email invite(s) to user(s) failed because the owner account is not set up',
);

View File

@@ -1,47 +0,0 @@
import { shouldSkipAuthOnOAuthCallback } from '../abstract-oauth.controller';
describe('shouldSkipAuthOnOAuthCallback', () => {
const originalEnv = process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK;
afterEach(() => {
// Restore original environment variable after each test
if (originalEnv === undefined) {
delete process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK;
} else {
process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK = originalEnv;
}
});
describe('when N8N_SKIP_AUTH_ON_OAUTH_CALLBACK is not set', () => {
beforeEach(() => {
delete process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK;
});
it('should return true', () => {
expect(shouldSkipAuthOnOAuthCallback()).toBe(false);
});
});
describe('with various environment variable values', () => {
const testCases = [
{ value: 'true', expected: true },
{ value: 'TRUE', expected: true },
{ value: 'True', expected: true },
{ value: 'false', expected: false },
{ value: 'FALSE', expected: false },
{ value: 'False', expected: false },
{ value: '', expected: false },
{ value: '1', expected: false },
{ value: 'yes', expected: false },
{ value: 'on', expected: false },
{ value: 'enabled', expected: false },
{ value: ' ', expected: false },
{ value: ' true ', expected: false },
] as const;
test.each(testCases)('"%s" value should return %s', ({ value, expected }) => {
process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK = value;
expect(shouldSkipAuthOnOAuthCallback()).toBe(expected);
});
});
});

View File

@@ -1,57 +1,22 @@
import { Logger } from '@n8n/backend-common';
import { mockInstance } from '@n8n/backend-test-utils';
import { Time } from '@n8n/constants';
import type { CredentialsEntity, User } from '@n8n/db';
import { CredentialsRepository, GLOBAL_OWNER_ROLE } from '@n8n/db';
import { type CredentialsEntity, type User } from '@n8n/db';
import { Container } from '@n8n/di';
import Csrf from 'csrf';
import { mock } from 'jest-mock-extended';
import axios from 'axios';
import type { Response } from 'express';
import { captor, mock } from 'jest-mock-extended';
import { Cipher, type InstanceSettings, ExternalSecretsProxy } from 'n8n-core';
import type { IWorkflowExecuteAdditionalData } from 'n8n-workflow';
import nock from 'nock';
import { OAuth1CredentialController } from '@/controllers/oauth/oauth1-credential.controller';
import { CredentialsFinderService } from '@/credentials/credentials-finder.service';
import { CredentialsHelper } from '@/credentials-helper';
import { VariablesService } from '@/environments.ee/variables/variables.service.ee';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { NotFoundError } from '@/errors/response-errors/not-found.error';
import { ExternalHooks } from '@/external-hooks';
import type { OAuthRequest } from '@/requests';
import * as WorkflowExecuteAdditionalData from '@/workflow-execute-additional-data';
import { OauthService } from '@/oauth/oauth.service';
import { ExternalHooks } from '@/external-hooks';
jest.mock('@/workflow-execute-additional-data');
jest.mock('axios');
describe('OAuth1CredentialController', () => {
const oauthService = mockInstance(OauthService);
mockInstance(Logger);
mockInstance(ExternalHooks);
mockInstance(ExternalSecretsProxy);
mockInstance(VariablesService, {
getAllCached: async () => [],
});
const additionalData = mock<IWorkflowExecuteAdditionalData>();
(WorkflowExecuteAdditionalData.getBase as jest.Mock).mockReturnValue(additionalData);
const cipher = new Cipher(mock<InstanceSettings>({ encryptionKey: 'password' }));
Container.set(Cipher, cipher);
const credentialsHelper = mockInstance(CredentialsHelper);
const credentialsRepository = mockInstance(CredentialsRepository);
const credentialsFinderService = mockInstance(CredentialsFinderService);
const csrfSecret = 'csrf-secret';
const user = mock<User>({
id: '123',
password: 'password',
authIdentities: [],
role: GLOBAL_OWNER_ROLE,
});
const credential = mock<CredentialsEntity>({
id: '1',
name: 'Test Credential',
type: 'oAuth1Api',
data: cipher.encrypt({}),
});
const controller = Container.get(OAuth1CredentialController);
@@ -64,64 +29,26 @@ describe('OAuth1CredentialController', () => {
});
describe('getAuthUri', () => {
it('should throw a BadRequestError when credentialId is missing in the query', async () => {
const req = mock<OAuthRequest.OAuth1Credential.Auth>({ query: { id: '' } });
await expect(controller.getAuthUri(req)).rejects.toThrowError(
new BadRequestError('Required credential ID is missing'),
);
});
it('should throw a NotFoundError when no matching credential is found for the user', async () => {
credentialsFinderService.findCredentialForUser.mockResolvedValueOnce(null);
const req = mock<OAuthRequest.OAuth1Credential.Auth>({ user, query: { id: '1' } });
await expect(controller.getAuthUri(req)).rejects.toThrowError(
new NotFoundError('Credential not found'),
);
});
it('should return a valid auth URI', async () => {
jest.spyOn(Csrf.prototype, 'secretSync').mockReturnValueOnce(csrfSecret);
jest.spyOn(Csrf.prototype, 'create').mockReturnValueOnce('token');
credentialsFinderService.findCredentialForUser.mockResolvedValueOnce(credential);
credentialsHelper.getDecrypted.mockResolvedValueOnce({});
credentialsHelper.applyDefaultsAndOverwrites.mockResolvedValueOnce({
const mockResolvedCredential = mock<CredentialsEntity>({ id: '1' });
oauthService.getCredential.mockResolvedValueOnce(mockResolvedCredential);
oauthService.createCsrfState.mockReturnValueOnce(['csrf-secret', 'state']);
oauthService.getOAuthCredentials.mockResolvedValueOnce({
requestTokenUrl: 'https://example.domain/oauth/request_token',
authUrl: 'https://example.domain/oauth/authorize',
accessTokenUrl: 'https://example.domain/oauth/access_token',
signatureMethod: 'HMAC-SHA1',
signatureMethod: 'HMAC-SHA1' as const,
});
jest.mocked(axios).request.mockResolvedValueOnce({ data: { oauth_token: 'random-token' } });
const req = mock<OAuthRequest.OAuth1Credential.Auth>({
user: mock<User>({ id: '123' }),
query: { id: '1' },
});
nock('https://example.domain')
.post('/oauth/request_token', {
oauth_callback:
'http://localhost:5678/rest/oauth1-credential/callback?state=eyJ0b2tlbiI6InRva2VuIiwiY2lkIjoiMSIsImNyZWF0ZWRBdCI6MTcwNjc1MDYyNTY3OCwidXNlcklkIjoiMTIzIn0=',
})
.once()
.reply(200, { oauth_token: 'random-token' });
const req = mock<OAuthRequest.OAuth1Credential.Auth>({ user, query: { id: '1' } });
const authUri = await controller.getAuthUri(req);
expect(authUri).toEqual('https://example.domain/oauth/authorize?oauth_token=random-token');
const dataCaptor = captor();
expect(credentialsRepository.update).toHaveBeenCalledWith(
'1',
expect.objectContaining({
data: dataCaptor,
id: '1',
name: 'Test Credential',
type: 'oAuth1Api',
}),
);
expect(cipher.decrypt(dataCaptor.value)).toEqual(
JSON.stringify({ csrfSecret: 'csrf-secret' }),
);
expect(credentialsHelper.getDecrypted).toHaveBeenCalledWith(
additionalData,
credential,
credential.type,
'internal',
undefined,
false,
expect(oauthService.encryptAndSaveData).toHaveBeenCalledWith(
mockResolvedCredential,
expect.objectContaining({ csrfSecret: 'csrf-secret' }),
);
});
});
@@ -149,113 +76,40 @@ describe('OAuth1CredentialController', () => {
invalidReq.query = { state: 'test' } as OAuthRequest.OAuth1Credential.Callback['query'];
await controller.handleCallback(invalidReq, res);
expect(res.render).toHaveBeenCalledWith('oauth-error-callback', {
error: {
message: 'Insufficient parameters for OAuth1 callback.',
reason: 'Received following query parameters: {"state":"test"}',
},
});
expect(credentialsRepository.findOneBy).not.toHaveBeenCalled();
});
it('should render the error page when `state` query param is invalid', async () => {
const invalidReq = mock<OAuthRequest.OAuth1Credential.Callback>({
query: {
oauth_verifier: 'verifier',
oauth_token: 'token',
state: 'test',
},
});
await controller.handleCallback(invalidReq, res);
expect(res.render).toHaveBeenCalledWith('oauth-error-callback', {
error: {
message: 'Invalid state format',
},
});
expect(credentialsRepository.findOneBy).not.toHaveBeenCalled();
});
it('should render the error page when credential is not found in DB', async () => {
credentialsRepository.findOneBy.mockResolvedValueOnce(null);
await controller.handleCallback(req, res);
expect(res.render).toHaveBeenCalledWith('oauth-error-callback', {
error: {
message: 'OAuth callback failed because of insufficient permissions',
},
});
expect(credentialsRepository.findOneBy).toHaveBeenCalledTimes(1);
expect(credentialsRepository.findOneBy).toHaveBeenCalledWith({ id: '1' });
});
it('should render the error page when state differs from the stored state in the credential', async () => {
credentialsRepository.findOneBy.mockResolvedValue(credential);
credentialsHelper.getDecrypted.mockResolvedValue({ csrfSecret: 'invalid' });
await controller.handleCallback(req, res);
expect(res.render).toHaveBeenCalledWith('oauth-error-callback', {
error: {
message: 'The OAuth callback state is invalid!',
},
});
});
it('should render the error page when state is older than 5 minutes', async () => {
credentialsRepository.findOneBy.mockResolvedValue(credential);
credentialsHelper.getDecrypted.mockResolvedValue({ csrfSecret });
jest.spyOn(Csrf.prototype, 'verify').mockReturnValueOnce(true);
jest.advanceTimersByTime(10 * Time.minutes.toMilliseconds);
await controller.handleCallback(req, res);
expect(res.render).toHaveBeenCalledWith('oauth-error-callback', {
error: {
message: 'The OAuth callback state is invalid!',
},
});
expect(oauthService.renderCallbackError).toHaveBeenCalledWith(
res,
'Insufficient parameters for OAuth1 callback.',
'Received following query parameters: {"state":"test"}',
);
});
it('should exchange the code for a valid token, and save it to DB', async () => {
credentialsRepository.findOneBy.mockResolvedValue(credential);
credentialsHelper.getDecrypted.mockResolvedValue({ csrfSecret });
credentialsHelper.applyDefaultsAndOverwrites.mockResolvedValueOnce({
const mockResolvedCredential = mock<CredentialsEntity>({ id: '1' });
oauthService.getCredential.mockResolvedValueOnce(mockResolvedCredential);
// @ts-ignore
oauthService.getDecryptedData.mockResolvedValue({ csrfSecret: 'invalid' });
oauthService.getOAuthCredentials.mockResolvedValueOnce({
requestTokenUrl: 'https://example.domain/oauth/request_token',
accessTokenUrl: 'https://example.domain/oauth/access_token',
signatureMethod: 'HMAC-SHA1',
});
jest.spyOn(Csrf.prototype, 'verify').mockReturnValueOnce(true);
nock('https://example.domain')
.post('/oauth/access_token', 'oauth_token=token&oauth_verifier=verifier')
.once()
.reply(200, 'access_token=new_token');
oauthService.resolveCredential.mockResolvedValueOnce([
mockResolvedCredential,
{ csrfSecret: 'invalid' },
{ accessTokenUrl: 'https://example.domain/oauth/access_token' },
]);
jest.mocked(axios).post.mockResolvedValueOnce({ data: { access_token: 'new_token' } });
await controller.handleCallback(req, res);
const dataCaptor = captor();
expect(credentialsRepository.update).toHaveBeenCalledWith(
'1',
// @ts-ignore
expect(oauthService.encryptAndSaveData).toHaveBeenCalledWith(
mockResolvedCredential,
expect.objectContaining({
data: dataCaptor,
id: '1',
name: 'Test Credential',
type: 'oAuth1Api',
oauthTokenData: { access_token: 'new_token' },
}),
);
expect(cipher.decrypt(dataCaptor.value)).toEqual(
JSON.stringify({ oauthTokenData: { access_token: 'new_token' } }),
['csrfSecret'],
);
expect(res.render).toHaveBeenCalledWith('oauth-callback');
expect(credentialsHelper.getDecrypted).toHaveBeenCalledWith(
additionalData,
credential,
credential.type,
'internal',
undefined,
true,
);
});
});
});

View File

@@ -9,7 +9,9 @@ import clientOAuth1 from 'oauth-1.0a';
import { OAuthRequest } from '@/requests';
import { AbstractOAuthController, skipAuthOnOAuthCallback } from './abstract-oauth.controller';
import { OauthService, OauthVersion, skipAuthOnOAuthCallback } from '@/oauth/oauth.service';
import { Logger } from '@n8n/backend-common';
import { ExternalHooks } from '@/external-hooks';
interface OAuth1CredentialData {
signatureMethod: 'HMAC-SHA256' | 'HMAC-SHA512' | 'HMAC-SHA1';
@@ -27,24 +29,24 @@ const algorithmMap = {
} as const;
@RestController('/oauth1-credential')
export class OAuth1CredentialController extends AbstractOAuthController {
override oauthVersion = 1;
export class OAuth1CredentialController {
constructor(
private readonly oauthService: OauthService,
private readonly externalHooks: ExternalHooks,
private readonly logger: Logger,
) {}
/** Get Authorization url */
@Get('/auth')
async getAuthUri(req: OAuthRequest.OAuth1Credential.Auth): Promise<string> {
const credential = await this.getCredential(req);
const additionalData = await this.getAdditionalData();
const decryptedDataOriginal = await this.getDecryptedDataForAuthUri(credential, additionalData);
const oauthCredentials = await this.applyDefaultsAndOverwrites<OAuth1CredentialData>(
credential,
decryptedDataOriginal,
additionalData,
);
const [csrfSecret, state] = this.createCsrfState(
credential.id,
skipAuthOnOAuthCallback ? undefined : req.user.id,
);
const credential = await this.oauthService.getCredential(req);
const oauthCredentials =
await this.oauthService.getOAuthCredentials<OAuth1CredentialData>(credential);
const [csrfSecret, state] = this.oauthService.createCsrfState({
cid: credential.id,
userId: skipAuthOnOAuthCallback ? undefined : req.user.id,
});
const signatureMethod = oauthCredentials.signatureMethod;
@@ -62,7 +64,7 @@ export class OAuth1CredentialController extends AbstractOAuthController {
};
const oauthRequestData = {
oauth_callback: `${this.baseUrl}/callback?state=${state}`,
oauth_callback: `${this.oauthService.getBaseUrl(OauthVersion.V1)}/callback?state=${state}`,
};
await this.externalHooks.run('oauth1.authenticate', [oAuthOptions, oauthRequestData]);
@@ -91,7 +93,7 @@ export class OAuth1CredentialController extends AbstractOAuthController {
const returnUri = `${oauthCredentials.authUrl}?oauth_token=${responseJson.oauth_token}`;
await this.encryptAndSaveData(credential, { csrfSecret });
await this.oauthService.encryptAndSaveData(credential, { csrfSecret });
this.logger.debug('OAuth1 authorization successful for new credential', {
userId: req.user.id,
@@ -108,7 +110,7 @@ export class OAuth1CredentialController extends AbstractOAuthController {
const { oauth_verifier, oauth_token, state: encodedState } = req.query;
if (!oauth_verifier || !oauth_token || !encodedState) {
return this.renderCallbackError(
return this.oauthService.renderCallbackError(
res,
'Insufficient parameters for OAuth1 callback.',
`Received following query parameters: ${JSON.stringify(req.query)}`,
@@ -116,7 +118,7 @@ export class OAuth1CredentialController extends AbstractOAuthController {
}
const [credential, _, oauthCredentials] =
await this.resolveCredential<OAuth1CredentialData>(req);
await this.oauthService.resolveCredential<OAuth1CredentialData>(req);
// Form URL encoded body https://datatracker.ietf.org/doc/html/rfc5849#section-3.5.2
const oauthToken = await axios.post<string>(
@@ -131,15 +133,18 @@ export class OAuth1CredentialController extends AbstractOAuthController {
const oauthTokenData = Object.fromEntries(paramParser.entries());
await this.encryptAndSaveData(credential, { oauthTokenData }, ['csrfSecret']);
await this.oauthService.encryptAndSaveData(credential, { oauthTokenData }, ['csrfSecret']);
this.logger.debug('OAuth1 callback successful for new credential', {
credentialId: credential.id,
});
return res.render('oauth-callback');
} catch (e) {
const error = ensureError(e);
return this.renderCallbackError(
return this.oauthService.renderCallbackError(
res,
error.message,
'body' in error ? jsonStringify(error.body) : undefined,

View File

@@ -20,44 +20,31 @@ import {
import pkceChallenge from 'pkce-challenge';
import * as qs from 'querystring';
import { AbstractOAuthController, skipAuthOnOAuthCallback } from './abstract-oauth.controller';
import {
oAuthAuthorizationServerMetadataSchema,
dynamicClientRegistrationResponseSchema,
} from './oauth2-dynamic-client-registration.schema';
import { GENERIC_OAUTH2_CREDENTIALS_WITH_EDITABLE_SCOPE } from '@/constants';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { OAuthRequest } from '@/requests';
import { OauthService, OauthVersion, skipAuthOnOAuthCallback } from '@/oauth/oauth.service';
import { Logger } from '@n8n/backend-common';
import { ExternalHooks } from '@/external-hooks';
@RestController('/oauth2-credential')
export class OAuth2CredentialController extends AbstractOAuthController {
override oauthVersion = 2;
export class OAuth2CredentialController {
constructor(
private readonly oauthService: OauthService,
private readonly logger: Logger,
private readonly externalHooks: ExternalHooks,
) {}
/** Get Authorization url */
@Get('/auth')
async getAuthUri(req: OAuthRequest.OAuth2Credential.Auth): Promise<string> {
const credential = await this.getCredential(req);
const additionalData = await this.getAdditionalData();
const decryptedDataOriginal = await this.getDecryptedDataForAuthUri(credential, additionalData);
// At some point in the past we saved hidden scopes to credentials (but shouldn't)
// Delete scope before applying defaults to make sure new scopes are present on reconnect
// Generic Oauth2 API is an exception because it needs to save the scope
if (
decryptedDataOriginal?.scope &&
credential.type.includes('OAuth2') &&
!GENERIC_OAUTH2_CREDENTIALS_WITH_EDITABLE_SCOPE.includes(credential.type)
) {
delete decryptedDataOriginal.scope;
}
const oauthCredentials = await this.applyDefaultsAndOverwrites<OAuth2CredentialData>(
credential,
decryptedDataOriginal,
additionalData,
);
const credential = await this.oauthService.getCredential(req);
const oauthCredentials: OAuth2CredentialData =
await this.oauthService.getOAuthCredentials<OAuth2CredentialData>(credential);
const toUpdate: ICredentialDataDecryptedObject = {};
@@ -102,7 +89,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
authentication,
);
const registerPayload = {
redirect_uris: [`${this.baseUrl}/callback`],
redirect_uris: [`${this.oauthService.getBaseUrl(OauthVersion.V2)}/callback`],
token_endpoint_auth_method,
grant_types,
response_types: ['code'],
@@ -135,10 +122,10 @@ export class OAuth2CredentialController extends AbstractOAuthController {
}
// Generate a CSRF prevention token and send it as an OAuth2 state string
const [csrfSecret, state] = this.createCsrfState(
credential.id,
skipAuthOnOAuthCallback ? undefined : req.user.id,
);
const [csrfSecret, state] = this.oauthService.createCsrfState({
cid: credential.id,
userId: skipAuthOnOAuthCallback ? undefined : req.user.id,
});
const oAuthOptions = {
...this.convertCredentialToOptions(oauthCredentials),
@@ -162,7 +149,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
toUpdate.codeVerifier = code_verifier;
}
await this.encryptAndSaveData(credential, toUpdate);
await this.oauthService.encryptAndSaveData(credential, toUpdate);
const oAuthObj = new ClientOAuth2(oAuthOptions);
const returnUri = oAuthObj.code.getUri();
@@ -181,7 +168,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
try {
const { code, state: encodedState } = req.query;
if (!code || !encodedState) {
return this.renderCallbackError(
return this.oauthService.renderCallbackError(
res,
'Insufficient parameters for OAuth2 callback.',
`Received following query parameters: ${JSON.stringify(req.query)}`,
@@ -189,7 +176,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
}
const [credential, decryptedDataOriginal, oauthCredentials] =
await this.resolveCredential<OAuth2CredentialData>(req);
await this.oauthService.resolveCredential<OAuth2CredentialData>(req);
let options: Partial<ClientOAuth2Options> = {};
@@ -233,7 +220,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
...oauthToken.data,
};
await this.encryptAndSaveData(credential, { oauthTokenData }, ['csrfSecret']);
await this.oauthService.encryptAndSaveData(credential, { oauthTokenData }, ['csrfSecret']);
this.logger.debug('OAuth2 callback successful for credential', {
credentialId: credential.id,
@@ -242,7 +229,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
return res.render('oauth-callback');
} catch (e) {
const error = ensureError(e);
return this.renderCallbackError(
return this.oauthService.renderCallbackError(
res,
error.message,
'body' in error ? jsonStringify(error.body) : undefined,
@@ -257,7 +244,7 @@ export class OAuth2CredentialController extends AbstractOAuthController {
accessTokenUri: credential.accessTokenUrl ?? '',
authorizationUri: credential.authUrl ?? '',
authentication: credential.authentication ?? 'header',
redirectUri: `${this.baseUrl}/callback`,
redirectUri: `${this.oauthService.getBaseUrl(OauthVersion.V2)}/callback`,
scopes: split(credential.scope ?? 'openid', ','),
scopesSeparator: credential.scope?.includes(',') ? ',' : ' ',
ignoreSSLIssues: credential.ignoreSSLIssues ?? false,

View File

@@ -1,82 +1,31 @@
import { DismissBannerRequestDto, OwnerSetupRequestDto } from '@n8n/api-types';
import { Logger } from '@n8n/backend-common';
import {
AuthenticatedRequest,
GLOBAL_OWNER_ROLE,
SettingsRepository,
UserRepository,
} from '@n8n/db';
import { AuthenticatedRequest } from '@n8n/db';
import { Body, GlobalScope, Post, RestController } from '@n8n/decorators';
import { Response } from 'express';
import { AuthService } from '@/auth/auth.service';
import config from '@/config';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { EventService } from '@/events/event.service';
import { validateEntity } from '@/generic-helpers';
import { PostHogClient } from '@/posthog';
import { BannerService } from '@/services/banner.service';
import { PasswordUtility } from '@/services/password.utility';
import { UserService } from '@/services/user.service';
import { OwnershipService } from '@/services/ownership.service';
@RestController('/owner')
export class OwnerController {
constructor(
private readonly logger: Logger,
private readonly eventService: EventService,
private readonly settingsRepository: SettingsRepository,
private readonly authService: AuthService,
private readonly bannerService: BannerService,
private readonly userService: UserService,
private readonly passwordUtility: PasswordUtility,
private readonly postHog: PostHogClient,
private readonly userRepository: UserRepository,
private readonly ownershipService: OwnershipService,
) {}
/**
* Promote a shell into the owner of the n8n instance,
* and enable `isInstanceOwnerSetUp` setting.
* Promote a shell into the owner of the n8n instance
*/
@Post('/setup', { skipAuth: true })
async setupOwner(req: AuthenticatedRequest, res: Response, @Body payload: OwnerSetupRequestDto) {
const { email, firstName, lastName, password } = payload;
if (config.getEnv('userManagement.isInstanceOwnerSetUp')) {
this.logger.debug(
'Request to claim instance ownership failed because instance owner already exists',
);
throw new BadRequestError('Instance owner already setup');
}
let owner = await this.userRepository.findOneOrFail({
where: { role: { slug: GLOBAL_OWNER_ROLE.slug } },
relations: ['role'],
});
owner.email = email;
owner.firstName = firstName;
owner.lastName = lastName;
owner.password = await this.passwordUtility.hash(password);
// TODO: move XSS validation out into the DTO class
await validateEntity(owner);
owner = await this.userRepository.save(owner, { transaction: false });
this.logger.info('Owner was set up successfully');
await this.settingsRepository.update(
{ key: 'userManagement.isInstanceOwnerSetUp' },
{ value: JSON.stringify(true) },
);
config.set('userManagement.isInstanceOwnerSetUp', true);
this.logger.debug('Setting isInstanceOwnerSetUp updated successfully');
const owner = await this.ownershipService.setupOwner(payload);
this.authService.issueCookie(res, owner, req.authInfo?.usedMfa ?? false, req.browserId);
this.eventService.emit('instance-owner-setup', { userId: owner.id });
return await this.userService.toPublic(owner, { posthog: this.postHog, withScopes: true });
}

View File

@@ -38,11 +38,13 @@ import { ChatHubAgentService } from './chat-hub-agent.service';
import { ChatHubAttachmentService } from './chat-hub.attachment.service';
import { ChatHubService } from './chat-hub.service';
import { ChatModelsRequestDto } from './dto/chat-models-request.dto';
import { ChatHubModelsService } from './chat-hub.models.service';
@RestController('/chat')
export class ChatHubController {
constructor(
private readonly chatService: ChatHubService,
private readonly chatModelsService: ChatHubModelsService,
private readonly chatAgentService: ChatHubAgentService,
private readonly chatAttachmentService: ChatHubAttachmentService,
private readonly logger: Logger,
@@ -55,7 +57,7 @@ export class ChatHubController {
_res: Response,
@Body payload: ChatModelsRequestDto,
): Promise<ChatModelsResponse> {
return await this.chatService.getModels(req.user, payload.credentials);
return await this.chatModelsService.getModels(req.user, payload.credentials);
}
@Get('/conversations')

View File

@@ -0,0 +1,811 @@
import { In, WorkflowRepository, type User } from '@n8n/db';
import { getBase } from '@/workflow-execute-additional-data';
import { ChatHubAgentService } from './chat-hub-agent.service';
import { ChatHubWorkflowService } from './chat-hub-workflow.service';
import { CredentialsFinderService } from '@/credentials/credentials-finder.service';
import { DynamicNodeParametersService } from '@/services/dynamic-node-parameters.service';
import { WorkflowService } from '@/workflows/workflow.service';
import { getModelMetadata, PROVIDER_NODE_TYPE_MAP } from './chat-hub.constants';
import {
AGENT_LANGCHAIN_NODE_TYPE,
CHAT_TRIGGER_NODE_TYPE,
type INodeCredentials,
type INodePropertyOptions,
type IWorkflowExecuteAdditionalData,
} from 'n8n-workflow';
import {
chatHubProviderSchema,
emptyChatModelsResponse,
PROVIDER_CREDENTIAL_TYPE_MAP,
type ChatHubLLMProvider,
type ChatHubProvider,
type ChatModelDto,
type ChatModelsResponse,
} from '@n8n/api-types';
import { validChatTriggerParamsShape } from './chat-hub.types';
import { Service } from '@n8n/di';
/**
 * Lists the chat models a user can select in the Chat Hub: API models from
 * every supported LLM provider, plus the user's own n8n agent workflows
 * ('n8n' provider) and custom agents ('custom-agent' provider).
 */
@Service()
export class ChatHubModelsService {
  constructor(
    // Executes the provider nodes' load-options / resource-locator routing to list models.
    private readonly nodeParametersService: DynamicNodeParametersService,
    private readonly workflowService: WorkflowService,
    private readonly workflowRepository: WorkflowRepository,
    // Used to check the user may read the credential selected for a provider.
    private readonly credentialsFinderService: CredentialsFinderService,
    private readonly chatHubAgentService: ChatHubAgentService,
    private readonly chatHubWorkflowService: ChatHubWorkflowService,
  ) {}
async getModels(
user: User,
credentialIds: Record<ChatHubLLMProvider, string | null>,
): Promise<ChatModelsResponse> {
const additionalData = await getBase({ userId: user.id });
const providers = chatHubProviderSchema.options;
const allCredentials = await this.credentialsFinderService.findCredentialsForUser(user, [
'credential:read',
]);
const responses = await Promise.all(
providers.map<Promise<[ChatHubProvider, ChatModelsResponse[ChatHubProvider]]>>(
async (provider: ChatHubProvider) => {
const credentials: INodeCredentials = {};
if (provider !== 'n8n' && provider !== 'custom-agent') {
const credentialId = credentialIds[provider];
if (!credentialId) {
return [provider, { models: [] }];
}
// Ensure the user has the permission to read the credential
if (!allCredentials.some((credential) => credential.id === credentialId)) {
return [
provider,
{ models: [], error: 'Could not retrieve models. Verify credentials.' },
];
}
credentials[PROVIDER_CREDENTIAL_TYPE_MAP[provider]] = { name: '', id: credentialId };
}
try {
return [
provider,
await this.fetchModelsForProvider(user, provider, credentials, additionalData),
];
} catch {
return [
provider,
{ models: [], error: 'Could not retrieve models. Verify credentials.' },
];
}
},
),
);
return responses.reduce<ChatModelsResponse>(
(acc, [provider, res]) => {
acc[provider] = res;
return acc;
},
{ ...emptyChatModelsResponse },
);
}
/**
 * Dispatch model discovery to the provider-specific fetcher and wrap the raw
 * node options into `ChatModelDto`s via `transformAndFilterModels`.
 *
 * @param user Only used by 'n8n' and 'custom-agent', which list the user's
 *   agent workflows / custom agents instead of calling a provider API.
 * @param provider The provider to query.
 * @param credentials Node credentials prepared by the caller (left empty for
 *   'n8n' and 'custom-agent').
 * @param additionalData Execution context for the load-options calls.
 * @returns Models (plus optional error field) for this single provider.
 * @throws Propagates fetch errors; `getModels` converts them into a generic
 *   per-provider error entry.
 */
private async fetchModelsForProvider(
  user: User,
  provider: ChatHubProvider,
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse[ChatHubProvider]> {
  switch (provider) {
    case 'openai': {
      const rawModels = await this.fetchOpenAiModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'openai') };
    }
    case 'anthropic': {
      const rawModels = await this.fetchAnthropicModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'anthropic') };
    }
    case 'google': {
      const rawModels = await this.fetchGoogleModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'google') };
    }
    case 'ollama': {
      const rawModels = await this.fetchOllamaModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'ollama') };
    }
    // The two Azure fetchers are synchronous (no listing API; they return []),
    // hence no await here.
    case 'azureOpenAi': {
      const rawModels = this.fetchAzureOpenAiModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'azureOpenAi') };
    }
    case 'azureEntraId': {
      const rawModels = this.fetchAzureEntraIdModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'azureEntraId') };
    }
    case 'awsBedrock': {
      const rawModels = await this.fetchAwsBedrockModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'awsBedrock') };
    }
    case 'vercelAiGateway': {
      const rawModels = await this.fetchVercelAiGatewayModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'vercelAiGateway') };
    }
    case 'xAiGrok': {
      const rawModels = await this.fetchXAiGrokModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'xAiGrok') };
    }
    case 'groq': {
      const rawModels = await this.fetchGroqModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'groq') };
    }
    case 'openRouter': {
      const rawModels = await this.fetchOpenRouterModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'openRouter') };
    }
    case 'deepSeek': {
      const rawModels = await this.fetchDeepSeekModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'deepSeek') };
    }
    case 'cohere': {
      const rawModels = await this.fetchCohereModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'cohere') };
    }
    case 'mistralCloud': {
      const rawModels = await this.fetchMistralCloudModels(credentials, additionalData);
      return { models: this.transformAndFilterModels(rawModels, 'mistralCloud') };
    }
    // These two providers produce already-shaped DTO responses, not raw options.
    case 'n8n':
      return await this.fetchAgentWorkflowsAsModels(user);
    case 'custom-agent':
      return await this.chatHubAgentService.getAgentsByUserIdAsModels(user.id);
  }
}
/**
 * List OpenAI chat models through the OpenAI node's `searchModels`
 * resource-locator method.
 */
private async fetchOpenAiModels(
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
  const { results } = await this.nodeParametersService.getResourceLocatorResults(
    'searchModels',
    'parameters.model',
    additionalData,
    PROVIDER_NODE_TYPE_MAP.openai,
    {},
    credentials,
  );
  return results;
}
/**
 * List Anthropic chat models through the Anthropic node's `searchModels`
 * resource-locator method.
 */
private async fetchAnthropicModels(
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
  const { results } = await this.nodeParametersService.getResourceLocatorResults(
    'searchModels',
    'parameters.model',
    additionalData,
    PROVIDER_NODE_TYPE_MAP.anthropic,
    {},
    credentials,
  );
  return results;
}
/**
 * List Google Gemini chat models via GET /v1beta/models, excluding
 * embedding models and sorting by name. The routing config is copied from
 * the Gemini node (see link below).
 */
private async fetchGoogleModels(
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
  return await this.nodeParametersService.getOptionsViaLoadOptions(
    {
      // From Gemini node
      // https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts#L75
      routing: {
        request: {
          method: 'GET',
          url: '/v1beta/models',
        },
        output: {
          postReceive: [
            {
              type: 'rootProperty',
              properties: {
                property: 'models',
              },
            },
            {
              // Embedding models cannot be used for chat, so drop them.
              type: 'filter',
              properties: {
                pass: "={{ !$responseItem.name.includes('embedding') }}",
              },
            },
            {
              type: 'setKeyValue',
              properties: {
                name: '={{$responseItem.name}}',
                value: '={{$responseItem.name}}',
                description: '={{$responseItem.description}}',
              },
            },
            {
              type: 'sort',
              properties: {
                key: 'name',
              },
            },
          ],
        },
      },
    },
    additionalData,
    PROVIDER_NODE_TYPE_MAP.google,
    {},
    credentials,
  );
}
/**
 * List locally available Ollama models via GET /api/tags, sorted by name.
 * The routing config is copied from the Ollama Model node (see link below).
 */
private async fetchOllamaModels(
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
  return await this.nodeParametersService.getOptionsViaLoadOptions(
    {
      // From Ollama Model node
      // https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LMOllama/description.ts#L24
      routing: {
        request: {
          method: 'GET',
          url: '/api/tags',
        },
        output: {
          postReceive: [
            {
              type: 'rootProperty',
              properties: {
                property: 'models',
              },
            },
            {
              type: 'setKeyValue',
              properties: {
                name: '={{$responseItem.name}}',
                value: '={{$responseItem.name}}',
              },
            },
            {
              type: 'sort',
              properties: {
                key: 'name',
              },
            },
          ],
        },
      },
    },
    additionalData,
    PROVIDER_NODE_TYPE_MAP.ollama,
    {},
    credentials,
  );
}
/**
 * Azure OpenAI offers no model-listing API, so no models can be discovered.
 * If the Azure OpenAI node gains such support in the future, mirror that
 * implementation here.
 */
private fetchAzureOpenAiModels(
  _credentials: INodeCredentials,
  _additionalData: IWorkflowExecuteAdditionalData,
): INodePropertyOptions[] {
  const noModels: INodePropertyOptions[] = [];
  return noModels;
}
/**
 * Azure Entra ID has no model-listing support either; always returns an
 * empty list (same rationale as `fetchAzureOpenAiModels`).
 */
private fetchAzureEntraIdModels(
  _credentials: INodeCredentials,
  _additionalData: IWorkflowExecuteAdditionalData,
): INodePropertyOptions[] {
  const noModels: INodePropertyOptions[] = [];
  return noModels;
}
/**
 * List AWS Bedrock models by querying two endpoints in parallel and
 * concatenating the results: on-demand TEXT foundation models and
 * inference profiles. Routing configs are copied from the AWS Bedrock node
 * (links below).
 */
private async fetchAwsBedrockModels(
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
  // From AWS Bedrock node
  // https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.ts#L100
  // https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.ts#L155
  const foundationModelsRequest = this.nodeParametersService.getOptionsViaLoadOptions(
    {
      routing: {
        request: {
          method: 'GET',
          url: '/foundation-models?&byOutputModality=TEXT&byInferenceType=ON_DEMAND',
        },
        output: {
          postReceive: [
            {
              type: 'rootProperty',
              properties: {
                property: 'modelSummaries',
              },
            },
            {
              type: 'setKeyValue',
              properties: {
                name: '={{$responseItem.modelName}}',
                description: '={{$responseItem.modelArn}}',
                value: '={{$responseItem.modelId}}',
              },
            },
            {
              type: 'sort',
              properties: {
                key: 'name',
              },
            },
          ],
        },
      },
    },
    additionalData,
    PROVIDER_NODE_TYPE_MAP.awsBedrock,
    {},
    credentials,
  );
  const inferenceProfileModelsRequest = this.nodeParametersService.getOptionsViaLoadOptions(
    {
      routing: {
        request: {
          method: 'GET',
          url: '/inference-profiles?maxResults=1000',
        },
        output: {
          postReceive: [
            {
              type: 'rootProperty',
              properties: {
                property: 'inferenceProfileSummaries',
              },
            },
            {
              type: 'setKeyValue',
              properties: {
                name: '={{$responseItem.inferenceProfileName}}',
                description:
                  '={{$responseItem.description || $responseItem.inferenceProfileArn}}',
                value: '={{$responseItem.inferenceProfileId}}',
              },
            },
            {
              type: 'sort',
              properties: {
                key: 'name',
              },
            },
          ],
        },
      },
    },
    additionalData,
    PROVIDER_NODE_TYPE_MAP.awsBedrock,
    {},
    credentials,
  );
  // Both requests were started eagerly above; await them together.
  const [foundationModels, inferenceProfileModels] = await Promise.all([
    foundationModelsRequest,
    inferenceProfileModelsRequest,
  ]);
  return foundationModels.concat(inferenceProfileModels);
}
/**
 * List Mistral Cloud models via GET /models, excluding embedding models
 * (ids containing 'embed') and sorting by name.
 */
private async fetchMistralCloudModels(
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
  return await this.nodeParametersService.getOptionsViaLoadOptions(
    {
      routing: {
        request: {
          method: 'GET',
          url: '/models',
        },
        output: {
          postReceive: [
            {
              type: 'rootProperty',
              properties: {
                property: 'data',
              },
            },
            {
              // Embedding models cannot be used for chat, so drop them.
              type: 'filter',
              properties: {
                pass: "={{ !$responseItem.id.includes('embed') }}",
              },
            },
            {
              type: 'setKeyValue',
              properties: {
                name: '={{ $responseItem.id }}',
                value: '={{ $responseItem.id }}',
              },
            },
            {
              type: 'sort',
              properties: {
                key: 'name',
              },
            },
          ],
        },
      },
    },
    additionalData,
    PROVIDER_NODE_TYPE_MAP.mistralCloud,
    {},
    credentials,
  );
}
/**
 * List Cohere chat-capable models via GET
 * /v1/models?page_size=100&endpoint=chat, sorted by name.
 */
private async fetchCohereModels(
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
  return await this.nodeParametersService.getOptionsViaLoadOptions(
    {
      routing: {
        request: {
          method: 'GET',
          url: '/v1/models?page_size=100&endpoint=chat',
        },
        output: {
          postReceive: [
            {
              type: 'rootProperty',
              properties: {
                property: 'models',
              },
            },
            {
              type: 'setKeyValue',
              properties: {
                name: '={{$responseItem.name}}',
                value: '={{$responseItem.name}}',
                description: '={{$responseItem.description}}',
              },
            },
            {
              type: 'sort',
              properties: {
                key: 'name',
              },
            },
          ],
        },
      },
    },
    additionalData,
    PROVIDER_NODE_TYPE_MAP.cohere,
    {},
    credentials,
  );
}
/**
 * List DeepSeek models via GET /models, sorted by name.
 */
private async fetchDeepSeekModels(
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
  return await this.nodeParametersService.getOptionsViaLoadOptions(
    {
      routing: {
        request: {
          method: 'GET',
          url: '/models',
        },
        output: {
          postReceive: [
            {
              type: 'rootProperty',
              properties: {
                property: 'data',
              },
            },
            {
              type: 'setKeyValue',
              properties: {
                name: '={{$responseItem.id}}',
                value: '={{$responseItem.id}}',
              },
            },
            {
              type: 'sort',
              properties: {
                key: 'name',
              },
            },
          ],
        },
      },
    },
    additionalData,
    PROVIDER_NODE_TYPE_MAP.deepSeek,
    {},
    credentials,
  );
}
/**
 * List OpenRouter models via GET /models, sorted by name.
 */
private async fetchOpenRouterModels(
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
  return await this.nodeParametersService.getOptionsViaLoadOptions(
    {
      routing: {
        request: {
          method: 'GET',
          url: '/models',
        },
        output: {
          postReceive: [
            {
              type: 'rootProperty',
              properties: {
                property: 'data',
              },
            },
            {
              type: 'setKeyValue',
              properties: {
                name: '={{$responseItem.id}}',
                value: '={{$responseItem.id}}',
              },
            },
            {
              type: 'sort',
              properties: {
                key: 'name',
              },
            },
          ],
        },
      },
    },
    additionalData,
    PROVIDER_NODE_TYPE_MAP.openRouter,
    {},
    credentials,
  );
}
/**
 * List Groq models via GET /models, keeping only active entries of object
 * type "model". Note: unlike the other fetchers, no sort step is applied —
 * results keep the API's order.
 */
private async fetchGroqModels(
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
  return await this.nodeParametersService.getOptionsViaLoadOptions(
    {
      routing: {
        request: {
          method: 'GET',
          url: '/models',
        },
        output: {
          postReceive: [
            {
              type: 'rootProperty',
              properties: {
                property: 'data',
              },
            },
            {
              type: 'filter',
              properties: {
                pass: '={{ $responseItem.active === true && $responseItem.object === "model" }}',
              },
            },
            {
              type: 'setKeyValue',
              properties: {
                name: '={{$responseItem.id}}',
                value: '={{$responseItem.id}}',
              },
            },
          ],
        },
      },
    },
    additionalData,
    PROVIDER_NODE_TYPE_MAP.groq,
    {},
    credentials,
  );
}
/**
 * List xAI Grok models via GET /models, sorted by name.
 */
private async fetchXAiGrokModels(
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
  return await this.nodeParametersService.getOptionsViaLoadOptions(
    {
      routing: {
        request: {
          method: 'GET',
          url: '/models',
        },
        output: {
          postReceive: [
            {
              type: 'rootProperty',
              properties: {
                property: 'data',
              },
            },
            {
              type: 'setKeyValue',
              properties: {
                name: '={{$responseItem.id}}',
                value: '={{$responseItem.id}}',
              },
            },
            {
              type: 'sort',
              properties: {
                key: 'name',
              },
            },
          ],
        },
      },
    },
    additionalData,
    PROVIDER_NODE_TYPE_MAP.xAiGrok,
    {},
    credentials,
  );
}
/**
 * List Vercel AI Gateway models via GET /models, sorted by name.
 */
private async fetchVercelAiGatewayModels(
  credentials: INodeCredentials,
  additionalData: IWorkflowExecuteAdditionalData,
): Promise<INodePropertyOptions[]> {
  return await this.nodeParametersService.getOptionsViaLoadOptions(
    {
      routing: {
        request: {
          method: 'GET',
          url: '/models',
        },
        output: {
          postReceive: [
            {
              type: 'rootProperty',
              properties: {
                property: 'data',
              },
            },
            {
              type: 'setKeyValue',
              properties: {
                name: '={{$responseItem.id}}',
                value: '={{$responseItem.id}}',
              },
            },
            {
              type: 'sort',
              properties: {
                key: 'name',
              },
            },
          ],
        },
      },
    },
    additionalData,
    PROVIDER_NODE_TYPE_MAP.vercelAiGateway,
    {},
    credentials,
  );
}
/**
 * Expose the user's n8n agent workflows as selectable chat "models".
 *
 * A workflow qualifies when the user can read it, it has an active version,
 * and that active version contains a chat trigger node with valid chat
 * parameters. Workflows containing any AI Agent node older than
 * typeVersion 2.1 are excluded (no streaming support).
 *
 * @param user Scopes which workflows are visible.
 * @returns The qualifying workflows wrapped as 'n8n'-provider models.
 */
private async fetchAgentWorkflowsAsModels(user: User): Promise<ChatModelsResponse['n8n']> {
  // Workflows are scanned by their latest version for chat trigger nodes.
  // This means that we might miss some active workflow versions that had chat triggers but
  // the latest version does not, but this trade-off is done for performance.
  const workflowsWithChatTrigger = await this.workflowService.getWorkflowsWithNodesIncluded(
    user,
    [CHAT_TRIGGER_NODE_TYPE],
    true,
  );
  const activeWorkflows = workflowsWithChatTrigger
    // Ensure the user has at least read access to the workflows
    .filter((workflow) => workflow.scopes.includes('workflow:read'))
    .filter((workflow) => !!workflow.activeVersionId);
  // Re-load with the activeVersion relation so its node list can be inspected.
  const workflows = await this.workflowRepository.find({
    select: { id: true },
    where: { id: In(activeWorkflows.map((workflow) => workflow.id)) },
    relations: { activeVersion: true },
  });
  const models: ChatModelDto[] = [];
  for (const { id, activeVersion } of workflows) {
    if (!activeVersion) {
      continue;
    }
    const chatTrigger = activeVersion.nodes?.find((node) => node.type === CHAT_TRIGGER_NODE_TYPE);
    if (!chatTrigger) {
      continue;
    }
    // Zod-validate the trigger's parameters; skip workflows with invalid config.
    const chatTriggerParams = validChatTriggerParamsShape.safeParse(chatTrigger.parameters).data;
    if (!chatTriggerParams) {
      continue;
    }
    const agentNodes = activeVersion.nodes?.filter(
      (node) => node.type === AGENT_LANGCHAIN_NODE_TYPE,
    );
    // Agents older than this can't do streaming
    if (agentNodes.some((node) => node.typeVersion < 2.1)) {
      continue;
    }
    const inputModalities = this.chatHubWorkflowService.parseInputModalities(
      chatTriggerParams.options,
    );
    models.push({
      // Prefer the agent name configured on the trigger, then the version name.
      name: chatTriggerParams.agentName ?? activeVersion.name ?? 'Unknown Agent',
      description: chatTriggerParams.agentDescription ?? null,
      model: {
        provider: 'n8n',
        workflowId: id,
      },
      createdAt: activeVersion.createdAt ? activeVersion.createdAt.toISOString() : null,
      updatedAt: activeVersion.updatedAt ? activeVersion.updatedAt.toISOString() : null,
      metadata: {
        inputModalities,
        capabilities: {
          functionCalling: false,
        },
      },
    });
  }
  return {
    models,
  };
}
/**
 * Map raw node option entries onto `ChatModelDto`s for the given provider,
 * attaching static model metadata looked up by model id.
 *
 * NOTE(review): despite the name, no filtering happens here — every entry
 * is mapped 1:1.
 */
private transformAndFilterModels(
  rawModels: INodePropertyOptions[],
  provider: ChatHubLLMProvider,
): ChatModelDto[] {
  const toDto = (option: INodePropertyOptions): ChatModelDto => {
    const modelId = String(option.value);
    return {
      id: modelId,
      name: option.name,
      description: option.description ?? null,
      model: {
        provider,
        model: modelId,
      },
      createdAt: null,
      updatedAt: null,
      metadata: getModelMetadata(provider, modelId),
    };
  };
  return rawModels.map(toDto);
}
}

View File

@@ -2,7 +2,6 @@ import {
PROVIDER_CREDENTIAL_TYPE_MAP,
type ChatHubProvider,
type ChatHubLLMProvider,
type ChatModelsResponse,
type ChatHubConversationsResponse,
type ChatHubConversationResponse,
ChatHubMessageDto,
@@ -10,17 +9,14 @@ import {
type ChatSessionId,
ChatHubConversationModel,
ChatHubMessageStatus,
chatHubProviderSchema,
type EnrichedStructuredChunk,
ChatHubBaseLLMModel,
ChatHubN8nModel,
ChatHubCustomAgentModel,
emptyChatModelsResponse,
type ChatHubUpdateConversationRequest,
ChatModelDto,
} from '@n8n/api-types';
import { Logger } from '@n8n/backend-common';
import { ExecutionRepository, IExecutionResponse, User, WorkflowRepository, In } from '@n8n/db';
import { ExecutionRepository, IExecutionResponse, User, WorkflowRepository } from '@n8n/db';
import { Service } from '@n8n/di';
import type { EntityManager } from '@n8n/typeorm';
import { GlobalConfig } from '@n8n/config';
@@ -28,12 +24,10 @@ import type { Response } from 'express';
import { ErrorReporter, InstanceSettings } from 'n8n-core';
import {
CHAT_TRIGGER_NODE_TYPE,
AGENT_LANGCHAIN_NODE_TYPE,
OperationalError,
ManualExecutionCancelledError,
type INodeCredentials,
type IWorkflowBase,
type IWorkflowExecuteAdditionalData,
jsonParse,
jsonStringify,
StructuredChunk,
@@ -48,15 +42,11 @@ import {
} from 'n8n-workflow';
import { ActiveExecutions } from '@/active-executions';
import { CredentialsFinderService } from '@/credentials/credentials-finder.service';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { NotFoundError } from '@/errors/response-errors/not-found.error';
import { ExecutionService } from '@/executions/execution.service';
import { DynamicNodeParametersService } from '@/services/dynamic-node-parameters.service';
import { getBase } from '@/workflow-execute-additional-data';
import { WorkflowExecutionService } from '@/workflows/workflow-execution.service';
import { WorkflowFinderService } from '@/workflows/workflow-finder.service';
import { WorkflowService } from '@/workflows/workflow.service';
import { ChatHubAgentService } from './chat-hub-agent.service';
import { ChatHubCredentialsService } from './chat-hub-credentials.service';
@@ -64,18 +54,12 @@ import type { ChatHubMessage } from './chat-hub-message.entity';
import type { ChatHubSession } from './chat-hub-session.entity';
import { ChatHubWorkflowService } from './chat-hub-workflow.service';
import { ChatHubAttachmentService } from './chat-hub.attachment.service';
import {
JSONL_STREAM_HEADERS,
NODE_NAMES,
PROVIDER_NODE_TYPE_MAP,
getModelMetadata,
} from './chat-hub.constants';
import { JSONL_STREAM_HEADERS, NODE_NAMES, PROVIDER_NODE_TYPE_MAP } from './chat-hub.constants';
import { ChatHubSettingsService } from './chat-hub.settings.service';
import {
HumanMessagePayload,
RegenerateMessagePayload,
EditMessagePayload,
validChatTriggerParamsShape,
} from './chat-hub.types';
import { ChatHubMessageRepository } from './chat-message.repository';
import { ChatHubSessionRepository } from './chat-session.repository';
@@ -90,16 +74,13 @@ export class ChatHubService {
private readonly logger: Logger,
private readonly errorReporter: ErrorReporter,
private readonly executionService: ExecutionService,
private readonly nodeParametersService: DynamicNodeParametersService,
private readonly executionRepository: ExecutionRepository,
private readonly workflowExecutionService: WorkflowExecutionService,
private readonly workflowService: WorkflowService,
private readonly workflowFinderService: WorkflowFinderService,
private readonly workflowRepository: WorkflowRepository,
private readonly activeExecutions: ActiveExecutions,
private readonly sessionRepository: ChatHubSessionRepository,
private readonly messageRepository: ChatHubMessageRepository,
private readonly credentialsFinderService: CredentialsFinderService,
private readonly chatHubAgentService: ChatHubAgentService,
private readonly chatHubCredentialsService: ChatHubCredentialsService,
private readonly chatHubWorkflowService: ChatHubWorkflowService,
@@ -109,883 +90,6 @@ export class ChatHubService {
private readonly globalConfig: GlobalConfig,
) {}
async getModels(
user: User,
credentialIds: Record<ChatHubLLMProvider, string | null>,
): Promise<ChatModelsResponse> {
const additionalData = await getBase({ userId: user.id });
const providers = chatHubProviderSchema.options;
const allCredentials = await this.credentialsFinderService.findCredentialsForUser(user, [
'credential:read',
]);
const responses = await Promise.all(
providers.map<Promise<[ChatHubProvider, ChatModelsResponse[ChatHubProvider]]>>(
async (provider: ChatHubProvider) => {
const credentials: INodeCredentials = {};
if (provider !== 'n8n' && provider !== 'custom-agent') {
const credentialId = credentialIds[provider];
if (!credentialId) {
return [provider, { models: [] }];
}
// Ensure the user has the permission to read the credential
if (!allCredentials.some((credential) => credential.id === credentialId)) {
return [
provider,
{ models: [], error: 'Could not retrieve models. Verify credentials.' },
];
}
credentials[PROVIDER_CREDENTIAL_TYPE_MAP[provider]] = { name: '', id: credentialId };
}
try {
return [
provider,
await this.fetchModelsForProvider(user, provider, credentials, additionalData),
];
} catch {
return [
provider,
{ models: [], error: 'Could not retrieve models. Verify credentials.' },
];
}
},
),
);
return responses.reduce<ChatModelsResponse>(
(acc, [provider, res]) => {
acc[provider] = res;
return acc;
},
{ ...emptyChatModelsResponse },
);
}
private async fetchModelsForProvider(
user: User,
provider: ChatHubProvider,
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse[ChatHubProvider]> {
switch (provider) {
case 'openai':
return await this.fetchOpenAiModels(credentials, additionalData);
case 'anthropic':
return await this.fetchAnthropicModels(credentials, additionalData);
case 'google':
return await this.fetchGoogleModels(credentials, additionalData);
case 'ollama':
return await this.fetchOllamaModels(credentials, additionalData);
case 'azureOpenAi':
case 'azureEntraId':
return this.fetchAzureOpenAiModels(credentials, additionalData);
case 'awsBedrock':
return await this.fetchAwsBedrockModels(credentials, additionalData);
case 'vercelAiGateway':
return await this.fetchVercelAiGatewayModels(credentials, additionalData);
case 'xAiGrok':
return await this.fetchXAiGrokModels(credentials, additionalData);
case 'groq':
return await this.fetchGroqModels(credentials, additionalData);
case 'openRouter':
return await this.fetchOpenRouterModels(credentials, additionalData);
case 'deepSeek':
return await this.fetchDeepSeekModels(credentials, additionalData);
case 'cohere':
return await this.fetchCohereModels(credentials, additionalData);
case 'mistralCloud':
return await this.fetchMistralCloudModels(credentials, additionalData);
case 'n8n':
return await this.fetchAgentWorkflowsAsModels(user);
case 'custom-agent':
return await this.chatHubAgentService.getAgentsByUserIdAsModels(user.id);
}
}
private async fetchOpenAiModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['openai']> {
const resourceLocatorResults = await this.nodeParametersService.getResourceLocatorResults(
'searchModels',
'parameters.model',
additionalData,
PROVIDER_NODE_TYPE_MAP.openai,
{},
credentials,
);
return {
models: resourceLocatorResults.results.map((result) => ({
name: result.name,
description: result.description ?? null,
model: {
provider: 'openai',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('openai', String(result.value)),
})),
};
}
private async fetchAnthropicModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['anthropic']> {
const resourceLocatorResults = await this.nodeParametersService.getResourceLocatorResults(
'searchModels',
'parameters.model',
additionalData,
PROVIDER_NODE_TYPE_MAP.anthropic,
{},
credentials,
);
return {
models: resourceLocatorResults.results.map((result) => ({
name: result.name,
description: result.description ?? null,
model: {
provider: 'anthropic',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('anthropic', String(result.value)),
})),
};
}
private async fetchGoogleModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['google']> {
const results = await this.nodeParametersService.getOptionsViaLoadOptions(
{
// From Gemini node
// https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts#L75
routing: {
request: {
method: 'GET',
url: '/v1beta/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'models',
},
},
{
type: 'filter',
properties: {
pass: "={{ !$responseItem.name.includes('embedding') }}",
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.name}}',
value: '={{$responseItem.name}}',
description: '={{$responseItem.description}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.google,
{},
credentials,
);
return {
models: results.map((result) => ({
name: result.name,
description: result.description ?? null,
model: {
provider: 'google',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('google', String(result.value)),
})),
};
}
private async fetchOllamaModels(
credentials: INodeCredentials,
additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['ollama']> {
const results = await this.nodeParametersService.getOptionsViaLoadOptions(
{
// From Ollama Model node
// https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LMOllama/description.ts#L24
routing: {
request: {
method: 'GET',
url: '/api/tags',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'models',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.name}}',
value: '={{$responseItem.name}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
additionalData,
PROVIDER_NODE_TYPE_MAP.ollama,
{},
credentials,
);
return {
models: results.map((result) => ({
name: result.name,
description: result.description ?? null,
model: {
provider: 'ollama',
model: String(result.value),
},
createdAt: null,
updatedAt: null,
metadata: getModelMetadata('ollama', String(result.value)),
})),
};
}
/**
 * Azure OpenAI model listing.
 *
 * Azure doesn't appear to offer a way to list available models via API, so this
 * always returns an empty list. If the Azure OpenAI node gains model discovery
 * in the future, mirror that implementation here. Parameters are kept (unused)
 * so the signature lines up with the other provider fetchers.
 */
private fetchAzureOpenAiModels(
	_credentials: INodeCredentials,
	_additionalData: IWorkflowExecuteAdditionalData,
): ChatModelsResponse['azureOpenAi'] {
	return { models: [] };
}
/**
 * Fetches the chat models available on AWS Bedrock.
 *
 * Mirrors the load-options routing of the AWS Bedrock node:
 * https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.ts#L100
 * https://github.com/n8n-io/n8n/blob/master/packages/%40n8n/nodes-langchain/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.ts#L155
 *
 * Foundation models and inference profiles are fetched in parallel and merged
 * into a single list.
 */
private async fetchAwsBedrockModels(
	credentials: INodeCredentials,
	additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['awsBedrock']> {
	// On-demand text foundation models, keyed by model id.
	const foundationModelsPromise = this.nodeParametersService.getOptionsViaLoadOptions(
		{
			routing: {
				request: {
					method: 'GET',
					url: '/foundation-models?&byOutputModality=TEXT&byInferenceType=ON_DEMAND',
				},
				output: {
					postReceive: [
						{ type: 'rootProperty', properties: { property: 'modelSummaries' } },
						{
							type: 'setKeyValue',
							properties: {
								name: '={{$responseItem.modelName}}',
								description: '={{$responseItem.modelArn}}',
								value: '={{$responseItem.modelId}}',
							},
						},
						{ type: 'sort', properties: { key: 'name' } },
					],
				},
			},
		},
		additionalData,
		PROVIDER_NODE_TYPE_MAP.awsBedrock,
		{},
		credentials,
	);
	// Inference profiles, keyed by profile id.
	const inferenceProfilesPromise = this.nodeParametersService.getOptionsViaLoadOptions(
		{
			routing: {
				request: { method: 'GET', url: '/inference-profiles?maxResults=1000' },
				output: {
					postReceive: [
						{ type: 'rootProperty', properties: { property: 'inferenceProfileSummaries' } },
						{
							type: 'setKeyValue',
							properties: {
								name: '={{$responseItem.inferenceProfileName}}',
								description:
									'={{$responseItem.description || $responseItem.inferenceProfileArn}}',
								value: '={{$responseItem.inferenceProfileId}}',
							},
						},
						{ type: 'sort', properties: { key: 'name' } },
					],
				},
			},
		},
		additionalData,
		PROVIDER_NODE_TYPE_MAP.awsBedrock,
		{},
		credentials,
	);
	const [foundationModels, inferenceProfiles] = await Promise.all([
		foundationModelsPromise,
		inferenceProfilesPromise,
	]);
	return {
		models: [...foundationModels, ...inferenceProfiles].map((entry) => {
			const modelId = String(entry.value);
			return {
				name: entry.name,
				// Fall back to the raw model id when the API gave no description.
				description: entry.description ?? modelId,
				model: { provider: 'awsBedrock', model: modelId },
				createdAt: null,
				updatedAt: null,
				metadata: getModelMetadata('awsBedrock', modelId),
			};
		}),
	};
}
/**
 * Fetches the chat models available on Mistral Cloud.
 *
 * GET /models, unwrap `data`, drop embedding models, key entries by model id,
 * sort by name.
 */
private async fetchMistralCloudModels(
	credentials: INodeCredentials,
	additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['mistralCloud']> {
	const loadedOptions = await this.nodeParametersService.getOptionsViaLoadOptions(
		{
			routing: {
				request: { method: 'GET', url: '/models' },
				output: {
					postReceive: [
						{ type: 'rootProperty', properties: { property: 'data' } },
						// Embedding models are not usable for chat.
						{
							type: 'filter',
							properties: { pass: "={{ !$responseItem.id.includes('embed') }}" },
						},
						{
							type: 'setKeyValue',
							properties: {
								name: '={{ $responseItem.id }}',
								value: '={{ $responseItem.id }}',
							},
						},
						{ type: 'sort', properties: { key: 'name' } },
					],
				},
			},
		},
		additionalData,
		PROVIDER_NODE_TYPE_MAP.mistralCloud,
		{},
		credentials,
	);
	return {
		models: loadedOptions.map((entry) => {
			const modelId = String(entry.value);
			return {
				name: entry.name,
				// Fall back to the raw model id when the API gave no description.
				description: entry.description ?? modelId,
				model: { provider: 'mistralCloud', model: modelId },
				createdAt: null,
				updatedAt: null,
				metadata: getModelMetadata('mistralCloud', modelId),
			};
		}),
	};
}
/**
 * Fetches the chat-capable models available on Cohere.
 *
 * GET /v1/models filtered server-side to the chat endpoint, unwrap `models`,
 * key entries by model name, sort by name.
 */
private async fetchCohereModels(
	credentials: INodeCredentials,
	additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['cohere']> {
	const loadedOptions = await this.nodeParametersService.getOptionsViaLoadOptions(
		{
			routing: {
				request: { method: 'GET', url: '/v1/models?page_size=100&endpoint=chat' },
				output: {
					postReceive: [
						{ type: 'rootProperty', properties: { property: 'models' } },
						{
							type: 'setKeyValue',
							properties: {
								name: '={{$responseItem.name}}',
								value: '={{$responseItem.name}}',
								description: '={{$responseItem.description}}',
							},
						},
						{ type: 'sort', properties: { key: 'name' } },
					],
				},
			},
		},
		additionalData,
		PROVIDER_NODE_TYPE_MAP.cohere,
		{},
		credentials,
	);
	return {
		models: loadedOptions.map((entry) => {
			const modelId = String(entry.value);
			return {
				name: entry.name,
				description: entry.description ?? null,
				model: { provider: 'cohere', model: modelId },
				createdAt: null,
				updatedAt: null,
				metadata: getModelMetadata('cohere', modelId),
			};
		}),
	};
}
/**
 * Fetches the chat models available on DeepSeek.
 *
 * GET /models, unwrap `data`, key entries by model id, sort by name.
 */
private async fetchDeepSeekModels(
	credentials: INodeCredentials,
	additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['deepSeek']> {
	const loadedOptions = await this.nodeParametersService.getOptionsViaLoadOptions(
		{
			routing: {
				request: { method: 'GET', url: '/models' },
				output: {
					postReceive: [
						{ type: 'rootProperty', properties: { property: 'data' } },
						{
							type: 'setKeyValue',
							properties: {
								name: '={{$responseItem.id}}',
								value: '={{$responseItem.id}}',
							},
						},
						{ type: 'sort', properties: { key: 'name' } },
					],
				},
			},
		},
		additionalData,
		PROVIDER_NODE_TYPE_MAP.deepSeek,
		{},
		credentials,
	);
	return {
		models: loadedOptions.map((entry) => {
			const modelId = String(entry.value);
			return {
				name: entry.name,
				// Fall back to the raw model id when the API gave no description.
				description: entry.description ?? modelId,
				model: { provider: 'deepSeek', model: modelId },
				createdAt: null,
				updatedAt: null,
				metadata: getModelMetadata('deepSeek', modelId),
			};
		}),
	};
}
/**
 * Fetches the models available via OpenRouter.
 *
 * GET /models, unwrap `data`, key entries by model id, sort by name.
 */
private async fetchOpenRouterModels(
	credentials: INodeCredentials,
	additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['openRouter']> {
	const loadedOptions = await this.nodeParametersService.getOptionsViaLoadOptions(
		{
			routing: {
				request: { method: 'GET', url: '/models' },
				output: {
					postReceive: [
						{ type: 'rootProperty', properties: { property: 'data' } },
						{
							type: 'setKeyValue',
							properties: {
								name: '={{$responseItem.id}}',
								value: '={{$responseItem.id}}',
							},
						},
						{ type: 'sort', properties: { key: 'name' } },
					],
				},
			},
		},
		additionalData,
		PROVIDER_NODE_TYPE_MAP.openRouter,
		{},
		credentials,
	);
	return {
		models: loadedOptions.map((entry) => {
			const modelId = String(entry.value);
			return {
				name: entry.name,
				description: entry.description ?? null,
				model: { provider: 'openRouter', model: modelId },
				createdAt: null,
				updatedAt: null,
				metadata: getModelMetadata('openRouter', modelId),
			};
		}),
	};
}
/**
 * Fetches the active models available on Groq.
 *
 * GET /models, unwrap `data`, keep only active entries of object type "model",
 * key entries by model id. Note: unlike most other providers, no sort step is
 * applied here, matching the existing behavior.
 */
private async fetchGroqModels(
	credentials: INodeCredentials,
	additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['groq']> {
	const loadedOptions = await this.nodeParametersService.getOptionsViaLoadOptions(
		{
			routing: {
				request: { method: 'GET', url: '/models' },
				output: {
					postReceive: [
						{ type: 'rootProperty', properties: { property: 'data' } },
						{
							type: 'filter',
							properties: {
								pass: '={{ $responseItem.active === true && $responseItem.object === "model" }}',
							},
						},
						{
							type: 'setKeyValue',
							properties: {
								name: '={{$responseItem.id}}',
								value: '={{$responseItem.id}}',
							},
						},
					],
				},
			},
		},
		additionalData,
		PROVIDER_NODE_TYPE_MAP.groq,
		{},
		credentials,
	);
	return {
		models: loadedOptions.map((entry) => {
			const modelId = String(entry.value);
			return {
				name: entry.name,
				description: entry.description ?? null,
				model: { provider: 'groq', model: modelId },
				createdAt: null,
				updatedAt: null,
				metadata: getModelMetadata('groq', modelId),
			};
		}),
	};
}
/**
 * Fetches the models available on xAI (Grok).
 *
 * GET /models, unwrap `data`, key entries by model id, sort by name.
 */
private async fetchXAiGrokModels(
	credentials: INodeCredentials,
	additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['xAiGrok']> {
	const loadedOptions = await this.nodeParametersService.getOptionsViaLoadOptions(
		{
			routing: {
				request: { method: 'GET', url: '/models' },
				output: {
					postReceive: [
						{ type: 'rootProperty', properties: { property: 'data' } },
						{
							type: 'setKeyValue',
							properties: {
								name: '={{$responseItem.id}}',
								value: '={{$responseItem.id}}',
							},
						},
						{ type: 'sort', properties: { key: 'name' } },
					],
				},
			},
		},
		additionalData,
		PROVIDER_NODE_TYPE_MAP.xAiGrok,
		{},
		credentials,
	);
	return {
		models: loadedOptions.map((entry) => {
			const modelId = String(entry.value);
			return {
				name: entry.name,
				description: entry.description ?? null,
				model: { provider: 'xAiGrok', model: modelId },
				createdAt: null,
				updatedAt: null,
				metadata: getModelMetadata('xAiGrok', modelId),
			};
		}),
	};
}
/**
 * Fetches the models available via the Vercel AI Gateway.
 *
 * GET /models, unwrap `data`, key entries by model id, sort by name.
 */
private async fetchVercelAiGatewayModels(
	credentials: INodeCredentials,
	additionalData: IWorkflowExecuteAdditionalData,
): Promise<ChatModelsResponse['vercelAiGateway']> {
	const loadedOptions = await this.nodeParametersService.getOptionsViaLoadOptions(
		{
			routing: {
				request: { method: 'GET', url: '/models' },
				output: {
					postReceive: [
						{ type: 'rootProperty', properties: { property: 'data' } },
						{
							type: 'setKeyValue',
							properties: {
								name: '={{$responseItem.id}}',
								value: '={{$responseItem.id}}',
							},
						},
						{ type: 'sort', properties: { key: 'name' } },
					],
				},
			},
		},
		additionalData,
		PROVIDER_NODE_TYPE_MAP.vercelAiGateway,
		{},
		credentials,
	);
	return {
		models: loadedOptions.map((entry) => {
			const modelId = String(entry.value);
			return {
				name: entry.name,
				// Fall back to the raw model id when the API gave no description.
				description: entry.description ?? modelId,
				model: { provider: 'vercelAiGateway', model: modelId },
				createdAt: null,
				updatedAt: null,
				metadata: getModelMetadata('vercelAiGateway', modelId),
			};
		}),
	};
}
/**
 * Exposes active workflows that have a chat trigger as selectable "n8n" chat models.
 *
 * Workflows are scanned by their latest version for chat trigger nodes.
 * This means that we might miss some active workflow versions that had chat triggers but
 * the latest version does not, but this trade-off is done for performance.
 *
 * @param user Access control: only workflows the user can at least read are listed.
 * @returns One chat model entry per eligible active workflow.
 */
private async fetchAgentWorkflowsAsModels(user: User): Promise<ChatModelsResponse['n8n']> {
	const workflowsWithChatTrigger = await this.workflowService.getWorkflowsWithNodesIncluded(
		user,
		[CHAT_TRIGGER_NODE_TYPE],
		true,
	);
	const activeWorkflows = workflowsWithChatTrigger
		// Ensure the user has at least read access to the workflows
		.filter((workflow) => workflow.scopes.includes('workflow:read'))
		.filter((workflow) => !!workflow.activeVersionId);
	// Re-fetch with the active version relation so we inspect the version that actually runs.
	const workflows = await this.workflowRepository.find({
		select: { id: true },
		where: { id: In(activeWorkflows.map((workflow) => workflow.id)) },
		relations: { activeVersion: true },
	});
	const models: ChatModelDto[] = [];
	for (const { id, activeVersion } of workflows) {
		if (!activeVersion) {
			continue;
		}
		const chatTrigger = activeVersion.nodes?.find((node) => node.type === CHAT_TRIGGER_NODE_TYPE);
		if (!chatTrigger) {
			continue;
		}
		// Skip workflows whose chat trigger parameters don't match the expected shape.
		const chatTriggerParams = validChatTriggerParamsShape.safeParse(chatTrigger.parameters).data;
		if (!chatTriggerParams) {
			continue;
		}
		// `nodes` is optional-chained everywhere else in this method, so guard the filter
		// result with `?? []` rather than dereferencing a possibly-undefined array.
		const agentNodes =
			activeVersion.nodes?.filter((node) => node.type === AGENT_LANGCHAIN_NODE_TYPE) ?? [];
		// Agents older than this can't do streaming
		if (agentNodes.some((node) => node.typeVersion < 2.1)) {
			continue;
		}
		const inputModalities = this.chatHubWorkflowService.parseInputModalities(
			chatTriggerParams.options,
		);
		models.push({
			name: chatTriggerParams.agentName ?? activeVersion.name ?? 'Unknown Agent',
			description: chatTriggerParams.agentDescription ?? null,
			model: {
				provider: 'n8n',
				workflowId: id,
			},
			createdAt: activeVersion.createdAt ? activeVersion.createdAt.toISOString() : null,
			updatedAt: activeVersion.updatedAt ? activeVersion.updatedAt.toISOString() : null,
			metadata: {
				inputModalities,
				capabilities: {
					// Agent workflows are surfaced as plain chat targets; function calling
					// is not advertised for them.
					functionCalling: false,
				},
			},
		});
	}
	return {
		models,
	};
}
/** Deletes the workflow with the given id via the workflow repository. */
private async deleteChatWorkflow(workflowId: string): Promise<void> {
	await this.workflowRepository.delete(workflowId);
}

View File

@@ -738,7 +738,7 @@ describe('execute-workflow MCP tool', () => {
);
// Call through the tool handler to test telemetry
await tool.handler({ workflowId: 'error-tracking' }, {} as any);
await tool.handler({ workflowId: 'error-tracking', inputs: undefined }, {} as any);
expect(telemetry.track).toHaveBeenCalledWith(
'User called mcp tool',

View File

@@ -68,6 +68,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: SUPPORTED_SCOPES.join(' '),
logo_uri: undefined,
tos_uri: undefined,
});
});
@@ -111,6 +113,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read write',
logo_uri: undefined,
tos_uri: undefined,
};
oauthClientRepository.insert.mockResolvedValue({} as any);
@@ -140,6 +144,8 @@ describe('McpOAuthService', () => {
client_secret_expires_at: 1234567890,
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
oauthClientRepository.insert.mockResolvedValue({} as any);
@@ -166,6 +172,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
const error = new Error('Database error');
@@ -192,6 +200,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read write',
logo_uri: undefined,
tos_uri: undefined,
};
const params = {
@@ -222,6 +232,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
const params = {
@@ -250,6 +262,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
const params = {
@@ -291,6 +305,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
authorizationCodeService.getCodeChallenge.mockResolvedValue('challenge-123');
@@ -315,6 +331,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
const authRecord = {
@@ -365,6 +383,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
const authRecord = {
@@ -398,6 +418,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
const newTokens = {
@@ -447,6 +469,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
tokenService.revokeAccessToken.mockResolvedValue(true);
@@ -469,6 +493,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
tokenService.revokeRefreshToken.mockResolvedValue(true);
@@ -491,6 +517,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
tokenService.revokeAccessToken.mockResolvedValue(true);
@@ -512,6 +540,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
tokenService.revokeAccessToken.mockResolvedValue(false);
@@ -534,6 +564,8 @@ describe('McpOAuthService', () => {
token_endpoint_auth_method: 'none',
response_types: ['code'],
scope: 'read',
logo_uri: undefined,
tos_uri: undefined,
};
tokenService.revokeAccessToken.mockResolvedValue(false);

View File

@@ -57,6 +57,8 @@ export class McpOAuthService implements OAuthServerProvider {
}),
response_types: ['code'],
scope: SUPPORTED_SCOPES.join(' '),
logo_uri: undefined,
tos_uri: undefined,
};
},
registerClient: async (

View File

@@ -0,0 +1,693 @@
import { Logger } from '@n8n/backend-common';
import { GlobalConfig } from '@n8n/config';
import { Time } from '@n8n/constants';
import { mockInstance } from '@n8n/backend-test-utils';
import type { AuthenticatedRequest, CredentialsEntity, ICredentialsDb, User } from '@n8n/db';
import { CredentialsRepository } from '@n8n/db';
import { Container } from '@n8n/di';
import { mock } from 'jest-mock-extended';
import type { Response } from 'express';
import type { IWorkflowExecuteAdditionalData } from 'n8n-workflow';
import { UnexpectedError } from 'n8n-workflow';
import { OauthService, OauthVersion, shouldSkipAuthOnOAuthCallback } from '@/oauth/oauth.service';
import { CredentialsFinderService } from '@/credentials/credentials-finder.service';
import { CredentialsHelper } from '@/credentials-helper';
import { AuthError } from '@/errors/response-errors/auth.error';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { NotFoundError } from '@/errors/response-errors/not-found.error';
import type { OAuthRequest } from '@/requests';
import { UrlService } from '@/services/url.service';
import * as WorkflowExecuteAdditionalData from '@/workflow-execute-additional-data';
jest.mock('@/workflow-execute-additional-data');
describe('OauthService', () => {
const logger = mockInstance(Logger);
const credentialsHelper = mockInstance(CredentialsHelper);
const credentialsRepository = mockInstance(CredentialsRepository);
const credentialsFinderService = mockInstance(CredentialsFinderService);
const urlService = mockInstance(UrlService);
const globalConfig = mockInstance(GlobalConfig);
let service: OauthService;
const timestamp = 1706750625678;
jest.useFakeTimers({ advanceTimers: true });
beforeEach(() => {
jest.setSystemTime(new Date(timestamp));
jest.clearAllMocks();
globalConfig.endpoints = { rest: 'rest' } as any;
urlService.getInstanceBaseUrl.mockReturnValue('http://localhost:5678');
jest
.mocked(WorkflowExecuteAdditionalData.getBase)
.mockResolvedValue(mock<IWorkflowExecuteAdditionalData>());
service = new OauthService(
logger,
credentialsHelper,
credentialsRepository,
credentialsFinderService,
urlService,
globalConfig,
);
});
describe('shouldSkipAuthOnOAuthCallback', () => {
it('should return false when env var is not set', () => {
delete process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK;
expect(shouldSkipAuthOnOAuthCallback()).toBe(false);
});
it('should return false when env var is "false"', () => {
process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK = 'false';
expect(shouldSkipAuthOnOAuthCallback()).toBe(false);
});
it('should return true when env var is "true"', () => {
process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK = 'true';
expect(shouldSkipAuthOnOAuthCallback()).toBe(true);
});
it('should return true when env var is "TRUE" (case insensitive)', () => {
process.env.N8N_SKIP_AUTH_ON_OAUTH_CALLBACK = 'TRUE';
expect(shouldSkipAuthOnOAuthCallback()).toBe(true);
});
});
describe('getBaseUrl', () => {
it('should return correct URL for OAuth1', () => {
const url = service.getBaseUrl(OauthVersion.V1);
expect(url).toBe('http://localhost:5678/rest/oauth1-credential');
expect(urlService.getInstanceBaseUrl).toHaveBeenCalled();
});
it('should return correct URL for OAuth2', () => {
const url = service.getBaseUrl(OauthVersion.V2);
expect(url).toBe('http://localhost:5678/rest/oauth2-credential');
});
});
describe('getCredential', () => {
it('should throw BadRequestError when credential ID is missing', async () => {
const req = {
query: {},
user: mock<User>({ id: '123' }),
} as unknown as OAuthRequest.OAuth2Credential.Auth;
Object.defineProperty(req.query, 'id', {
value: undefined,
writable: true,
enumerable: true,
});
const promise = service.getCredential(req);
await expect(promise).rejects.toThrow(BadRequestError);
await expect(promise).rejects.toThrow('Required credential ID is missing');
});
it('should throw NotFoundError when credential is not found', async () => {
const req = mock<OAuthRequest.OAuth2Credential.Auth>({
query: { id: 'credential-id' },
user: mock<User>({ id: '123' }),
});
credentialsFinderService.findCredentialForUser.mockResolvedValue(null);
await expect(service.getCredential(req)).rejects.toThrow(NotFoundError);
expect(logger.error).toHaveBeenCalledWith(
'OAuth credential authorization failed because the current user does not have the correct permissions',
{ userId: '123' },
);
});
it('should return credential when found', async () => {
const mockCredential = mock<CredentialsEntity>({ id: 'credential-id' });
const req = mock<OAuthRequest.OAuth2Credential.Auth>({
query: { id: 'credential-id' },
user: mock<User>({ id: '123' }),
});
credentialsFinderService.findCredentialForUser.mockResolvedValue(mockCredential);
const result = await service.getCredential(req);
expect(result).toBe(mockCredential);
expect(credentialsFinderService.findCredentialForUser).toHaveBeenCalledWith(
'credential-id',
req.user,
['credential:read'],
);
});
});
describe('getAdditionalData', () => {
it('should return workflow execute additional data', async () => {
const mockAdditionalData = mock<IWorkflowExecuteAdditionalData>();
jest.mocked(WorkflowExecuteAdditionalData.getBase).mockResolvedValue(mockAdditionalData);
const result = await (service as any).getAdditionalData();
expect(result).toBe(mockAdditionalData);
expect(WorkflowExecuteAdditionalData.getBase).toHaveBeenCalled();
});
});
describe('getDecryptedDataForAuthUri', () => {
it('should call getDecryptedData with raw=false', async () => {
const credential = mock<ICredentialsDb>({ id: '1', type: 'test' });
const additionalData = mock<IWorkflowExecuteAdditionalData>();
const mockDecryptedData = { clientId: 'test' };
credentialsHelper.getDecrypted.mockResolvedValue(mockDecryptedData);
const result = await (service as any).getDecryptedDataForAuthUri(credential, additionalData);
expect(result).toBe(mockDecryptedData);
expect(credentialsHelper.getDecrypted).toHaveBeenCalledWith(
additionalData,
credential,
credential.type,
'internal',
undefined,
false,
);
});
});
describe('getDecryptedDataForCallback', () => {
it('should call getDecryptedData with raw=true', async () => {
const credential = mock<ICredentialsDb>({ id: '1', type: 'test' });
const additionalData = mock<IWorkflowExecuteAdditionalData>();
const mockDecryptedData = { csrfSecret: 'secret' };
credentialsHelper.getDecrypted.mockResolvedValue(mockDecryptedData);
const result = await (service as any).getDecryptedDataForCallback(credential, additionalData);
expect(result).toBe(mockDecryptedData);
expect(credentialsHelper.getDecrypted).toHaveBeenCalledWith(
additionalData,
credential,
credential.type,
'internal',
undefined,
true,
);
});
});
describe('applyDefaultsAndOverwrites', () => {
it('should apply defaults and overwrites', async () => {
const credential = mock<ICredentialsDb>({ id: '1', type: 'test' });
const decryptedData = { clientId: 'test' };
const additionalData = mock<IWorkflowExecuteAdditionalData>();
const mockResult = { clientId: 'test', clientSecret: 'secret' };
credentialsHelper.applyDefaultsAndOverwrites.mockResolvedValue(mockResult);
const result = await (service as any).applyDefaultsAndOverwrites(
credential,
decryptedData,
additionalData,
);
expect(result).toBe(mockResult);
expect(credentialsHelper.applyDefaultsAndOverwrites).toHaveBeenCalledWith(
additionalData,
decryptedData,
credential,
credential.type,
'internal',
undefined,
undefined,
);
});
});
describe('encryptAndSaveData', () => {
it('should encrypt and save data to repository', async () => {
const { Cipher } = await import('n8n-core');
const cipher = Container.get(Cipher);
const encryptedData = cipher.encrypt({ existing: 'data' });
const credential = mock<ICredentialsDb>({
id: '1',
type: 'test',
data: encryptedData,
});
const toUpdate = { clientId: 'new-id' };
const toDelete = ['oldField'];
await service.encryptAndSaveData(credential, toUpdate, toDelete);
expect(credentialsRepository.update).toHaveBeenCalledWith('1', {
id: '1',
name: expect.anything(),
type: 'test',
data: expect.any(String),
updatedAt: expect.any(Date),
});
});
it('should use empty array for toDelete when not provided', async () => {
const { Cipher } = await import('n8n-core');
const cipher = Container.get(Cipher);
const encryptedData = cipher.encrypt({ existing: 'data' });
const credential = mock<ICredentialsDb>({
id: '1',
type: 'test',
data: encryptedData,
});
const toUpdate = { clientId: 'new-id' };
await service.encryptAndSaveData(credential, toUpdate);
expect(credentialsRepository.update).toHaveBeenCalledWith('1', {
id: '1',
name: expect.anything(),
type: 'test',
data: expect.any(String),
updatedAt: expect.any(Date),
});
});
});
describe('getCredentialWithoutUser', () => {
it('should return credential from repository', async () => {
const mockCredential = mock<ICredentialsDb>({ id: '1' });
credentialsRepository.findOneBy.mockResolvedValue(mockCredential as any);
const result = await (service as any).getCredentialWithoutUser('1');
expect(result).toBe(mockCredential);
expect(credentialsRepository.findOneBy).toHaveBeenCalledWith({ id: '1' });
});
it('should return null when credential not found', async () => {
credentialsRepository.findOneBy.mockResolvedValue(null);
const result = await (service as any).getCredentialWithoutUser('1');
expect(result).toBeNull();
});
});
describe('createCsrfState', () => {
it('should create CSRF state with correct structure', () => {
const data = { cid: 'credential-id', userId: 'user-id' };
jest.setSystemTime(new Date(timestamp));
const [csrfSecret, encodedState] = service.createCsrfState(data);
expect(typeof csrfSecret).toBe('string');
expect(csrfSecret.length).toBeGreaterThan(0);
const decoded = JSON.parse(Buffer.from(encodedState, 'base64').toString());
expect(decoded.cid).toBe('credential-id');
expect(decoded.userId).toBe('user-id');
expect(decoded.token).toBeDefined();
expect(decoded.createdAt).toBe(timestamp);
});
it('should include additional data in state', () => {
const data = { cid: 'credential-id', customField: 'custom-value' };
jest.setSystemTime(new Date(timestamp));
const [, encodedState] = service.createCsrfState(data);
const decoded = JSON.parse(Buffer.from(encodedState, 'base64').toString());
expect(decoded.customField).toBe('custom-value');
});
});
describe('decodeCsrfState', () => {
it('should decode valid CSRF state', () => {
const state = {
token: 'token',
cid: 'credential-id',
userId: 'user-id',
createdAt: timestamp,
};
const encodedState = Buffer.from(JSON.stringify(state)).toString('base64');
const req = mock<AuthenticatedRequest>({
user: mock<User>({ id: 'user-id' }),
});
const result = (service as any).decodeCsrfState(encodedState, req);
expect(result).toEqual(state);
});
it('should throw error when state format is invalid', () => {
const invalidState = 'not-base64-json';
const req = mock<AuthenticatedRequest>({
user: mock<User>({ id: 'user-id' }),
});
expect(() => (service as any).decodeCsrfState(invalidState, req)).toThrow(
'Invalid state format',
);
});
it('should throw UnexpectedError when cid is missing', () => {
const state = {
token: 'token',
createdAt: timestamp,
};
const encodedState = Buffer.from(JSON.stringify(state)).toString('base64');
const req = mock<AuthenticatedRequest>({
user: mock<User>({ id: 'user-id' }),
});
expect(() => (service as any).decodeCsrfState(encodedState, req)).toThrow(UnexpectedError);
});
it('should throw UnexpectedError when token is missing', () => {
const state = {
cid: 'credential-id',
createdAt: timestamp,
};
const encodedState = Buffer.from(JSON.stringify(state)).toString('base64');
const req = mock<AuthenticatedRequest>({
user: mock<User>({ id: 'user-id' }),
});
expect(() => (service as any).decodeCsrfState(encodedState, req)).toThrow(UnexpectedError);
});
it('should throw AuthError when userId does not match', () => {
const state = {
token: 'token',
cid: 'credential-id',
userId: 'different-user-id',
createdAt: timestamp,
};
const encodedState = Buffer.from(JSON.stringify(state)).toString('base64');
const req = mock<AuthenticatedRequest>({
user: mock<User>({ id: 'user-id' }),
});
expect(() => (service as any).decodeCsrfState(encodedState, req)).toThrow(AuthError);
expect(() => (service as any).decodeCsrfState(encodedState, req)).toThrow('Unauthorized');
});
it('should throw AuthError when req.user is undefined', () => {
const state = {
token: 'token',
cid: 'credential-id',
userId: 'user-id',
createdAt: timestamp,
};
const encodedState = Buffer.from(JSON.stringify(state)).toString('base64');
const req = mock<AuthenticatedRequest>({
user: undefined,
});
expect(() => (service as any).decodeCsrfState(encodedState, req)).toThrow(AuthError);
});
});
describe('verifyCsrfState', () => {
it('should return true for valid CSRF state', () => {
const csrfSecret = 'csrf-secret';
const token = new (require('csrf'))();
const stateToken = token.create(csrfSecret);
const state = {
token: stateToken,
cid: 'credential-id',
createdAt: Date.now(),
};
const decrypted = { csrfSecret };
const result = (service as any).verifyCsrfState(decrypted, state);
expect(result).toBe(true);
});
it('should return false when CSRF state is expired', () => {
const csrfSecret = 'csrf-secret';
const token = new (require('csrf'))();
const stateToken = token.create(csrfSecret);
const expiredTime = Date.now() - 6 * Time.minutes.toMilliseconds;
const state = {
token: stateToken,
cid: 'credential-id',
createdAt: expiredTime,
};
const decrypted = { csrfSecret };
const result = (service as any).verifyCsrfState(decrypted, state);
expect(result).toBe(false);
});
it('should return false when csrfSecret is missing', () => {
const token = new (require('csrf'))();
const csrfSecret = 'csrf-secret';
const stateToken = token.create(csrfSecret);
const state = {
token: stateToken,
cid: 'credential-id',
createdAt: Date.now(),
};
const decrypted = {};
const result = (service as any).verifyCsrfState(decrypted, state);
expect(result).toBe(false);
});
it('should return false when token verification fails', () => {
const state = {
token: 'invalid-token',
cid: 'credential-id',
createdAt: Date.now(),
};
const decrypted = { csrfSecret: 'csrf-secret' };
const result = (service as any).verifyCsrfState(decrypted, state);
expect(result).toBe(false);
});
});
describe('resolveCredential', () => {
it('should resolve credential successfully', async () => {
const state = {
token: 'token',
cid: 'credential-id',
userId: 'user-id',
createdAt: timestamp,
};
const mockCredential = mock<CredentialsEntity>({ id: 'credential-id' });
const mockDecryptedData = { csrfSecret: 'csrf-secret' };
const mockOAuthCredentials = { clientId: 'client-id' };
const mockAdditionalData = mock<IWorkflowExecuteAdditionalData>();
const token = new (require('csrf'))();
const stateToken = token.create('csrf-secret');
state.token = stateToken;
const req = mock<OAuthRequest.OAuth2Credential.Callback>({
query: { state: Buffer.from(JSON.stringify(state)).toString('base64') },
user: mock<User>({ id: 'user-id' }),
});
credentialsRepository.findOneBy.mockResolvedValue(mockCredential);
jest.mocked(WorkflowExecuteAdditionalData.getBase).mockResolvedValue(mockAdditionalData);
credentialsHelper.getDecrypted.mockResolvedValue(mockDecryptedData);
credentialsHelper.applyDefaultsAndOverwrites.mockResolvedValue(mockOAuthCredentials);
jest.spyOn(service as any, 'verifyCsrfState').mockReturnValue(true);
const result = await service.resolveCredential(req);
expect(result).toEqual([mockCredential, mockDecryptedData, mockOAuthCredentials]);
});
it('should throw UnexpectedError when credential is not found', async () => {
const state = {
token: 'token',
cid: 'credential-id',
userId: 'user-id',
createdAt: timestamp,
};
const encodedState = Buffer.from(JSON.stringify(state)).toString('base64');
const req = mock<OAuthRequest.OAuth2Credential.Callback>({
query: { state: encodedState },
user: mock<User>({ id: 'user-id' }),
});
credentialsRepository.findOneBy.mockResolvedValue(null);
await expect(service.resolveCredential(req)).rejects.toThrow(UnexpectedError);
await expect(service.resolveCredential(req)).rejects.toThrow(
'OAuth callback failed because of insufficient permissions',
);
});
it('should throw UnexpectedError when CSRF state is invalid', async () => {
	// A well-formed state blob whose CSRF token will fail verification below.
	const csrfState = {
		token: 'token',
		cid: 'credential-id',
		userId: 'user-id',
		createdAt: timestamp,
	};
	const storedCredential = mock<CredentialsEntity>({ id: 'credential-id' });
	const decrypted = { csrfSecret: 'csrf-secret' };
	const applied = { clientId: 'client-id' };
	const additionalData = mock<IWorkflowExecuteAdditionalData>();
	const callbackRequest = mock<OAuthRequest.OAuth2Credential.Callback>({
		query: { state: Buffer.from(JSON.stringify(csrfState)).toString('base64') },
		user: mock<User>({ id: 'user-id' }),
	});

	// The credential resolves fine — only the CSRF check is made to fail.
	credentialsRepository.findOneBy.mockResolvedValue(storedCredential);
	jest.mocked(WorkflowExecuteAdditionalData.getBase).mockResolvedValue(additionalData);
	credentialsHelper.getDecrypted.mockResolvedValue(decrypted);
	credentialsHelper.applyDefaultsAndOverwrites.mockResolvedValue(applied);
	jest.spyOn(service as any, 'verifyCsrfState').mockReturnValue(false);

	const resolve = async () => await service.resolveCredential(callbackRequest);
	await expect(resolve()).rejects.toThrow(UnexpectedError);
	await expect(resolve()).rejects.toThrow('The OAuth callback state is invalid!');
});
});
describe('renderCallbackError', () => {
	it('should render error page with message', () => {
		const response = mock<Response>();

		service.renderCallbackError(response, 'Test error message');

		// Only the message is forwarded when no reason is given.
		expect(response.render).toHaveBeenCalledWith('oauth-error-callback', {
			error: { message: 'Test error message' },
		});
	});

	it('should render error page with message and reason', () => {
		const response = mock<Response>();

		service.renderCallbackError(response, 'Test error message', 'Test reason');

		// Both message and reason end up in the rendered error payload.
		expect(response.render).toHaveBeenCalledWith('oauth-error-callback', {
			error: { message: 'Test error message', reason: 'Test reason' },
		});
	});
});
describe('getOAuthCredentials', () => {
	it('should return OAuth credentials', async () => {
		const credential = mock<CredentialsEntity>({ id: '1', type: 'testOAuth2Api' });
		const decrypted = { clientId: 'client-id' };
		const applied = { clientId: 'client-id', clientSecret: 'secret' };
		const additionalData = mock<IWorkflowExecuteAdditionalData>();

		jest.mocked(WorkflowExecuteAdditionalData.getBase).mockResolvedValue(additionalData);
		credentialsHelper.getDecrypted.mockResolvedValue(decrypted);
		credentialsHelper.applyDefaultsAndOverwrites.mockResolvedValue(applied);

		// The service must hand back exactly what the defaults pipeline produced.
		await expect(service.getOAuthCredentials(credential)).resolves.toBe(applied);
	});

	it('should delete scope for non-generic OAuth2 credentials', async () => {
		const credential = mock<CredentialsEntity>({ id: '1', type: 'testOAuth2Api' });
		const additionalData = mock<IWorkflowExecuteAdditionalData>();

		jest.mocked(WorkflowExecuteAdditionalData.getBase).mockResolvedValue(additionalData);
		// Stored data carries a legacy scope that should be stripped on reconnect.
		credentialsHelper.getDecrypted.mockResolvedValue({
			clientId: 'client-id',
			scope: 'old-scope',
		});
		credentialsHelper.applyDefaultsAndOverwrites.mockResolvedValue({ clientId: 'client-id' });

		await service.getOAuthCredentials(credential);

		// The scope key must be gone before defaults are applied.
		expect(credentialsHelper.applyDefaultsAndOverwrites).toHaveBeenCalledWith(
			additionalData,
			{ clientId: 'client-id' },
			credential,
			credential.type,
			'internal',
			undefined,
			undefined,
		);
	});

	it('should not delete scope for generic OAuth2 credentials with editable scope', async () => {
		const credential = mock<CredentialsEntity>({ id: '1', type: 'oAuth2Api' });
		const additionalData = mock<IWorkflowExecuteAdditionalData>();

		jest.mocked(WorkflowExecuteAdditionalData.getBase).mockResolvedValue(additionalData);
		credentialsHelper.getDecrypted.mockResolvedValue({
			clientId: 'client-id',
			scope: 'old-scope',
		});
		credentialsHelper.applyDefaultsAndOverwrites.mockResolvedValue({
			clientId: 'client-id',
			scope: 'old-scope',
		});

		await service.getOAuthCredentials(credential);

		// Generic OAuth2 keeps its user-editable scope intact.
		expect(credentialsHelper.applyDefaultsAndOverwrites).toHaveBeenCalledWith(
			additionalData,
			{ clientId: 'client-id', scope: 'old-scope' },
			credential,
			credential.type,
			'internal',
			undefined,
			undefined,
		);
	});

	it('should not delete scope for non-OAuth2 credentials', async () => {
		const credential = mock<CredentialsEntity>({ id: '1', type: 'testApi' });
		const additionalData = mock<IWorkflowExecuteAdditionalData>();

		jest.mocked(WorkflowExecuteAdditionalData.getBase).mockResolvedValue(additionalData);
		credentialsHelper.getDecrypted.mockResolvedValue({
			clientId: 'client-id',
			scope: 'old-scope',
		});
		credentialsHelper.applyDefaultsAndOverwrites.mockResolvedValue({
			clientId: 'client-id',
			scope: 'old-scope',
		});

		await service.getOAuthCredentials(credential);

		// Scope stripping only targets OAuth2 credential types.
		expect(credentialsHelper.applyDefaultsAndOverwrites).toHaveBeenCalledWith(
			additionalData,
			{ clientId: 'client-id', scope: 'old-scope' },
			credential,
			credential.type,
			'internal',
			undefined,
			undefined,
		);
	});
});
});

View File

@@ -10,28 +10,33 @@ import { Credentials } from 'n8n-core';
import type { ICredentialDataDecryptedObject, IWorkflowExecuteAdditionalData } from 'n8n-workflow';
import { jsonParse, UnexpectedError } from 'n8n-workflow';
import { RESPONSE_ERROR_MESSAGES } from '@/constants';
import {
GENERIC_OAUTH2_CREDENTIALS_WITH_EDITABLE_SCOPE,
RESPONSE_ERROR_MESSAGES,
} from '@/constants';
import { CredentialsFinderService } from '@/credentials/credentials-finder.service';
import { CredentialsHelper } from '@/credentials-helper';
import { AuthError } from '@/errors/response-errors/auth.error';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { NotFoundError } from '@/errors/response-errors/not-found.error';
import { ExternalHooks } from '@/external-hooks';
import type { OAuthRequest } from '@/requests';
import { UrlService } from '@/services/url.service';
import * as WorkflowExecuteAdditionalData from '@/workflow-execute-additional-data';
type CsrfStateParam = {
/** Id of the oAuth credential in the DB */
cid: string;
type CsrfStateRequired = {
/** Random CSRF token, used to verify the signature of the CSRF state */
token: string;
/** Creation timestamp of the CSRF state. Used for expiration. */
createdAt: number;
/** User who initiated OAuth flow, included to prevent cross-user credential hijacking. Optional only if `skipAuthOnOAuthCallback` is enabled. */
userId?: string;
};
type CreateCsrfStateData = {
cid: string;
[key: string]: unknown;
};
type CsrfState = CsrfStateRequired & CreateCsrfStateData;
const MAX_CSRF_AGE = 5 * Time.minutes.toMilliseconds;
export function shouldSkipAuthOnOAuthCallback() {
@@ -41,13 +46,15 @@ export function shouldSkipAuthOnOAuthCallback() {
export const skipAuthOnOAuthCallback = shouldSkipAuthOnOAuthCallback();
@Service()
export abstract class AbstractOAuthController {
abstract oauthVersion: number;
export const enum OauthVersion {
V1 = 1,
V2 = 2,
}
@Service()
export class OauthService {
constructor(
protected readonly logger: Logger,
protected readonly externalHooks: ExternalHooks,
private readonly credentialsHelper: CredentialsHelper,
private readonly credentialsRepository: CredentialsRepository,
private readonly credentialsFinderService: CredentialsFinderService,
@@ -55,14 +62,12 @@ export abstract class AbstractOAuthController {
private readonly globalConfig: GlobalConfig,
) {}
get baseUrl() {
getBaseUrl(oauthVersion: OauthVersion) {
const restUrl = `${this.urlService.getInstanceBaseUrl()}/${this.globalConfig.endpoints.rest}`;
return `${restUrl}/oauth${this.oauthVersion}-credential`;
return `${restUrl}/oauth${oauthVersion}-credential`;
}
protected async getCredential(
req: OAuthRequest.OAuth2Credential.Auth,
): Promise<CredentialsEntity> {
async getCredential(req: OAuthRequest.OAuth2Credential.Auth): Promise<CredentialsEntity> {
const { id: credentialId } = req.query;
if (!credentialId) {
@@ -77,7 +82,7 @@ export abstract class AbstractOAuthController {
if (!credential) {
this.logger.error(
`OAuth${this.oauthVersion} credential authorization failed because the current user does not have the correct permissions`,
'OAuth credential authorization failed because the current user does not have the correct permissions',
{ userId: req.user.id },
);
throw new NotFoundError(RESPONSE_ERROR_MESSAGES.NO_CREDENTIAL);
@@ -141,7 +146,7 @@ export abstract class AbstractOAuthController {
)) as unknown as T;
}
protected async encryptAndSaveData(
async encryptAndSaveData(
credential: ICredentialsDb,
toUpdate: ICredentialDataDecryptedObject,
toDelete: string[] = [],
@@ -159,21 +164,20 @@ export abstract class AbstractOAuthController {
return await this.credentialsRepository.findOneBy({ id: credentialId });
}
createCsrfState(credentialsId: string, userId?: string): [string, string] {
createCsrfState(data: CreateCsrfStateData): [string, string] {
const token = new Csrf();
const csrfSecret = token.secretSync();
const state: CsrfStateParam = {
const state: CsrfState = {
token: token.create(csrfSecret),
cid: credentialsId,
createdAt: Date.now(),
userId,
...data,
};
return [csrfSecret, Buffer.from(JSON.stringify(state)).toString('base64')];
}
protected decodeCsrfState(encodedState: string, req: AuthenticatedRequest): CsrfStateParam {
protected decodeCsrfState(encodedState: string, req: AuthenticatedRequest): CsrfState {
const errorMessage = 'Invalid state format';
const decoded = jsonParse<CsrfStateParam>(Buffer.from(encodedState, 'base64').toString(), {
const decoded = jsonParse<CsrfState>(Buffer.from(encodedState, 'base64').toString(), {
errorMessage,
});
@@ -190,7 +194,7 @@ export abstract class AbstractOAuthController {
protected verifyCsrfState(
decrypted: ICredentialDataDecryptedObject & { csrfSecret?: string },
state: CsrfStateParam,
state: CsrfState,
) {
const token = new Csrf();
@@ -201,7 +205,7 @@ export abstract class AbstractOAuthController {
);
}
protected async resolveCredential<T>(
async resolveCredential<T>(
req: OAuthRequest.OAuth1Credential.Callback | OAuthRequest.OAuth2Credential.Callback,
): Promise<[ICredentialsDb, ICredentialDataDecryptedObject, T]> {
const { state: encodedState } = req.query;
@@ -230,7 +234,31 @@ export abstract class AbstractOAuthController {
return [credential, decryptedDataOriginal, oauthCredentials];
}
protected renderCallbackError(res: Response, message: string, reason?: string) {
renderCallbackError(res: Response, message: string, reason?: string) {
res.render('oauth-error-callback', { error: { message, reason } });
}
async getOAuthCredentials<T>(credential: CredentialsEntity): Promise<T> {
const additionalData = await this.getAdditionalData();
const decryptedDataOriginal = await this.getDecryptedDataForAuthUri(credential, additionalData);
// At some point in the past we saved hidden scopes to credentials (but shouldn't)
// Delete scope before applying defaults to make sure new scopes are present on reconnect
// Generic Oauth2 API is an exception because it needs to save the scope
if (
decryptedDataOriginal?.scope &&
credential.type.includes('OAuth2') &&
!GENERIC_OAUTH2_CREDENTIALS_WITH_EDITABLE_SCOPE.includes(credential.type)
) {
delete decryptedDataOriginal.scope;
}
const oauthCredentials = await this.applyDefaultsAndOverwrites<T>(
credential,
decryptedDataOriginal,
additionalData,
);
return oauthCredentials;
}
}

View File

@@ -200,7 +200,7 @@ export class Server extends AbstractServer {
const { frontendService } = this;
if (frontendService) {
await this.externalHooks.run('frontend.settings', [frontendService.getSettings()]);
await this.externalHooks.run('frontend.settings', [await frontendService.getSettings()]);
}
await this.postHogClient.init();
@@ -215,7 +215,7 @@ export class Server extends AbstractServer {
const { apiRouters, apiLatestVersion } = await loadPublicApiVersions(publicApiEndpoint);
this.app.use(...apiRouters);
if (frontendService) {
frontendService.settings.publicApi.latestVersion = apiLatestVersion;
(await frontendService.getSettings()).publicApi.latestVersion = apiLatestVersion;
}
}
@@ -487,7 +487,9 @@ export class Server extends AbstractServer {
`/${this.restEndpoint}/settings`,
authService.createAuthMiddleware({ allowSkipMFA: false, allowUnauthenticated: true }),
ResponseHelper.send(async (req: AuthenticatedRequest) => {
return req.user ? frontendService.getSettings() : frontendService.getPublicSettings();
return req.user
? await frontendService.getSettings()
: await frontendService.getPublicSettings();
}),
);
}

View File

@@ -14,6 +14,7 @@ import type { PushConfig } from '@/push/push.config';
import { FrontendService, type PublicFrontendSettings } from '@/services/frontend.service';
import type { UrlService } from '@/services/url.service';
import type { UserManagementMailer } from '@/user-management/email';
import type { OwnershipService } from '../ownership.service';
// Mock the workflow history helper functions to avoid DI container issues in tests
jest.mock('@/workflows/workflow-history/workflow-history-helper', () => ({
@@ -148,6 +149,10 @@ describe('FrontendService', () => {
isMFAEnforced: jest.fn().mockReturnValue(false),
});
const ownershipService = mock<OwnershipService>({
hasInstanceOwner: jest.fn().mockReturnValue(false),
});
const createMockService = () => {
Container.set(
CommunityPackagesConfig,
@@ -173,6 +178,7 @@ describe('FrontendService', () => {
licenseState,
moduleRegistry,
mfaService,
ownershipService,
),
license,
};
@@ -188,9 +194,9 @@ describe('FrontendService', () => {
});
describe('getSettings', () => {
it('should return frontend settings', () => {
it('should return frontend settings', async () => {
const { service } = createMockService();
const settings = service.getSettings();
const settings = await service.getSettings();
expect(settings).toEqual(
expect.objectContaining({
@@ -201,7 +207,7 @@ describe('FrontendService', () => {
});
describe('getPublicSettings', () => {
it('should return public settings', () => {
it('should return public settings', async () => {
const expectedPublicSettings: PublicFrontendSettings = {
settingsMode: 'public',
userManagement: {
@@ -223,7 +229,7 @@ describe('FrontendService', () => {
};
const { service } = createMockService();
const settings = service.getPublicSettings();
const settings = await service.getPublicSettings();
expect(settings).toEqual(expectedPublicSettings);
});
@@ -282,29 +288,31 @@ describe('FrontendService', () => {
});
describe('settings integration', () => {
it('should include envFeatureFlags in initial settings', () => {
it('should include envFeatureFlags in initial settings', async () => {
process.env = {
N8N_ENV_FEAT_INIT_FLAG: 'true',
N8N_ENV_FEAT_ANOTHER_FLAG: 'false',
};
const { service } = createMockService();
const settings = await service.getSettings();
expect(service.settings.envFeatureFlags).toEqual({
expect(settings.envFeatureFlags).toEqual({
N8N_ENV_FEAT_INIT_FLAG: 'true',
N8N_ENV_FEAT_ANOTHER_FLAG: 'false',
});
});
it('should refresh envFeatureFlags when getSettings is called', () => {
it('should refresh envFeatureFlags when getSettings is called', async () => {
process.env = {
N8N_ENV_FEAT_INITIAL_FLAG: 'true',
};
const { service } = createMockService();
const initialSettings = await service.getSettings();
// Verify initial state
expect(service.settings.envFeatureFlags).toEqual({
expect(initialSettings.envFeatureFlags).toEqual({
N8N_ENV_FEAT_INITIAL_FLAG: 'true',
});
@@ -315,7 +323,7 @@ describe('FrontendService', () => {
};
// getSettings should refresh the flags
const settings = service.getSettings();
const settings = await service.getSettings();
expect(settings.envFeatureFlags).toEqual({
N8N_ENV_FEAT_INITIAL_FLAG: 'false',
@@ -326,33 +334,33 @@ describe('FrontendService', () => {
});
describe('aiBuilder setting', () => {
it('should initialize aiBuilder setting as disabled by default', () => {
it('should initialize aiBuilder setting as disabled by default', async () => {
const { service } = createMockService();
expect(service.settings.aiBuilder).toEqual({
const initialSettings = await service.getSettings();
expect(initialSettings.aiBuilder).toEqual({
enabled: false,
setup: false,
});
});
it('should set aiBuilder.enabled to true when license has feat:aiBuilder', () => {
it('should set aiBuilder.enabled to true when license has feat:aiBuilder', async () => {
const { service, license } = createMockService();
license.isLicensed.mockImplementation((feature) => {
return feature === 'feat:aiBuilder';
});
const settings = service.getSettings();
const settings = await service.getSettings();
expect(settings.aiBuilder.enabled).toBe(true);
});
it('should keep aiBuilder.enabled as false when license does not have feat:aiBuilder', () => {
it('should keep aiBuilder.enabled as false when license does not have feat:aiBuilder', async () => {
const { service, license } = createMockService();
license.isLicensed.mockReturnValue(false);
const settings = service.getSettings();
const settings = await service.getSettings();
expect(settings.aiBuilder.enabled).toBe(false);
});

View File

@@ -0,0 +1,60 @@
import { testDb } from '@n8n/backend-test-utils';
import { GLOBAL_OWNER_ROLE } from '@n8n/db';
import { Container } from '@n8n/di';
import { HooksService } from '@/services/hooks.service';
import { OwnershipService } from '@/services/ownership.service';
import { createUserShell } from '@test-integration/db/users';
let hookService: HooksService;
let ownershipService: OwnershipService;
// See PAY-4247 - This test case can be deleted when the ticket is complete
describe('Ownership Service integration test', () => {
beforeEach(async () => {
await testDb.truncate(['User']);
await createUserShell(GLOBAL_OWNER_ROLE);
jest.clearAllMocks();
});
beforeAll(async () => {
await testDb.init();
hookService = Container.get(HooksService);
ownershipService = Container.get(OwnershipService);
});
afterAll(async () => {
await testDb.terminate();
});
it('should recognise ownership creation from cloud hooks', async () => {
expect(await ownershipService.hasInstanceOwner()).toBeFalsy();
const shellOwnerUser = await hookService.findOneUser({
where: {
role: {
slug: GLOBAL_OWNER_ROLE.slug,
},
},
});
// @ts-expect-error - this is how this function is called in the cloud hook so I match it here
await hookService.saveUser({
firstName: 'FN',
lastName: 'LN',
email: 'fn@ln.com',
password: '<hashed_password>',
id: shellOwnerUser!.id,
});
expect(await ownershipService.hasInstanceOwner()).toBeTruthy();
});
it('should recognise ownership creation from api', async () => {
expect(await ownershipService.hasInstanceOwner()).toBeFalsy();
await ownershipService.setupOwner({
firstName: 'TEST',
lastName: 'LN',
password: 'PW',
email: 'EM@em.com',
});
expect(await ownershipService.hasInstanceOwner()).toBeTruthy();
});
});

View File

@@ -1,5 +1,5 @@
import { Logger } from '@n8n/backend-common';
import { mockInstance } from '@n8n/backend-test-utils';
import type { SharedCredentials } from '@n8n/db';
import {
Project,
SharedWorkflow,
@@ -12,10 +12,15 @@ import {
GLOBAL_OWNER_ROLE,
PROJECT_OWNER_ROLE,
} from '@n8n/db';
import type { SharedCredentials, SettingsRepository } from '@n8n/db';
import { PROJECT_OWNER_ROLE_SLUG } from '@n8n/permissions';
import { mock } from 'jest-mock-extended';
import { v4 as uuid } from 'uuid';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import type { EventService } from '@/events/event.service';
import { OwnershipService } from '@/services/ownership.service';
import { PasswordUtility } from '@/services/password.utility';
import { mockCredential, mockProject } from '@test/mock-objects';
import { CacheService } from '../cache/cache.service';
@@ -25,11 +30,20 @@ describe('OwnershipService', () => {
const sharedWorkflowRepository = mockInstance(SharedWorkflowRepository);
const projectRelationRepository = mockInstance(ProjectRelationRepository);
const cacheService = mockInstance(CacheService);
const passwordUtility = mockInstance(PasswordUtility);
const logger = mockInstance(Logger);
const eventService = mock<EventService>();
const settingsRepository = mock<SettingsRepository>();
const ownershipService = new OwnershipService(
cacheService,
userRepository,
eventService,
logger,
passwordUtility,
projectRelationRepository,
sharedWorkflowRepository,
userRepository,
settingsRepository,
);
beforeEach(() => {
@@ -67,7 +81,8 @@ describe('OwnershipService', () => {
owner.role = GLOBAL_OWNER_ROLE;
const projectRelation = new ProjectRelation();
projectRelation.role = PROJECT_OWNER_ROLE;
(projectRelation.project = project), (projectRelation.user = owner);
projectRelation.project = project;
projectRelation.user = owner;
projectRelationRepository.getPersonalProjectOwners.mockResolvedValueOnce([projectRelation]);
@@ -94,8 +109,9 @@ describe('OwnershipService', () => {
owner.id = uuid();
owner.role = GLOBAL_OWNER_ROLE;
const projectRelation = new ProjectRelation();
projectRelation.role = { slug: PROJECT_OWNER_ROLE_SLUG } as any;
(projectRelation.project = project), (projectRelation.user = owner);
projectRelation.role = PROJECT_OWNER_ROLE;
projectRelation.project = project;
projectRelation.user = owner;
cacheService.getHashValue.mockResolvedValueOnce(owner);
userRepository.create.mockReturnValueOnce(owner);
@@ -226,4 +242,50 @@ describe('OwnershipService', () => {
});
});
});
describe('setupOwner()', () => {
it('should throw a BadRequestError if the instance owner is already setup', async () => {
jest.spyOn(userRepository, 'exists').mockResolvedValueOnce(true);
await expect(ownershipService.setupOwner(mock())).rejects.toThrowError(
new BadRequestError('Instance owner already setup'),
);
expect(userRepository.save).not.toHaveBeenCalled();
expect(eventService.emit).not.toHaveBeenCalled();
expect(logger.debug).toHaveBeenCalledWith(
'Request to claim instance ownership failed because instance owner already exists',
);
});
it('should setup the instance owner successfully', async () => {
const user = mock<User>({
id: 'userId',
role: GLOBAL_OWNER_ROLE,
authIdentities: [],
});
const payload = {
email: 'valid@email.com',
password: 'NewPassword123',
firstName: 'Jane',
lastName: 'Doe',
};
// not quite perfect as we hash the password.
const expected = { ...user, ...payload, id: 'newUserId' };
userRepository.exists.mockResolvedValueOnce(false);
userRepository.findOneOrFail.mockResolvedValueOnce(user);
userRepository.save.mockResolvedValueOnce(expected);
const actual = await ownershipService.setupOwner(payload);
expect(userRepository.save).toHaveBeenCalledWith(user, { transaction: false });
expect(eventService.emit).toHaveBeenCalledWith('instance-owner-setup', {
userId: 'newUserId',
});
expect(actual.id).toEqual('newUserId');
});
});
});

View File

@@ -20,6 +20,7 @@ import { getLdapLoginLabel } from '@/ldap.ee/helpers.ee';
import { License } from '@/license';
import { LoadNodesAndCredentials } from '@/load-nodes-and-credentials';
import { MfaService } from '@/mfa/mfa.service';
import { OwnershipService } from '@/services/ownership.service';
import { CommunityPackagesConfig } from '@/modules/community-packages/community-packages.config';
import type { CommunityPackagesService } from '@/modules/community-packages/community-packages.service';
import { isApiEnabled } from '@/public-api';
@@ -93,7 +94,7 @@ export type PublicFrontendSettings = {
@Service()
export class FrontendService {
settings: FrontendSettings;
private settings: FrontendSettings;
private communityPackagesService?: CommunityPackagesService;
@@ -113,12 +114,10 @@ export class FrontendService {
private readonly licenseState: LicenseState,
private readonly moduleRegistry: ModuleRegistry,
private readonly mfaService: MfaService,
private readonly ownershipService: OwnershipService,
) {
loadNodesAndCredentials.addPostProcessor(async () => await this.generateTypes());
void this.generateTypes();
this.initSettings();
// @TODO: Move to community-packages module
if (Container.get(CommunityPackagesConfig).enabled) {
void import('@/modules/community-packages/community-packages.service').then(
@@ -141,7 +140,7 @@ export class FrontendService {
return envFeatureFlags;
}
private initSettings() {
private async initSettings() {
const instanceBaseUrl = this.urlService.getInstanceBaseUrl();
const restEndpoint = this.globalConfig.endpoints.rest;
@@ -230,7 +229,7 @@ export class FrontendService {
defaultLocale: this.globalConfig.defaultLocale,
userManagement: {
quota: this.license.getUsersLimit(),
showSetupOnFirstLoad: !config.getEnv('userManagement.isInstanceOwnerSetUp'),
showSetupOnFirstLoad: !(await this.ownershipService.hasInstanceOwner()),
smtpSetup: this.mailer.isEmailSetUp,
authenticationMethod: getCurrentAuthenticationMethod(),
},
@@ -374,7 +373,10 @@ export class FrontendService {
this.writeStaticJSON('credentials', credentials);
}
getSettings(): FrontendSettings {
async getSettings(): Promise<FrontendSettings> {
if (!this.settings) {
await this.initSettings();
}
const restEndpoint = this.globalConfig.endpoints.rest;
// Update all urls, in case `WEBHOOK_URL` was updated by `--tunnel`
@@ -390,7 +392,7 @@ export class FrontendService {
Object.assign(this.settings.userManagement, {
quota: this.license.getUsersLimit(),
authenticationMethod: getCurrentAuthenticationMethod(),
showSetupOnFirstLoad: !config.getEnv('userManagement.isInstanceOwnerSetUp'),
showSetupOnFirstLoad: !(await this.ownershipService.hasInstanceOwner()),
});
let dismissedBanners: string[] = [];
@@ -517,7 +519,7 @@ export class FrontendService {
* Only add settings that are absolutely necessary for non-authenticated pages
* @returns Public settings for unauthenticated users
*/
getPublicSettings(): PublicFrontendSettings {
async getPublicSettings(): Promise<PublicFrontendSettings> {
// Get full settings to ensure all required properties are initialized
const {
userManagement: { authenticationMethod, showSetupOnFirstLoad, smtpSetup },
@@ -525,7 +527,7 @@ export class FrontendService {
authCookie,
previewMode,
enterprise: { saml, ldap, oidc },
} = this.getSettings();
} = await this.getSettings();
const publicSettings: PublicFrontendSettings = {
settingsMode: 'public',

View File

@@ -7,19 +7,31 @@ import {
SharedWorkflowRepository,
UserRepository,
Role,
SettingsRepository,
Scope,
} from '@n8n/db';
import { Service } from '@n8n/di';
import { Logger } from '@n8n/backend-common';
import { CacheService } from '@/services/cache/cache.service';
import { OwnerSetupRequestDto } from '@n8n/api-types';
import { BadRequestError } from '@/errors/response-errors/bad-request.error';
import { EventService } from '@/events/event.service';
import { PasswordUtility } from './password.utility';
import { IsNull } from '@n8n/typeorm/find-options/operator/IsNull';
import { Not } from '@n8n/typeorm/find-options/operator/Not';
import config from '@/config';
@Service()
export class OwnershipService {
constructor(
private cacheService: CacheService,
private userRepository: UserRepository,
private eventService: EventService,
private logger: Logger,
private passwordUtility: PasswordUtility,
private projectRelationRepository: ProjectRelationRepository,
private sharedWorkflowRepository: SharedWorkflowRepository,
private userRepository: UserRepository,
private settingsRepository: SettingsRepository,
) {}
// To make use of the cache service we should store POJOs, these
@@ -179,4 +191,64 @@ export class OwnershipService {
where: { role: { slug: GLOBAL_OWNER_ROLE.slug } },
});
}
async hasInstanceOwner() {
return await this.userRepository.exists({
where: [
{
role: { slug: GLOBAL_OWNER_ROLE.slug },
// We use this to avoid selecting the "shell" user
lastActiveAt: Not(IsNull()),
},
// OR
// This condition only exists because of PAY-4247
{
role: { slug: GLOBAL_OWNER_ROLE.slug },
// We use this to avoid selecting the "shell" user
password: Not(IsNull()),
},
],
relations: ['role'],
});
}
async setupOwner(payload: OwnerSetupRequestDto) {
const { email, firstName, lastName, password } = payload;
if (await this.hasInstanceOwner()) {
this.logger.debug(
'Request to claim instance ownership failed because instance owner already exists',
);
throw new BadRequestError('Instance owner already setup');
}
let shellUser = await this.userRepository.findOneOrFail({
where: { role: { slug: GLOBAL_OWNER_ROLE.slug } },
relations: ['role'],
});
shellUser.email = email;
shellUser.firstName = firstName;
shellUser.lastName = lastName;
shellUser.lastActiveAt = new Date();
shellUser.password = await this.passwordUtility.hash(password);
shellUser = await this.userRepository.save(shellUser, { transaction: false });
this.logger.info('Owner was set up successfully');
this.eventService.emit('instance-owner-setup', { userId: shellUser.id });
// The next block needs to be deleted and is temporary for now
// See packages/cli/src/config/schema.ts for more info
// We update the SettingsRepository so when we "startup" next time
// the config state is restored.
// #region Delete me
await this.settingsRepository.update(
{ key: 'userManagement.isInstanceOwnerSetUp' },
{ value: JSON.stringify(true) },
);
config.set('userManagement.isInstanceOwnerSetUp', true);
// #endregion
return shellUser;
}
}

View File

@@ -326,7 +326,6 @@ describe('Member', () => {
password: memberPassword,
role: GLOBAL_MEMBER_ROLE,
});
await utils.setInstanceOwnerSetUp(true);
});
test('POST /api-keys should create an api key with no expiration', async () => {

View File

@@ -31,7 +31,6 @@ beforeAll(async () => {
beforeEach(async () => {
await testDb.truncate(['User']);
config.set('ldap.disabled', true);
await utils.setInstanceOwnerSetUp(true);
});
describe('POST /login', () => {

View File

@@ -7,7 +7,6 @@ import {
} from '@n8n/backend-test-utils';
import {
CredentialsEntity,
SettingsRepository,
CredentialsRepository,
SharedCredentialsRepository,
SharedWorkflowRepository,
@@ -54,12 +53,6 @@ test('user-management:reset should reset DB to default user state', async () =>
await encryptCredentialData(Object.assign(new CredentialsEntity(), randomCredentialPayload())),
);
// mark instance as set up
await Container.get(SettingsRepository).update(
{ key: 'userManagement.isInstanceOwnerSetUp' },
{ value: 'true' },
);
//
// ACT
//
@@ -100,9 +93,4 @@ test('user-management:reset should reset DB to default user state', async () =>
await expect(
Container.get(SharedCredentialsRepository).findBy({ credentialsId: danglingCredential.id }),
).resolves.toMatchObject([{ projectId: ownerProject.id, role: 'credential:owner' }]);
// the instance is marked as not set up:
await expect(
Container.get(SettingsRepository).findBy({ key: 'userManagement.isInstanceOwnerSetUp' }),
).resolves.toMatchObject([{ value: 'false' }]);
});

View File

@@ -5,8 +5,8 @@ import { response as Response } from 'express';
import nock from 'nock';
import { parse as parseQs } from 'querystring';
import { OAuth2CredentialController } from '@/controllers/oauth/oauth2-credential.controller';
import { CredentialsHelper } from '@/credentials-helper';
import { OauthService } from '@/oauth/oauth.service';
import { saveCredential } from '@test-integration/db/credentials';
import { createMember, createOwner } from '@test-integration/db/users';
import type { SuperAgentTest } from '@test-integration/types';
@@ -52,8 +52,8 @@ describe('OAuth2 API', () => {
});
it('should return a valid auth URL when the auth flow is initiated', async () => {
const controller = Container.get(OAuth2CredentialController);
const csrfSpy = jest.spyOn(controller, 'createCsrfState').mockClear();
const oauthService = Container.get(OauthService);
const csrfSpy = jest.spyOn(oauthService, 'createCsrfState').mockClear();
const response = await ownerAgent
.get('/oauth2-credential/auth')
@@ -76,8 +76,8 @@ describe('OAuth2 API', () => {
});
it('should fail on auth when callback is called as another user', async () => {
const controller = Container.get(OAuth2CredentialController);
const csrfSpy = jest.spyOn(controller, 'createCsrfState').mockClear();
const oauthService = Container.get(OauthService);
const csrfSpy = jest.spyOn(oauthService, 'createCsrfState').mockClear();
const renderSpy = (Response.render = jest.fn(function () {
this.end();
}));
@@ -98,8 +98,8 @@ describe('OAuth2 API', () => {
});
it('should handle a valid callback without auth', async () => {
const controller = Container.get(OAuth2CredentialController);
const csrfSpy = jest.spyOn(controller, 'createCsrfState').mockClear();
const oauthService = Container.get(OauthService);
const csrfSpy = jest.spyOn(oauthService, 'createCsrfState').mockClear();
const renderSpy = (Response.render = jest.fn(function () {
this.end();
}));

View File

@@ -6,7 +6,6 @@ import { mock } from 'jest-mock-extended';
import { Cipher } from 'n8n-core';
import type { IDataObject } from 'n8n-workflow';
import config from '@/config';
import { CREDENTIAL_BLANKING_VALUE } from '@/constants';
import type { EventService } from '@/events/event.service';
import { License } from '@/license';
@@ -112,7 +111,6 @@ beforeAll(async () => {
authOwnerAgent = testServer.authAgentFor(owner);
const member = await createUser();
authMemberAgent = testServer.authAgentFor(member);
config.set('userManagement.isInstanceOwnerSetUp', true);
Container.set(
ExternalSecretsManager,
new ExternalSecretsManager(

View File

@@ -64,8 +64,6 @@ beforeEach(async () => {
jest.mock('@/telemetry');
config.set('userManagement.isInstanceOwnerSetUp', true);
await setCurrentAuthenticationMethod('email');
});

View File

@@ -144,7 +144,6 @@ describe('Member', () => {
role: { slug: 'global:member' },
});
authMemberAgent = testServer.authAgentFor(member);
await utils.setInstanceOwnerSetUp(true);
});
test('PATCH /me should succeed with valid inputs', async () => {
@@ -286,7 +285,6 @@ describe('Chat User', () => {
role: { slug: 'global:chatUser' },
});
authMemberAgent = testServer.authAgentFor(member);
await utils.setInstanceOwnerSetUp(true);
});
test('PATCH /me should succeed with valid inputs', async () => {

View File

@@ -7,11 +7,10 @@ import {
} from '@n8n/backend-test-utils';
import type { User } from '@n8n/db';
import { GLOBAL_OWNER_ROLE, UserRepository } from '@n8n/db';
import { OwnershipService } from '@/services/ownership.service';
import { Container } from '@n8n/di';
import validator from 'validator';
import config from '@/config';
import { createUserShell } from './shared/db/users';
import * as utils from './shared/utils/';
@@ -21,7 +20,6 @@ let ownerShell: User;
beforeEach(async () => {
ownerShell = await createUserShell(GLOBAL_OWNER_ROLE);
config.set('userManagement.isInstanceOwnerSetUp', false);
});
afterEach(async () => {
@@ -71,10 +69,7 @@ describe('POST /owner/setup', () => {
expect(storedOwner.firstName).toBe(newOwnerData.firstName);
expect(storedOwner.lastName).toBe(newOwnerData.lastName);
const isInstanceOwnerSetUpConfig = config.getEnv('userManagement.isInstanceOwnerSetUp');
expect(isInstanceOwnerSetUpConfig).toBe(true);
const isInstanceOwnerSetUpSetting = await utils.isInstanceOwnerSetUp();
const isInstanceOwnerSetUpSetting = await Container.get(OwnershipService).hasInstanceOwner();
expect(isInstanceOwnerSetUpSetting).toBe(true);
});

View File

@@ -43,13 +43,14 @@ async function handlePasswordSetup(password: string | null | undefined): Promise
/** Store a new user object, defaulting to a `member` */
export async function newUser(attributes: DeepPartial<User> = {}): Promise<User> {
const { email, password, firstName, lastName, role, ...rest } = attributes;
const { email, password, firstName, lastName, role, lastActiveAt, ...rest } = attributes;
return Container.get(UserRepository).create({
email: email ?? randomEmail(),
password: await handlePasswordSetup(password),
firstName: firstName ?? randomName(),
lastName: lastName ?? randomName(),
role: role ?? GLOBAL_MEMBER_ROLE,
lastActiveAt: lastActiveAt ?? new Date(),
...rest,
});
}

Some files were not shown because too many files have changed in this diff Show More