chore(ai-builder): Add template usage to multi-agent architecture (#22600)

This commit is contained in:
Michael Drury
2025-12-03 10:25:26 +00:00
committed by GitHub
parent 763b858548
commit 0c04e9294a
9 changed files with 261 additions and 42 deletions

View File

@@ -20,6 +20,7 @@ import type { SubgraphPhase } from './types/coordination';
import { createErrorMetadata } from './types/coordination';
import { getNextPhaseFromLog } from './utils/coordination-log';
import { processOperations } from './utils/operations-processor';
import type { BuilderFeatureFlags } from './workflow-builder-agent';
/**
* Maps routing decisions to graph node names.
@@ -42,6 +43,7 @@ export interface MultiAgentSubgraphConfig {
logger?: Logger;
instanceUrl?: string;
checkpointer?: MemorySaver;
featureFlags?: BuilderFeatureFlags;
}
/**
@@ -105,7 +107,8 @@ function createSubgraphNodeHandler<
* Parent graph orchestrates between subgraphs with minimal shared state.
*/
export function createMultiAgentWorkflowWithSubgraphs(config: MultiAgentSubgraphConfig) {
const { parsedNodeTypes, llmComplexTask, logger, instanceUrl, checkpointer } = config;
const { parsedNodeTypes, llmComplexTask, logger, instanceUrl, checkpointer, featureFlags } =
config;
const supervisorAgent = new SupervisorAgent({ llm: llmComplexTask });
const responderAgent = new ResponderAgent({ llm: llmComplexTask });
@@ -120,6 +123,7 @@ export function createMultiAgentWorkflowWithSubgraphs(config: MultiAgentSubgraph
parsedNodeTypes,
llm: llmComplexTask,
logger,
featureFlags,
});
const compiledBuilder = builderSubgraph.create({ parsedNodeTypes, llm: llmComplexTask, logger });
const compiledConfigurator = configuratorSubgraph.create({

View File

@@ -3,7 +3,9 @@ import { Annotation } from '@langchain/langgraph';
import type { CoordinationLogEntry } from './types/coordination';
import type { DiscoveryContext } from './types/discovery-types';
import type { NodeConfigurationsMap } from './types/tools';
import type { SimpleWorkflow, WorkflowOperation } from './types/workflow';
import { appendArrayReducer, nodeConfigurationsReducer } from './utils/state-reducers';
import type { ChatPayload } from './workflow-builder-agent';
/**
@@ -53,4 +55,17 @@ export const ParentGraphState = Annotation.Root({
reducer: (x, y) => x.concat(y),
default: () => [],
}),
// Template IDs fetched from workflow examples for telemetry
templateIds: Annotation<number[]>({
reducer: appendArrayReducer,
default: () => [],
}),
// Node configurations collected from workflow examples
// Used to provide example parameter configurations when calling tools
nodeConfigurations: Annotation<NodeConfigurationsMap>({
reducer: nodeConfigurationsReducer,
default: () => ({}),
}),
});

View File

@@ -15,16 +15,20 @@ import {
WorkflowTechnique,
type WorkflowTechniqueType,
} from '@/types/categorization';
import type { BuilderFeatureFlags } from '@/workflow-builder-agent';
import { BaseSubgraph } from './subgraph-interface';
import type { ParentGraphState } from '../parent-graph-state';
import { createGetBestPracticesTool } from '../tools/get-best-practices.tool';
import { createGetWorkflowExamplesTool } from '../tools/get-workflow-examples.tool';
import { createNodeDetailsTool } from '../tools/node-details.tool';
import { createNodeSearchTool } from '../tools/node-search.tool';
import type { CoordinationLogEntry } from '../types/coordination';
import { createDiscoveryMetadata } from '../types/coordination';
import type { NodeConfigurationsMap } from '../types/tools';
import { applySubgraphCacheMarkers } from '../utils/cache-control';
import { buildWorkflowSummary, createContextMessage } from '../utils/context-builders';
import { appendArrayReducer, nodeConfigurationsReducer } from '../utils/state-reducers';
import { executeSubgraphTools, extractUserRequest } from '../utils/subgraph-helpers';
/**
@@ -189,29 +193,77 @@ const discoveryOutputSchema = z.object({
.describe('List of n8n nodes identified as necessary for the workflow'),
});
/**
 * Options controlling which feature-flagged sections appear in the
 * generated discovery prompt.
 */
interface DiscoveryPromptOptions {
  // When true, the get_workflow_examples tool and its process step are
  // included in the prompt (driven by the templateExamples feature flag).
  includeExamples: boolean;
}
/**
 * Build the numbered PROCESS section of the discovery prompt.
 *
 * When `includeExamples` is true, an extra step instructing the agent to call
 * get_workflow_examples is inserted and the component-identification step
 * mentions examples as an additional input. Numbering is applied at the end so
 * the optional step never leaves a gap in the sequence.
 */
function generateProcessSteps(options: DiscoveryPromptOptions): string {
  const withExamples = options.includeExamples;

  // Assemble the steps in order; the list is numbered afterwards.
  const steps: string[] = [];
  steps.push('**Analyze user prompt** - Extract services, models, and technologies mentioned');
  steps.push('**Call get_best_practices** with identified techniques (internal context)');

  if (withExamples) {
    steps.push('**Call get_workflow_examples** with search queries for mentioned services/models');
  }

  steps.push(
    `**Identify workflow components** from user request, best practices${withExamples ? ', and examples' : ''}`,
  );
  steps.push('**Call search_nodes IN PARALLEL** for all components (e.g., "Gmail", "OpenAI", "Schedule")');
  steps.push('**Call get_node_details IN PARALLEL** for ALL promising nodes (batch multiple calls)');
  steps.push(`**Extract node information** from each node_details response:
- Node name from <name> tag
- Version number from <version> tag (required - extract the number)
- Connection-changing parameters from <connections> section`);
  steps.push('**Call submit_discovery_results** with complete nodesFound array');

  // Prefix each step with its 1-based position.
  const numbered: string[] = [];
  for (const [index, step] of steps.entries()) {
    numbered.push(`${index + 1}. ${step}`);
  }
  return numbered.join('\n');
}
/**
 * Build the AVAILABLE TOOLS section of the discovery prompt.
 *
 * The get_workflow_examples entry is listed only when `includeExamples` is
 * true; submit_discovery_results is always the final entry.
 */
function generateAvailableToolsList(options: DiscoveryPromptOptions): string {
  // Optional entry, spliced in between the base tools and the submit tool.
  const exampleTools = options.includeExamples
    ? ['- get_workflow_examples: Search for workflow examples as reference']
    : [];

  return [
    '- get_best_practices: Retrieve best practices (internal context)',
    '- search_nodes: Find n8n nodes by keyword',
    '- get_node_details: Get complete node information including <connections>',
    ...exampleTools,
    '- submit_discovery_results: Submit final results',
  ].join('\n');
}
/**
* Discovery Agent Prompt
*/
const DISCOVERY_PROMPT = `You are a Discovery Agent for n8n AI Workflow Builder.
function generateDiscoveryPrompt(options: DiscoveryPromptOptions): string {
const availableTools = generateAvailableToolsList(options);
const processSteps = generateProcessSteps(options);
return `You are a Discovery Agent for n8n AI Workflow Builder.
YOUR ROLE: Identify relevant n8n nodes and their connection-changing parameters.
AVAILABLE TOOLS:
- get_best_practices: Retrieve best practices (internal context)
- search_nodes: Find n8n nodes by keyword
- get_node_details: Get complete node information including <connections>
- submit_discovery_results: Submit final results
${availableTools}
PROCESS:
1. **Call get_best_practices** with identified techniques (internal context)
2. **Identify workflow components** from user request and best practices
3. **Call search_nodes IN PARALLEL** for all components (e.g., "Gmail", "OpenAI", "Schedule")
4. **Call get_node_details IN PARALLEL** for ALL promising nodes (batch multiple calls)
5. **Extract node information** from each node_details response:
- Node name from <name> tag
- Version number from <version> tag (required - extract the number)
- Connection-changing parameters from <connections> section
6. **Call submit_discovery_results** with complete nodesFound array
${processSteps}
TECHNIQUE CATEGORIZATION:
When calling get_best_practices, select techniques that match the user's workflow intent.
@@ -297,6 +349,7 @@ DO NOT:
- Flag parameters that don't affect connections
- Stop without calling submit_discovery_results
`;
}
/**
* Discovery Subgraph State
@@ -334,12 +387,26 @@ export const DiscoverySubgraphState = Annotation.Root({
bestPractices: Annotation<string | undefined>({
reducer: (x, y) => y ?? x,
}),
// Output: Template IDs fetched from workflow examples for telemetry
templateIds: Annotation<number[]>({
reducer: appendArrayReducer,
default: () => [],
}),
// Output: Node configurations collected from workflow examples
// Used to provide example parameter configurations when get_node_details is called
nodeConfigurations: Annotation<NodeConfigurationsMap>({
reducer: nodeConfigurationsReducer,
default: () => ({}),
}),
});
export interface DiscoverySubgraphConfig {
parsedNodeTypes: INodeTypeDescription[];
llm: BaseChatModel;
logger?: Logger;
featureFlags?: BuilderFeatureFlags;
}
export class DiscoverySubgraph extends BaseSubgraph<
@@ -357,12 +424,21 @@ export class DiscoverySubgraph extends BaseSubgraph<
create(config: DiscoverySubgraphConfig) {
this.logger = config.logger;
// Create tools
const tools = [
// Check if template examples are enabled
const includeExamples = config.featureFlags?.templateExamples === true;
// Create base tools
const baseTools = [
createGetBestPracticesTool(),
createNodeSearchTool(config.parsedNodeTypes),
createNodeDetailsTool(config.parsedNodeTypes),
];
// Conditionally add workflow examples tool if feature flag is enabled
const tools = includeExamples
? [...baseTools, createGetWorkflowExamplesTool(config.logger)]
: baseTools;
this.toolMap = new Map(tools.map((bt) => [bt.tool.name, bt.tool]));
// Define output tool
@@ -372,6 +448,9 @@ export class DiscoverySubgraph extends BaseSubgraph<
schema: discoveryOutputSchema,
});
// Generate prompt based on feature flags
const discoveryPrompt = generateDiscoveryPrompt({ includeExamples });
// Create agent with tools bound (including submit tool)
const systemPrompt = ChatPromptTemplate.fromMessages([
[
@@ -379,7 +458,7 @@ export class DiscoverySubgraph extends BaseSubgraph<
[
{
type: 'text',
text: DISCOVERY_PROMPT,
text: discoveryPrompt,
cache_control: { type: 'ephemeral' },
},
],
@@ -458,16 +537,20 @@ export class DiscoverySubgraph extends BaseSubgraph<
this.logger?.error('[Discovery] No submit tool call found in last message');
return {
nodesFound: [],
templateIds: [],
};
}
const bestPracticesTool = state.messages.find(
(m): m is ToolMessage => m.getType() === 'tool' && m?.text?.startsWith('<best_practices>'),
);
// Return raw output without hydration
// Return raw output without hydration, including templateIds and nodeConfigurations from workflow examples
return {
nodesFound: output.nodesFound,
bestPractices: bestPracticesTool?.text,
templateIds: state.templateIds ?? [],
nodeConfigurations: state.nodeConfigurations ?? {},
};
}
@@ -533,9 +616,12 @@ export class DiscoverySubgraph extends BaseSubgraph<
_parentState: typeof ParentGraphState.State,
) {
const nodesFound = subgraphOutput.nodesFound || [];
const templateIds = subgraphOutput.templateIds || [];
const nodeConfigurations = subgraphOutput.nodeConfigurations || {};
const discoveryContext = {
nodesFound,
bestPractices: subgraphOutput.bestPractices,
nodeConfigurations,
};
// Create coordination log entry (not a message)
@@ -554,6 +640,10 @@ export class DiscoverySubgraph extends BaseSubgraph<
return {
discoveryContext,
coordinationLog: [logEntry],
// Pass template IDs for telemetry
templateIds,
// Pass node configurations for example parameters in node details
nodeConfigurations,
};
}
}

View File

@@ -11,6 +11,42 @@ export class ContentGenerationBestPractices implements BestPracticesDocument {
Break complex tasks into sequential steps (e.g., generate text, create image, compose video) for modularity and easier troubleshooting.
## Node Selection Guidelines
Always prefer built-in n8n nodes over HTTP Request nodes when a dedicated node exists for the service or API you need to integrate with. Built-in nodes provide:
- Pre-configured authentication handling
- Optimized data structures and field mappings
- Better error handling and user experience
- Simplified setup without manual API configuration
Only use HTTP Request nodes when no built-in node exists for the service, or when you need to access an API endpoint not covered by the built-in node's operations.
## Multi-Modal Content Generation - MANDATORY
When the user's request involves specific generative AI models or media-focused platforms, the workflow MUST include the appropriate provider-specific media generation node. The finished workflow MUST contain the relevant video, audio, or image generation capability.
Prompts that require multi-modal generation nodes:
Video Generation:
- Model mentions: Sora, Nano Banana, Veo, Runway, Pika
- Platform mentions: YouTube content, TikTok videos, Instagram Reels, video ads, short-form video
- Task mentions: generate video, create video, video from text, animate
Image Generation:
- Model mentions: DALL-E, Midjourney, Stable Diffusion, Imagen
- Platform mentions: thumbnails, social media graphics, product images, marketing visuals
- Task mentions: generate image, create artwork, design graphic, visualize
Audio Generation:
- Model mentions: ElevenLabs, text-to-speech, TTS
- Platform mentions: podcast audio, voiceovers, narration, audio content
- Task mentions: generate voice, create audio, synthesize speech, clone voice
If anything like the examples above is mentioned in the prompt, include the appropriate
provider node (OpenAI for DALL-E/Sora, Google Gemini for Nano Banana/Imagen, etc.)
with the media generation operation configured.
## Content-Specific Guidance
For text generation, validate and sanitize input/output to avoid malformed data. When generating images, prefer binary data over URLs for uploads to avoid media type errors.
@@ -19,7 +55,7 @@ For text generation, validate and sanitize input/output to avoid malformed data.
### OpenAI (@n8n/n8n-nodes-langchain.openAi)
Purpose: GPT-based text generation, DALL-E image generation, text-to-speech (TTS), and audio transcription
Purpose: GPT-based text generation, DALL-E image generation, text-to-speech (TTS), audio transcription, and Sora video generation
### xAI Grok Chat Model (@n8n/n8n-nodes-langchain.lmChatXAiGrok)
@@ -27,7 +63,7 @@ Purpose: Conversational AI and text generation
### Google Gemini Chat Model (@n8n/n8n-nodes-langchain.lmChatGoogleGemini)
Purpose: Image analysis and generation, video generation from text prompts, multimodal content creation
Purpose: Image analysis and generation, video generation from text prompts using Nano Banana, multimodal content creation
### ElevenLabs

View File

@@ -1,3 +1,5 @@
import type { NodeConfigurationsMap } from './tools';
export interface DiscoveryContext {
nodesFound: Array<{
nodeName: string;
@@ -9,4 +11,5 @@ export interface DiscoveryContext {
}>;
}>;
bestPractices?: string;
nodeConfigurations?: NodeConfigurationsMap;
}

View File

@@ -0,0 +1,41 @@
import type { NodeConfigurationsMap } from '../types/tools';
/**
 * Reducer for appending arrays with null/empty check.
 *
 * Returns the existing array untouched (same reference) when the update is
 * null, undefined, or empty; otherwise returns a new array with the update's
 * elements appended after the current ones.
 */
export function appendArrayReducer<T>(current: T[], update: T[] | undefined | null): T[] {
  if (update == null || update.length === 0) {
    return current;
  }
  return current.concat(update);
}
/**
 * Merge node configurations by type, appending new configs to existing ones.
 *
 * Mutates `target` in place: for each node type present in `source`, its
 * configs are appended to the corresponding array on `target`, creating a
 * fresh array when the type is not yet present. Used as a standalone utility
 * for merging node configurations outside of reducers.
 */
export function mergeNodeConfigurations(
  target: NodeConfigurationsMap,
  source: NodeConfigurationsMap,
): void {
  for (const [nodeType, configs] of Object.entries(source)) {
    const existing = target[nodeType];
    if (existing) {
      existing.push(...configs);
    } else {
      // New array so `target` never shares a reference with `source`.
      target[nodeType] = [...configs];
    }
  }
}
/**
* Reducer for merging node configurations by type.
* Appends new configurations to existing ones for each node type.
*/
export function nodeConfigurationsReducer(
current: NodeConfigurationsMap,
update: NodeConfigurationsMap | undefined | null,
): NodeConfigurationsMap {
if (!update || Object.keys(update).length === 0) {
return current;
}
const merged = { ...current };
mergeNodeConfigurations(merged, update);
return merged;
}

View File

@@ -3,12 +3,16 @@ import { isAIMessage, ToolMessage, HumanMessage } from '@langchain/core/messages
import type { StructuredTool } from '@langchain/core/tools';
import { isCommand, END } from '@langchain/langgraph';
import { mergeNodeConfigurations } from './state-reducers';
import { isBaseMessage } from '../types/langchain';
import type { NodeConfigurationsMap } from '../types/tools';
import type { WorkflowOperation } from '../types/workflow';
interface CommandUpdate {
messages?: BaseMessage[];
workflowOperations?: WorkflowOperation[];
templateIds?: number[];
nodeConfigurations?: NodeConfigurationsMap;
}
/**
@@ -31,6 +35,18 @@ function isCommandUpdate(value: unknown): value is CommandUpdate {
) {
return false;
}
// templateIds is optional, but if present must be an array
if ('templateIds' in obj && obj.templateIds !== undefined && !Array.isArray(obj.templateIds)) {
return false;
}
// nodeConfigurations is optional, but if present must be an object
if (
'nodeConfigurations' in obj &&
obj.nodeConfigurations !== undefined &&
(typeof obj.nodeConfigurations !== 'object' || obj.nodeConfigurations === null)
) {
return false;
}
return true;
}
@@ -47,7 +63,12 @@ function isCommandUpdate(value: unknown): value is CommandUpdate {
export async function executeSubgraphTools(
state: { messages: BaseMessage[] },
toolMap: Map<string, StructuredTool>,
): Promise<{ messages?: BaseMessage[]; workflowOperations?: WorkflowOperation[] | null }> {
): Promise<{
messages?: BaseMessage[];
workflowOperations?: WorkflowOperation[] | null;
templateIds?: number[];
nodeConfigurations?: NodeConfigurationsMap;
}> {
const lastMessage = state.messages[state.messages.length - 1];
if (!lastMessage || !isAIMessage(lastMessage) || !lastMessage.tool_calls?.length) {
@@ -85,9 +106,11 @@ export async function executeSubgraphTools(
}),
);
// Unwrap Command objects and collect messages/operations
// Unwrap Command objects and collect messages/operations/templateIds/nodeConfigurations
const messages: BaseMessage[] = [];
const operations: WorkflowOperation[] = [];
const templateIds: number[] = [];
const nodeConfigurations: NodeConfigurationsMap = {};
for (const result of toolResults) {
if (isCommand(result)) {
@@ -99,6 +122,12 @@ export async function executeSubgraphTools(
if (result.update.workflowOperations) {
operations.push(...result.update.workflowOperations);
}
if (result.update.templateIds) {
templateIds.push(...result.update.templateIds);
}
if (result.update.nodeConfigurations) {
mergeNodeConfigurations(nodeConfigurations, result.update.nodeConfigurations);
}
}
} else if (isBaseMessage(result)) {
// Direct message (ToolMessage, AIMessage, etc.)
@@ -106,8 +135,12 @@ export async function executeSubgraphTools(
}
}
const stateUpdate: { messages?: BaseMessage[]; workflowOperations?: WorkflowOperation[] | null } =
{};
const stateUpdate: {
messages?: BaseMessage[];
workflowOperations?: WorkflowOperation[] | null;
templateIds?: number[];
nodeConfigurations?: NodeConfigurationsMap;
} = {};
if (messages.length > 0) {
stateUpdate.messages = messages;
@@ -117,6 +150,14 @@ export async function executeSubgraphTools(
stateUpdate.workflowOperations = operations;
}
if (templateIds.length > 0) {
stateUpdate.templateIds = templateIds;
}
if (Object.keys(nodeConfigurations).length > 0) {
stateUpdate.nodeConfigurations = nodeConfigurations;
}
return stateUpdate;
}

View File

@@ -210,7 +210,7 @@ export class WorkflowBuilderAgent {
* Create the multi-agent workflow graph
* Uses supervisor pattern with specialized agents
*/
private createMultiAgentGraph() {
private createMultiAgentGraph(featureFlags?: BuilderFeatureFlags) {
return createMultiAgentWorkflowWithSubgraphs({
parsedNodeTypes: this.parsedNodeTypes,
llmSimpleTask: this.llmSimpleTask,
@@ -218,6 +218,7 @@ export class WorkflowBuilderAgent {
logger: this.logger,
instanceUrl: this.instanceUrl,
checkpointer: this.checkpointer,
featureFlags,
});
}
@@ -443,7 +444,7 @@ export class WorkflowBuilderAgent {
private createWorkflow(featureFlags?: BuilderFeatureFlags) {
if (this.enableMultiAgent) {
this.logger?.debug('Using multi-agent supervisor architecture');
return this.createMultiAgentGraph();
return this.createMultiAgentGraph(featureFlags);
}
this.logger?.debug('Using legacy single-agent architecture');

View File

@@ -3,6 +3,7 @@ import { HumanMessage } from '@langchain/core/messages';
import { Annotation, messagesStateReducer } from '@langchain/langgraph';
import type { NodeConfigurationsMap, SimpleWorkflow, WorkflowOperation } from './types';
import { appendArrayReducer, nodeConfigurationsReducer } from './utils/state-reducers';
import type { ProgrammaticEvaluationResult, TelemetryValidationStatus } from './validation/types';
import type { ChatPayload } from './workflow-builder-agent';
@@ -105,26 +106,13 @@ export const WorkflowState = Annotation.Root({
// Node configurations collected from workflow examples
// Used to provide context when updating node parameters
nodeConfigurations: Annotation<NodeConfigurationsMap>({
reducer: (current, update) => {
if (!update || Object.keys(update).length === 0) {
return current;
}
// Merge configurations by node type, appending new configs to existing ones
const merged = { ...current };
for (const [nodeType, configs] of Object.entries(update)) {
if (!merged[nodeType]) {
merged[nodeType] = [];
}
merged[nodeType] = [...merged[nodeType], ...configs];
}
return merged;
},
reducer: nodeConfigurationsReducer,
default: () => ({}),
}),
// Template IDs fetched from workflow examples for telemetry
templateIds: Annotation<number[]>({
reducer: (current, update) => (update && update.length > 0 ? [...current, ...update] : current),
reducer: appendArrayReducer,
default: () => [],
}),
});