feat: 优化工作流参数编辑与告警交互
- 新增 ParamTokenEditor,支持参数选择插入、token 高亮、整段删除与光标避让 - 参数候选改为动态监测,未映射参数可选择并在下拉与输入框顶部告警 - 接入知识库/搜索引擎/LLM/动态代码/HTTP Body 及 SQL、查询数据自定义节点 - 优化 Http 节点布局并补充参数解析工具与单测
This commit is contained in:
@@ -0,0 +1,175 @@
|
||||
import { describe, expect, it } from 'vitest';
|
||||
|
||||
import {
|
||||
flattenParameterCandidates,
|
||||
findBackspaceTokenRange,
|
||||
findTokenRangeAtCursor,
|
||||
flattenParameterNames,
|
||||
insertTextAtCursor,
|
||||
parseTokenParts,
|
||||
splitTokenDisplay
|
||||
} from './paramToken';
|
||||
|
||||
describe('paramToken utils', () => {
|
||||
it('should flatten parameter names with nested paths', () => {
|
||||
const result = flattenParameterNames([
|
||||
{
|
||||
name: 'input'
|
||||
},
|
||||
{
|
||||
name: 'documents',
|
||||
children: [
|
||||
{
|
||||
name: 'title'
|
||||
},
|
||||
{
|
||||
name: 'meta',
|
||||
children: [
|
||||
{
|
||||
name: 'author'
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]);
|
||||
|
||||
expect(result).toEqual([
|
||||
'input',
|
||||
'documents',
|
||||
'documents.title',
|
||||
'documents.meta',
|
||||
'documents.meta.author'
|
||||
]);
|
||||
});
|
||||
|
||||
it('should keep unresolved candidates in parameter list', () => {
|
||||
const result = flattenParameterCandidates([
|
||||
{
|
||||
name: 'input',
|
||||
refType: 'ref',
|
||||
ref: ''
|
||||
},
|
||||
{
|
||||
name: 'docs',
|
||||
refType: 'ref',
|
||||
ref: 'documents'
|
||||
},
|
||||
{
|
||||
name: 'runtimeInput',
|
||||
refType: 'input'
|
||||
}
|
||||
]);
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
name: 'input',
|
||||
resolved: false
|
||||
},
|
||||
{
|
||||
name: 'docs',
|
||||
resolved: true
|
||||
},
|
||||
{
|
||||
name: 'runtimeInput',
|
||||
resolved: true
|
||||
}
|
||||
]);
|
||||
});
|
||||
|
||||
it('should insert token text in the middle by cursor range', () => {
|
||||
const result = insertTextAtCursor('hello world', '{{input}}', 6, 11);
|
||||
expect(result).toEqual({
|
||||
value: 'hello {{input}}',
|
||||
cursor: 15
|
||||
});
|
||||
});
|
||||
|
||||
it('should append token text when cursor info is missing', () => {
|
||||
const result = insertTextAtCursor('hello', '{{name}}');
|
||||
expect(result).toEqual({
|
||||
value: 'hello{{name}}',
|
||||
cursor: 13
|
||||
});
|
||||
});
|
||||
|
||||
it('should parse token parts and mark valid tokens', () => {
|
||||
const parts = parseTokenParts(
|
||||
'你好 {{ user.name }} 与 {{unknown}}',
|
||||
['user.name', 'docs']
|
||||
);
|
||||
|
||||
expect(parts).toEqual([
|
||||
{
|
||||
type: 'text',
|
||||
text: '你好 '
|
||||
},
|
||||
{
|
||||
type: 'token',
|
||||
text: '{{ user.name }}',
|
||||
key: 'user.name',
|
||||
valid: true
|
||||
},
|
||||
{
|
||||
type: 'text',
|
||||
text: ' 与 '
|
||||
},
|
||||
{
|
||||
type: 'token',
|
||||
text: '{{unknown}}',
|
||||
key: 'unknown',
|
||||
valid: false
|
||||
}
|
||||
]);
|
||||
});
|
||||
|
||||
it('should keep plain text when token syntax is invalid', () => {
|
||||
const parts = parseTokenParts('abc {{}} def', ['a']);
|
||||
expect(parts).toEqual([
|
||||
{
|
||||
type: 'text',
|
||||
text: 'abc {{}} def'
|
||||
}
|
||||
]);
|
||||
});
|
||||
|
||||
it('should split token display and hide braces text', () => {
|
||||
const result = splitTokenDisplay('{{ user.name }}', 'user.name');
|
||||
expect(result).toEqual({
|
||||
hiddenPrefix: '{{ ',
|
||||
visibleText: 'user.name',
|
||||
hiddenSuffix: ' }}'
|
||||
});
|
||||
});
|
||||
|
||||
it('should find full token range for backspace delete', () => {
|
||||
const content = 'hello {{input}} world';
|
||||
const tokenEndCursor = 'hello {{input}}'.length;
|
||||
const range = findBackspaceTokenRange(content, tokenEndCursor);
|
||||
|
||||
expect(range).toEqual({
|
||||
start: 6,
|
||||
end: 15,
|
||||
text: '{{input}}',
|
||||
key: 'input'
|
||||
});
|
||||
});
|
||||
|
||||
it('should support boundary match for arrow skip behavior', () => {
|
||||
const content = 'x{{docs}}y';
|
||||
const tokenStart = 1;
|
||||
const tokenEnd = 9;
|
||||
|
||||
const rightBoundary = findTokenRangeAtCursor(content, tokenStart, {
|
||||
includeStart: true,
|
||||
includeEnd: false
|
||||
});
|
||||
const leftBoundary = findTokenRangeAtCursor(content, tokenEnd, {
|
||||
includeStart: false,
|
||||
includeEnd: true
|
||||
});
|
||||
|
||||
expect(rightBoundary?.key).toBe('docs');
|
||||
expect(leftBoundary?.key).toBe('docs');
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,272 @@
|
||||
/**
 * Minimal shape of a workflow parameter node as consumed by these utilities.
 * Only the fields read here are declared; real node objects may carry more.
 */
export interface ParameterLike {
  /** Display name; also used as a path segment (joined with '.') when flattened. */
  name?: string;
  /** Reference target; a non-empty trimmed ref marks the parameter as resolvable. */
  ref?: string;
  /** Reference kind; 'fixed' and 'input' are treated as always resolved. */
  refType?: string;
  /** Nested child parameters, flattened into dotted paths (e.g. "parent.child"). */
  children?: ParameterLike[];
}
|
||||
|
||||
/** Position and content of one `{{ ... }}` token inside a content string. */
export interface TokenRange {
  /** Inclusive start offset of the token ('{{') within the content. */
  start: number;
  /** Exclusive end offset (index just past the closing '}}'). */
  end: number;
  /** Raw token text, braces included, e.g. "{{ user.name }}". */
  text: string;
  /** Trimmed parameter key inside the braces, e.g. "user.name". */
  key: string;
}
|
||||
|
||||
/**
 * One segment of tokenized content: either plain text or a recognized
 * `{{ ... }}` token (with validity checked against the known parameters).
 */
export type TokenPart =
  | {
      type: 'text';
      /** Literal text outside any token. */
      text: string;
    }
  | {
      type: 'token';
      /** Raw token text including braces. */
      text: string;
      /** Trimmed key inside the braces. */
      key: string;
      /** True when the key matches one of the caller-supplied valid params. */
      valid: boolean;
    };
|
||||
|
||||
/** Flattened parameter entry offered to the parameter picker. */
export interface ParameterCandidate {
  /** Dotted path name, e.g. "documents.meta.author". */
  name: string;
  /** False when the parameter (or an ancestor) has no usable reference yet. */
  resolved: boolean;
}
|
||||
|
||||
// Matches `{{ key }}` tokens; the key may not contain braces, and surrounding
// whitespace inside the braces is excluded from the captured group.
// NOTE: shared with the /g flag — every caller must reset lastIndex before scanning.
const TOKEN_PATTERN = /\{\{\s*([^{}]+?)\s*}}/g;
|
||||
|
||||
export function normalizeTokenKey(tokenKey: string): string {
|
||||
return tokenKey.trim();
|
||||
}
|
||||
|
||||
export function flattenParameterNames(parameters?: ParameterLike[] | null): string[] {
|
||||
return flattenParameterCandidates(parameters).map((item) => item.name);
|
||||
}
|
||||
|
||||
function isParameterResolved(parameter?: ParameterLike): boolean {
|
||||
if (!parameter) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const refType = (parameter.refType || '').trim();
|
||||
if (refType === 'fixed' || refType === 'input') {
|
||||
return true;
|
||||
}
|
||||
|
||||
const ref = (parameter.ref || '').trim();
|
||||
return !!ref;
|
||||
}
|
||||
|
||||
export function flattenParameterCandidates(parameters?: ParameterLike[] | null): ParameterCandidate[] {
|
||||
if (!parameters || parameters.length === 0) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const candidates: ParameterCandidate[] = [];
|
||||
const indexMap = new Map<string, number>();
|
||||
|
||||
const addCandidate = (name: string, resolved: boolean) => {
|
||||
const normalized = name.trim();
|
||||
if (!normalized) {
|
||||
return;
|
||||
}
|
||||
const exists = indexMap.get(normalized);
|
||||
if (exists === undefined) {
|
||||
indexMap.set(normalized, candidates.length);
|
||||
candidates.push({
|
||||
name: normalized,
|
||||
resolved
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// 同名参数只要有一个可解析,就视为可解析
|
||||
if (resolved) {
|
||||
candidates[exists].resolved = true;
|
||||
}
|
||||
};
|
||||
|
||||
const walk = (items: ParameterLike[], parentPath = '', inheritedResolved = true) => {
|
||||
for (const item of items) {
|
||||
const rawName = item?.name?.trim();
|
||||
if (!rawName) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const currentPath = parentPath ? `${parentPath}.${rawName}` : rawName;
|
||||
const currentResolved = inheritedResolved && isParameterResolved(item);
|
||||
addCandidate(currentPath, currentResolved);
|
||||
|
||||
if (item.children && item.children.length > 0) {
|
||||
walk(item.children, currentPath, currentResolved);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
walk(parameters);
|
||||
return candidates;
|
||||
}
|
||||
|
||||
export function parseTokenParts(content: string, validParams: string[] = []): TokenPart[] {
|
||||
const source = content ?? '';
|
||||
const validSet = new Set(validParams.map(normalizeTokenKey));
|
||||
const parts: TokenPart[] = [];
|
||||
|
||||
let lastIndex = 0;
|
||||
TOKEN_PATTERN.lastIndex = 0;
|
||||
let match: RegExpExecArray | null = TOKEN_PATTERN.exec(source);
|
||||
|
||||
while (match) {
|
||||
if (match.index > lastIndex) {
|
||||
parts.push({
|
||||
type: 'text',
|
||||
text: source.slice(lastIndex, match.index)
|
||||
});
|
||||
}
|
||||
|
||||
const rawToken = match[0];
|
||||
const tokenKey = normalizeTokenKey(match[1] || '');
|
||||
parts.push({
|
||||
type: 'token',
|
||||
text: rawToken,
|
||||
key: tokenKey,
|
||||
valid: validSet.has(tokenKey)
|
||||
});
|
||||
|
||||
lastIndex = match.index + rawToken.length;
|
||||
match = TOKEN_PATTERN.exec(source);
|
||||
}
|
||||
|
||||
if (lastIndex < source.length) {
|
||||
parts.push({
|
||||
type: 'text',
|
||||
text: source.slice(lastIndex)
|
||||
});
|
||||
}
|
||||
|
||||
if (parts.length === 0) {
|
||||
parts.push({
|
||||
type: 'text',
|
||||
text: source
|
||||
});
|
||||
}
|
||||
|
||||
return parts;
|
||||
}
|
||||
|
||||
export function getTokenRanges(content: string): TokenRange[] {
|
||||
const source = content ?? '';
|
||||
const ranges: TokenRange[] = [];
|
||||
TOKEN_PATTERN.lastIndex = 0;
|
||||
let match: RegExpExecArray | null = TOKEN_PATTERN.exec(source);
|
||||
|
||||
while (match) {
|
||||
const rawToken = match[0];
|
||||
ranges.push({
|
||||
start: match.index,
|
||||
end: match.index + rawToken.length,
|
||||
text: rawToken,
|
||||
key: normalizeTokenKey(match[1] || '')
|
||||
});
|
||||
match = TOKEN_PATTERN.exec(source);
|
||||
}
|
||||
|
||||
return ranges;
|
||||
}
|
||||
|
||||
export function findTokenRangeAtCursor(
|
||||
content: string,
|
||||
cursor: number,
|
||||
options?: {
|
||||
includeStart?: boolean;
|
||||
includeEnd?: boolean;
|
||||
}
|
||||
): TokenRange | null {
|
||||
if (!Number.isInteger(cursor) || cursor < 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const includeStart = options?.includeStart ?? false;
|
||||
const includeEnd = options?.includeEnd ?? false;
|
||||
const ranges = getTokenRanges(content);
|
||||
for (const range of ranges) {
|
||||
const leftValid = includeStart ? cursor >= range.start : cursor > range.start;
|
||||
const rightValid = includeEnd ? cursor <= range.end : cursor < range.end;
|
||||
if (leftValid && rightValid) {
|
||||
return range;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
export function findBackspaceTokenRange(content: string, cursor: number): TokenRange | null {
|
||||
return findTokenRangeAtCursor(content, cursor, {
|
||||
includeStart: false,
|
||||
includeEnd: true
|
||||
});
|
||||
}
|
||||
|
||||
export function splitTokenDisplay(rawToken: string, normalizedKey?: string): {
|
||||
hiddenPrefix: string;
|
||||
visibleText: string;
|
||||
hiddenSuffix: string;
|
||||
} {
|
||||
const source = rawToken ?? '';
|
||||
if (!source.startsWith('{{') || !source.endsWith('}}')) {
|
||||
return {
|
||||
hiddenPrefix: '',
|
||||
visibleText: normalizedKey || source,
|
||||
hiddenSuffix: ''
|
||||
};
|
||||
}
|
||||
|
||||
const inner = source.slice(2, -2);
|
||||
const visibleText = normalizeTokenKey(normalizedKey || inner);
|
||||
if (!visibleText) {
|
||||
return {
|
||||
hiddenPrefix: '',
|
||||
visibleText: source,
|
||||
hiddenSuffix: ''
|
||||
};
|
||||
}
|
||||
|
||||
const innerStart = inner.indexOf(visibleText);
|
||||
const leading = innerStart >= 0 ? inner.slice(0, innerStart) : '';
|
||||
const trailing = innerStart >= 0 ? inner.slice(innerStart + visibleText.length) : '';
|
||||
|
||||
return {
|
||||
hiddenPrefix: `{{${leading}`,
|
||||
visibleText,
|
||||
hiddenSuffix: `${trailing}}}`
|
||||
};
|
||||
}
|
||||
|
||||
export function insertTextAtCursor(
|
||||
content: string,
|
||||
insertedText: string,
|
||||
selectionStart?: number | null,
|
||||
selectionEnd?: number | null
|
||||
): {
|
||||
value: string;
|
||||
cursor: number;
|
||||
} {
|
||||
const source = content ?? '';
|
||||
const start = Number.isInteger(selectionStart)
|
||||
? Math.max(0, Math.min(selectionStart as number, source.length))
|
||||
: source.length;
|
||||
const end = Number.isInteger(selectionEnd)
|
||||
? Math.max(start, Math.min(selectionEnd as number, source.length))
|
||||
: start;
|
||||
|
||||
const nextValue = source.slice(0, start) + insertedText + source.slice(end);
|
||||
return {
|
||||
value: nextValue,
|
||||
cursor: start + insertedText.length
|
||||
};
|
||||
}
|
||||
|
||||
export function escapeHtml(text: string): string {
|
||||
return text
|
||||
.replaceAll('&', '&')
|
||||
.replaceAll('<', '<')
|
||||
.replaceAll('>', '>')
|
||||
.replaceAll('"', '"')
|
||||
.replaceAll("'", ''');
|
||||
}
|
||||
Reference in New Issue
Block a user