Files
AIclinicalresearch/backend/src/modules/ssa/services/ChatHandlerService.ts
HaHafeng 3446909ff7 feat(ssa): Complete Phase I-IV intelligent dialogue and tool system development
Phase I - Session Blackboard + READ Layer:
- SessionBlackboardService with Postgres-Only cache
- DataProfileService for data overview generation
- PicoInferenceService for LLM-driven PICO extraction
- Frontend DataContextCard and VariableDictionaryPanel
- E2E tests: 31/31 passed

Phase II - Conversation Layer LLM + Intent Router:
- ConversationService with SSE streaming
- IntentRouterService (rule-first + LLM fallback, 6 intents)
- SystemPromptService with 6-segment dynamic assembly
- TokenTruncationService for context management
- ChatHandlerService as unified chat entry
- Frontend SSAChatPane and useSSAChat hook
- E2E tests: 38/38 passed

Phase III - Method Consultation + AskUser Standardization:
- ToolRegistryService with Repository Pattern
- MethodConsultService with DecisionTable + LLM enhancement
- AskUserService with global interrupt handling
- Frontend AskUserCard component
- E2E tests: 13/13 passed

Phase IV - Dialogue-Driven Analysis + QPER Integration:
- ToolOrchestratorService (plan/execute/report)
- analysis_plan SSE event for WorkflowPlan transmission
- Dual-channel confirmation (ask_user card + workspace button)
- PICO as optional hint for LLM parsing
- E2E tests: 25/25 passed

R Statistics Service:
- 5 new R tools: anova_one, baseline_table, fisher, linear_reg, wilcoxon
- Enhanced guardrails and block helpers
- Comprehensive test suite (run_all_tools_test.js)

Documentation:
- Updated system status document (v5.9)
- Updated SSA module status and development plan (v1.8)

Total E2E: 107/107 passed (Phase I: 31, Phase II: 38, Phase III: 13, Phase IV: 25)

Co-authored-by: Cursor <cursoragent@cursor.com>
2026-02-22 18:53:39 +08:00

511 lines
18 KiB
TypeScript
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
/**
 * Phase II — 意图处理器(Intent Handlers)
 *
 * 按意图类型分发处理逻辑:
 * - chat: 直接 LLM 对话
 * - explore: READ 工具 + LLM 解读
 * - analyze: 转入 QPER 流水线 + LLM 摘要
 * - consult: LLM 方法推荐(Phase III 增强)
 * - discuss: LLM 结果解读(Phase V 增强)
 * - feedback: LLM 改进建议(Phase V 增强)
 */
import { logger } from '../../../common/logging/index.js';
import { conversationService, type StreamWriter } from './ConversationService.js';
import { sessionBlackboardService } from './SessionBlackboardService.js';
import { tokenTruncationService } from './TokenTruncationService.js';
import { methodConsultService } from './MethodConsultService.js';
import { askUserService, type AskUserResponse } from './AskUserService.js';
import { toolOrchestratorService } from './ToolOrchestratorService.js';
import type { IntentType } from './SystemPromptService.js';
import type { IntentResult } from './IntentRouterService.js';
/**
 * Outcome of dispatching a single user turn through the chat handler.
 */
export interface HandleResult {
  // ID of the pre-created assistant placeholder message the reply was written into.
  messageId: string;
  // Intent that was actually handled (may differ from the routed intent on fallback paths).
  intent: IntentType;
  // False when the handler threw; details are in `error`.
  success: boolean;
  // Error message, present only when `success` is false.
  error?: string;
}
/**
 * Unified chat entry point: routes a user turn to the handler that matches
 * its classified intent and streams the assistant reply back over SSE.
 *
 * Intents:
 * - chat:     plain LLM conversation (also the fallback path)
 * - explore:  READ-layer data summaries injected as tool output + LLM reading
 * - analyze:  QPER plan generation + dual-channel confirmation (Phase IV)
 * - consult:  statistical-method recommendation + ask_user card (Phase III)
 * - discuss:  interpretation of recent QPER trace results
 * - feedback: improvement suggestions (currently handled as plain chat)
 */
export class ChatHandlerService {
  /**
   * Dispatch entry point: routes by intent and converts any thrown error
   * into a failed HandleResult after marking the placeholder message.
   *
   * @param sessionId - SSA session the turn belongs to
   * @param conversationId - conversation thread used for context building
   * @param userContent - raw user message (consumed by the analyze planner)
   * @param intentResult - routed intent plus optional context-guard info
   * @param writer - SSE writer for the in-flight HTTP response
   * @param placeholderMessageId - pre-created assistant message to fill in
   */
  async handle(
    sessionId: string,
    conversationId: string,
    userContent: string,
    intentResult: IntentResult,
    writer: StreamWriter,
    placeholderMessageId: string,
  ): Promise<HandleResult> {
    const intent = intentResult.intent;
    try {
      // When the context guard fired with a hint, surface it to the LLM as a
      // pseudo tool output so the model can explain the limitation to the user.
      let toolOutputs: string | undefined;
      if (intentResult.guardTriggered && intentResult.guardMessage) {
        toolOutputs = `[系统提示] ${intentResult.guardMessage}`;
      }
      switch (intent) {
        case 'chat':
          return await this.handleChat(sessionId, conversationId, writer, placeholderMessageId, intent, toolOutputs);
        case 'explore':
          return await this.handleExplore(sessionId, conversationId, writer, placeholderMessageId, toolOutputs);
        case 'consult':
          return await this.handleConsult(sessionId, conversationId, writer, placeholderMessageId, toolOutputs);
        case 'analyze':
          return await this.handleAnalyze(sessionId, conversationId, userContent, writer, placeholderMessageId, toolOutputs);
        case 'discuss':
          return await this.handleDiscuss(sessionId, conversationId, writer, placeholderMessageId, toolOutputs);
        case 'feedback':
          return await this.handleChat(sessionId, conversationId, writer, placeholderMessageId, 'feedback', toolOutputs);
        default:
          return await this.handleChat(sessionId, conversationId, writer, placeholderMessageId, 'chat', toolOutputs);
      }
    } catch (error: unknown) {
      // `unknown` + explicit narrowing instead of the unsafe `catch (error: any)`.
      const message = this.describeError(error);
      logger.error('[SSA:ChatHandler] Handler error', {
        sessionId, intent, error: message,
      });
      await conversationService.markAssistantError(placeholderMessageId, message);
      return {
        messageId: placeholderMessageId,
        intent,
        success: false,
        error: message,
      };
    }
  }

  // ────────────────────────────────────────────
  // Shared helpers
  // ────────────────────────────────────────────

  /** Extract a human-readable message from an unknown thrown value. */
  private describeError(error: unknown): string {
    return error instanceof Error ? error.message : String(error);
  }

  /** Serialize a payload as a raw SSE `data:` frame and push it to the client. */
  private pushEvent(writer: StreamWriter, payload: Record<string, unknown>): void {
    writer.write(`data: ${JSON.stringify(payload)}\n\n`);
  }

  /**
   * Stream an LLM completion to the SSE writer, then persist the final
   * content/thinking/token counts onto the placeholder assistant message.
   * Factored out of every handler to remove the repeated stream→finalize pair.
   */
  private async streamAndFinalize(
    messages: Awaited<ReturnType<typeof conversationService.buildContext>>,
    writer: StreamWriter,
    placeholderMessageId: string,
    options: { temperature: number; maxTokens: number },
  ): Promise<void> {
    const result = await conversationService.streamToSSE(messages, writer, options);
    await conversationService.finalizeAssistantMessage(
      placeholderMessageId, result.content, result.thinking, result.tokens,
    );
  }

  // ────────────────────────────────────────────
  // chat / feedback — direct LLM conversation
  // ────────────────────────────────────────────

  /**
   * Plain conversational turn for the given intent; also serves as the
   * fallback for unrecognized intents and ask_user options.
   */
  private async handleChat(
    sessionId: string,
    conversationId: string,
    writer: StreamWriter,
    placeholderMessageId: string,
    intent: IntentType,
    toolOutputs?: string,
  ): Promise<HandleResult> {
    const messages = await conversationService.buildContext(
      sessionId, conversationId, intent, toolOutputs,
    );
    await this.streamAndFinalize(messages, writer, placeholderMessageId, {
      temperature: 0.7,
      maxTokens: 2000,
    });
    return { messageId: placeholderMessageId, intent, success: true };
  }

  // ────────────────────────────────────────────
  // explore — data exploration (READ tools + LLM interpretation)
  // ────────────────────────────────────────────

  /**
   * Inject truncated SessionBlackboard summaries (overview, variables,
   * PICO inference, diagnostics) as tool output and let the LLM interpret.
   */
  private async handleExplore(
    sessionId: string,
    conversationId: string,
    writer: StreamWriter,
    placeholderMessageId: string,
    guardToolOutput?: string,
  ): Promise<HandleResult> {
    const blackboard = await sessionBlackboardService.get(sessionId);
    let toolOutputs = guardToolOutput || '';
    if (blackboard) {
      // Trim the blackboard to budget before composing the context sections.
      const truncated = tokenTruncationService.truncate(blackboard, {
        maxTokens: 1500,
        strategy: 'balanced',
      });
      const sections: string[] = [];
      if (truncated.overview) {
        sections.push(`数据概览:\n${truncated.overview}`);
      }
      if (truncated.variables) {
        sections.push(`变量列表:\n${truncated.variables}`);
      }
      if (truncated.pico) {
        sections.push(`PICO 推断:\n${truncated.pico}`);
      }
      if (truncated.report) {
        sections.push(`数据诊断:\n${truncated.report}`);
      }
      if (sections.length > 0) {
        toolOutputs = (toolOutputs ? toolOutputs + '\n\n' : '') + sections.join('\n\n');
      }
    }
    const messages = await conversationService.buildContext(
      sessionId, conversationId, 'explore', toolOutputs || undefined,
    );
    await this.streamAndFinalize(messages, writer, placeholderMessageId, {
      temperature: 0.7,
      maxTokens: 2000,
    });
    return { messageId: placeholderMessageId, intent: 'explore', success: true };
  }

  // ────────────────────────────────────────────
  // analyze — hand off to QPER (Phase IV, dialogue-driven analysis)
  // ────────────────────────────────────────────

  /**
   * Phase IV analyze flow:
   *   1. plan generation via ToolOrchestratorService (PICO hint auto-injected, D5)
   *   2. push `analysis_plan` over SSE (frontend auto-creates the AnalysisRecord, D2)
   *   3. stream an LLM natural-language explanation of the plan
   *   4. push an ask_user confirmation card (reuses Phase III AskUserService)
   * Execution is triggered by the frontend via /workflow/{id}/stream
   * (D1: the workflow keeps its own independent SSE channel).
   */
  private async handleAnalyze(
    sessionId: string,
    conversationId: string,
    userMessage: string,
    writer: StreamWriter,
    placeholderMessageId: string,
    guardToolOutput?: string,
  ): Promise<HandleResult> {
    const planResult = await toolOrchestratorService.plan(sessionId, userMessage);
    if (!planResult.success || !planResult.plan) {
      // Planning failed — fall back to a friendly LLM reply asking for detail.
      const fallbackHint = [
        guardToolOutput,
        `[系统提示] 分析计划生成失败: ${planResult.error || '未知错误'}`,
        '请友好地告知用户需要更明确的分析需求描述,例如需要指明要分析哪些变量、比较什么。',
      ].filter(Boolean).join('\n');
      const messages = await conversationService.buildContext(
        sessionId, conversationId, 'analyze', fallbackHint,
      );
      await this.streamAndFinalize(messages, writer, placeholderMessageId, {
        temperature: 0.5, maxTokens: 800,
      });
      return { messageId: placeholderMessageId, intent: 'analyze', success: true };
    }
    const plan = planResult.plan;
    // Push the plan first so the frontend can render it before the LLM text.
    this.pushEvent(writer, { type: 'analysis_plan', plan });
    logger.info('[SSA:ChatHandler] analysis_plan pushed via SSE', {
      sessionId, workflowId: plan.workflow_id, totalSteps: plan.total_steps,
    });
    // Stream an LLM explanation of the generated plan.
    const planSummary = toolOrchestratorService.formatPlanForLLM(plan);
    const toolOutputs = [
      guardToolOutput,
      planSummary,
      '[系统提示] 你刚刚为用户制定了上述分析方案。请用自然语言向用户解释这个方案:包括为什么选这些方法、分析步骤的逻辑。不要重复列步骤编号和工具代码,要用用户能理解的语言说明。最后提示用户确认方案后即可执行。',
    ].filter(Boolean).join('\n\n');
    const messages = await conversationService.buildContext(
      sessionId, conversationId, 'analyze', toolOutputs,
    );
    await this.streamAndFinalize(messages, writer, placeholderMessageId, {
      temperature: 0.5, maxTokens: 1200,
    });
    // Dual-channel confirmation: ask_user card here; the workspace button
    // lives on the frontend side.
    const confirmQ = {
      inputType: 'confirm' as const,
      question: '请确认上述分析方案',
      context: `方案: ${plan.title},共 ${plan.total_steps} 个步骤`,
      options: [
        { id: 'confirm_plan', label: '确认执行', value: 'confirm_plan' },
        { id: 'change_method', label: '修改方案', value: 'change_method' },
      ],
      metadata: {
        workflowId: plan.workflow_id,
        planTitle: plan.title,
      },
    };
    const event = await askUserService.createQuestion(sessionId, confirmQ);
    writer.write(askUserService.formatSSE(event));
    return { messageId: placeholderMessageId, intent: 'analyze', success: true };
  }

  // ────────────────────────────────────────────
  // discuss — result discussion (inject recent analysis context)
  // ────────────────────────────────────────────

  /**
   * Inject a summary of the most recent successful QPER trace steps so the
   * LLM can discuss concrete results rather than generalities.
   */
  private async handleDiscuss(
    sessionId: string,
    conversationId: string,
    writer: StreamWriter,
    placeholderMessageId: string,
    guardToolOutput?: string,
  ): Promise<HandleResult> {
    const blackboard = await sessionBlackboardService.get(sessionId);
    let toolOutputs = guardToolOutput || '';
    // Only successful steps, capped at the last five.
    if (blackboard?.qperTrace && blackboard.qperTrace.length > 0) {
      const traceItems = blackboard.qperTrace
        .filter(t => t.status === 'success')
        .slice(-5)
        .map(t => `- 步骤${t.stepIndex}: ${t.toolCode}${t.summary}`)
        .join('\n');
      if (traceItems) {
        toolOutputs = (toolOutputs ? toolOutputs + '\n\n' : '') +
          `最近分析结果:\n${traceItems}`;
      }
    }
    const messages = await conversationService.buildContext(
      sessionId, conversationId, 'discuss', toolOutputs || undefined,
    );
    await this.streamAndFinalize(messages, writer, placeholderMessageId, {
      temperature: 0.7,
      maxTokens: 2000,
    });
    return { messageId: placeholderMessageId, intent: 'discuss', success: true };
  }

  // ────────────────────────────────────────────
  // consult — method recommendation (Phase III: method_consult + ask_user)
  // ────────────────────────────────────────────

  /**
   * Phase III consult flow: decision-table + LLM recommendation, streamed
   * explanation (conclusion-first, P1), then an ask_user confirmation card
   * when a primary method was matched.
   */
  private async handleConsult(
    sessionId: string,
    conversationId: string,
    writer: StreamWriter,
    placeholderMessageId: string,
    guardToolOutput?: string,
  ): Promise<HandleResult> {
    let toolOutputs = guardToolOutput || '';
    // 1. Ask MethodConsultService for a recommendation.
    const recommendation = await methodConsultService.recommend(sessionId);
    const recText = methodConsultService.formatForLLM(recommendation);
    toolOutputs = (toolOutputs ? toolOutputs + '\n\n' : '') + recText;
    logger.info('[SSA:ChatHandler] Method consult result', {
      sessionId,
      matched: recommendation.matched,
      primaryMethod: recommendation.primaryMethod?.code,
      matchScore: recommendation.matchScore,
      needsClarification: recommendation.needsClarification,
    });
    // 2. Stream the natural-language recommendation.
    const messages = await conversationService.buildContext(
      sessionId, conversationId, 'consult', toolOutputs,
    );
    await this.streamAndFinalize(messages, writer, placeholderMessageId, {
      temperature: 0.5,
      maxTokens: 1500,
    });
    // 3. Only push a confirmation card when there is a concrete match.
    if (recommendation.matched && recommendation.primaryMethod) {
      const confirmQ = askUserService.buildMethodConfirmQuestion(
        recommendation.primaryMethod.name,
        recommendation.primaryMethod.code,
        recommendation.fallbackMethod?.name,
      );
      const event = await askUserService.createQuestion(sessionId, confirmQ);
      writer.write(askUserService.formatSSE(event));
    }
    return { messageId: placeholderMessageId, intent: 'consult', success: true };
  }

  // ────────────────────────────────────────────
  // ask_user response handling (Phase III)
  // ────────────────────────────────────────────

  /**
   * Resume the conversation after the user answered (or skipped) an
   * ask_user card. Pending state is always cleared first so a stale
   * question can never block subsequent turns.
   */
  async handleAskUserResponse(
    sessionId: string,
    conversationId: string,
    response: AskUserResponse,
    writer: StreamWriter,
    placeholderMessageId: string,
  ): Promise<HandleResult> {
    await askUserService.clearPending(sessionId);

    if (response.action === 'skip') {
      // User skipped — acknowledge gracefully.
      const messages = await conversationService.buildContext(
        sessionId, conversationId, 'chat',
        '[系统提示] 用户跳过了上一个确认问题。请友好地回应,表示随时可以继续。',
      );
      await this.streamAndFinalize(messages, writer, placeholderMessageId, {
        temperature: 0.7,
        maxTokens: 500,
      });
      return { messageId: placeholderMessageId, intent: 'chat', success: true };
    }

    const selectedValue = response.selectedValues?.[0];
    if (selectedValue === 'confirm_plan') {
      // Phase IV: plan confirmed — the frontend triggers executeWorkflow
      // upon receiving the plan_confirmed event pushed below.
      // `??` (not `||`) keeps the nullish-only default semantics explicit.
      const workflowId = response.metadata?.workflowId ?? '';
      const messages = await conversationService.buildContext(
        sessionId, conversationId, 'analyze',
        `[系统提示] 用户已确认分析方案(workflow: ${workflowId})。请简要确认:"好的,方案已确认,正在准备执行分析..."。`,
      );
      await this.streamAndFinalize(messages, writer, placeholderMessageId, {
        temperature: 0.3, maxTokens: 300,
      });
      this.pushEvent(writer, { type: 'plan_confirmed', workflowId });
      return { messageId: placeholderMessageId, intent: 'analyze', success: true };
    }
    if (selectedValue === 'confirm') {
      // Phase III: recommended method accepted — tell the user how to proceed.
      const messages = await conversationService.buildContext(
        sessionId, conversationId, 'analyze',
        '[系统提示] 用户已确认使用推荐的统计方法。请简要确认方案,告知用户可以在对话中说"开始分析"或在右侧面板触发执行。',
      );
      await this.streamAndFinalize(messages, writer, placeholderMessageId, {
        temperature: 0.5,
        maxTokens: 800,
      });
      return { messageId: placeholderMessageId, intent: 'analyze', success: true };
    }
    if (selectedValue === 'use_fallback') {
      // Switch to the fallback method.
      const messages = await conversationService.buildContext(
        sessionId, conversationId, 'consult',
        '[系统提示] 用户选择使用备选方案。请确认切换,并简要说明备选方案的适用场景。',
      );
      await this.streamAndFinalize(messages, writer, placeholderMessageId, {
        temperature: 0.5,
        maxTokens: 800,
      });
      return { messageId: placeholderMessageId, intent: 'consult', success: true };
    }
    if (selectedValue === 'change_method') {
      // User rejected the recommendation — guide them to restate their needs.
      const messages = await conversationService.buildContext(
        sessionId, conversationId, 'consult',
        '[系统提示] 用户不满意当前推荐,想换方法。请询问用户希望使用什么方法,或引导其更详细地描述分析需求。',
      );
      await this.streamAndFinalize(messages, writer, placeholderMessageId, {
        temperature: 0.7,
        maxTokens: 800,
      });
      return { messageId: placeholderMessageId, intent: 'consult', success: true };
    }
    // Unknown option — fall back to a plain chat turn.
    return await this.handleChat(sessionId, conversationId, writer, placeholderMessageId, 'chat');
  }
}
export const chatHandlerService = new ChatHandlerService();