Files
AIclinicalresearch/backend/src/modules/ssa/services/ChatHandlerService.ts
HaHafeng 52989cd03f feat(ssa): Agent channel UX optimization (Solution B) + Plan-and-Execute architecture design
SSA Agent channel improvements (12 code files, +931/-203 lines):
- Solution B: left/right separation of concerns (gaze guiding + state mutex + time-travel)
- JWT token refresh mechanism (ensureFreshToken) to fix HTTP 401 during pipeline
- Code truncation fix: LLM maxTokens 4000->8000 + CSS max-height 60vh
- Retry streaming code generation with generateCodeStream()
- R Docker structured errors: 20+ pattern matching + format_agent_error + line extraction
- Prompt iron rules: strict output format in CoderAgent System Prompt
- parseCode robustness: XML/Markdown/inference 3-tier matching + length validation
- consoleOutput type defense: handle both array and scalar from R Docker unboxedJSON
- Agent progress bar sync: derive phase from agentExecution.status
- Export report / view code buttons restored for Agent mode
- ExecutingProgress component: real-time timer + dynamic tips + step pulse animation

Architecture design (3 review reports):
- Plan-and-Execute step-by-step execution architecture approved
- Code accumulation strategy (R Docker stays stateless)
- 5 engineering guardrails: XML tags, AST pre-check, defensive prompts, high-fidelity schema, error classification circuit breaker

Docs: update SSA module status v4.1, system status v6.7, deployment changelist
Made-with: Cursor
2026-03-07 22:32:32 +08:00

924 lines
36 KiB
TypeScript
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
/**
 * Phase II — 意图处理器(Intent Handlers)
 *
 * 按意图类型分发处理逻辑:
 * - chat: 直接 LLM 对话
 * - explore: READ 工具 + LLM 解读
 * - analyze: 转入 QPER 流水线 + LLM 摘要
 * - consult: LLM 方法推荐(Phase III 增强)
 * - discuss: LLM 结果解读(Phase V 增强)
 * - feedback: LLM 改进建议(Phase V 增强)
 */
import { logger } from '../../../common/logging/index.js';
import { conversationService, type StreamWriter } from './ConversationService.js';
import { sessionBlackboardService } from './SessionBlackboardService.js';
import { tokenTruncationService } from './TokenTruncationService.js';
import { methodConsultService } from './MethodConsultService.js';
import { askUserService, type AskUserResponse } from './AskUserService.js';
import { toolOrchestratorService } from './ToolOrchestratorService.js';
import { agentPlannerService } from './AgentPlannerService.js';
import { agentCoderService } from './AgentCoderService.js';
import { codeRunnerService } from './CodeRunnerService.js';
import { prisma } from '../../../config/database.js';
import type { IntentType } from './SystemPromptService.js';
import type { IntentResult } from './IntentRouterService.js';
/**
 * Result returned by every handler method in this service.
 */
export interface HandleResult {
  /** ID of the placeholder assistant message that was finalized (or marked errored). */
  messageId: string;
  /** Intent under which the turn was ultimately handled. */
  intent: IntentType;
  /** False only when the handler caught an error; see `error` for details. */
  success: boolean;
  /** Error message, present only when `success` is false. */
  error?: string;
}
export class ChatHandlerService {
  /**
   * Unified entry point: dispatches a user turn to a handler based on the
   * routed intent.
   *
   * On any handler error the placeholder assistant message is marked as
   * errored and a failed HandleResult is returned instead of rethrowing.
   *
   * @param sessionId            SSA session identifier.
   * @param conversationId       Conversation whose context is built for the LLM.
   * @param userContent          Raw user message text.
   * @param intentResult         Output of IntentRouterService (intent + guard info).
   * @param writer               SSE stream writer for incremental output.
   * @param placeholderMessageId Pre-created assistant message to finalize.
   */
  async handle(
    sessionId: string,
    conversationId: string,
    userContent: string,
    intentResult: IntentResult,
    writer: StreamWriter,
    placeholderMessageId: string,
  ): Promise<HandleResult> {
    const intent = intentResult.intent;
    try {
      // If the context guard was triggered and carries a hint message, inject
      // it directly into the LLM context as a tool output.
      let toolOutputs: string | undefined;
      if (intentResult.guardTriggered && intentResult.guardMessage) {
        toolOutputs = `[系统提示] ${intentResult.guardMessage}`;
      }
      switch (intent) {
        case 'chat':
          return await this.handleChat(sessionId, conversationId, writer, placeholderMessageId, intent, toolOutputs);
        case 'explore':
          return await this.handleExplore(sessionId, conversationId, writer, placeholderMessageId, toolOutputs);
        case 'consult':
          return await this.handleConsult(sessionId, conversationId, writer, placeholderMessageId, toolOutputs);
        case 'analyze':
          return await this.handleAnalyze(sessionId, conversationId, userContent, writer, placeholderMessageId, toolOutputs);
        case 'discuss':
          return await this.handleDiscuss(sessionId, conversationId, writer, placeholderMessageId, toolOutputs);
        case 'feedback':
          // feedback shares the plain-chat path, but keeps its own intent label.
          return await this.handleChat(sessionId, conversationId, writer, placeholderMessageId, 'feedback', toolOutputs);
        default:
          return await this.handleChat(sessionId, conversationId, writer, placeholderMessageId, 'chat', toolOutputs);
      }
    } catch (error: any) {
      logger.error('[SSA:ChatHandler] Handler error', {
        sessionId, intent, error: error.message,
      });
      await conversationService.markAssistantError(placeholderMessageId, error.message);
      return {
        messageId: placeholderMessageId,
        intent,
        success: false,
        error: error.message,
      };
    }
  }

  // ────────────────────────────────────────────
  // chat / consult / feedback — direct LLM conversation
  // ────────────────────────────────────────────

  /**
   * Plain streamed LLM chat: build context, stream to SSE, finalize the
   * placeholder message with the streamed content/thinking/token counts.
   */
  private async handleChat(
    sessionId: string,
    conversationId: string,
    writer: StreamWriter,
    placeholderMessageId: string,
    intent: IntentType,
    toolOutputs?: string,
  ): Promise<HandleResult> {
    const messages = await conversationService.buildContext(
      sessionId, conversationId, intent, toolOutputs,
    );
    const result = await conversationService.streamToSSE(messages, writer, {
      temperature: 0.7,
      maxTokens: 2000,
    });
    await conversationService.finalizeAssistantMessage(
      placeholderMessageId, result.content, result.thinking, result.tokens,
    );
    return {
      messageId: placeholderMessageId,
      intent,
      success: true,
    };
  }

  // ────────────────────────────────────────────
  // explore — data exploration (READ tools + LLM interpretation)
  // ────────────────────────────────────────────

  /**
   * Data-exploration turn: pulls a truncated summary of the session
   * blackboard (overview / variables / PICO / diagnostics) and feeds it to
   * the LLM as tool output so the reply is grounded in the uploaded data.
   */
  private async handleExplore(
    sessionId: string,
    conversationId: string,
    writer: StreamWriter,
    placeholderMessageId: string,
    guardToolOutput?: string,
  ): Promise<HandleResult> {
    // Extract a data summary from the SessionBlackboard to use as tool output.
    const blackboard = await sessionBlackboardService.get(sessionId);
    let toolOutputs = guardToolOutput || '';
    if (blackboard) {
      const truncated = tokenTruncationService.truncate(blackboard, {
        maxTokens: 1500,
        strategy: 'balanced',
      });
      const exploreData: string[] = [];
      if (truncated.overview) {
        exploreData.push(`数据概览:\n${truncated.overview}`);
      }
      if (truncated.variables) {
        exploreData.push(`变量列表:\n${truncated.variables}`);
      }
      if (truncated.pico) {
        exploreData.push(`PICO 推断:\n${truncated.pico}`);
      }
      if (truncated.report) {
        exploreData.push(`数据诊断:\n${truncated.report}`);
      }
      if (exploreData.length > 0) {
        toolOutputs = (toolOutputs ? toolOutputs + '\n\n' : '') + exploreData.join('\n\n');
      }
    }
    const messages = await conversationService.buildContext(
      sessionId, conversationId, 'explore', toolOutputs || undefined,
    );
    const result = await conversationService.streamToSSE(messages, writer, {
      temperature: 0.7,
      maxTokens: 2000,
    });
    await conversationService.finalizeAssistantMessage(
      placeholderMessageId, result.content, result.thinking, result.tokens,
    );
    return {
      messageId: placeholderMessageId,
      intent: 'explore',
      success: true,
    };
  }

  // ────────────────────────────────────────────
  // analyze — hand off to QPER (hybrid mode: LLM summary + WorkspacePane detail)
  // ────────────────────────────────────────────

  /**
   * Phase IV: analyze intent — conversation-driven analysis.
   *
   * Flow: generate plan → push analysis_plan over SSE → LLM explains the
   * plan → ask_user confirmation card. Actual execution is triggered by the
   * frontend via /workflow/{id}/stream (D1: separate workflow SSE retained).
   */
  private async handleAnalyze(
    sessionId: string,
    conversationId: string,
    userMessage: string,
    writer: StreamWriter,
    placeholderMessageId: string,
    guardToolOutput?: string,
  ): Promise<HandleResult> {
    // 1. Ask ToolOrchestratorService to generate a plan (D5: PICO hint auto-injected).
    const planResult = await toolOrchestratorService.plan(sessionId, userMessage);
    if (!planResult.success || !planResult.plan) {
      // Planning failed — stream a friendly "please clarify" reply instead.
      const fallbackHint = [
        guardToolOutput,
        `[系统提示] 分析计划生成失败: ${planResult.error || '未知错误'}`,
        '请友好地告知用户需要更明确的分析需求描述,例如需要指明要分析哪些变量、比较什么。',
      ].filter(Boolean).join('\n');
      const messages = await conversationService.buildContext(
        sessionId, conversationId, 'analyze', fallbackHint,
      );
      const result = await conversationService.streamToSSE(messages, writer, {
        temperature: 0.5, maxTokens: 800,
      });
      await conversationService.finalizeAssistantMessage(
        placeholderMessageId, result.content, result.thinking, result.tokens,
      );
      return { messageId: placeholderMessageId, intent: 'analyze', success: true };
    }
    const plan = planResult.plan;
    // 2. Push the analysis_plan event over SSE (D2: frontend auto-creates an AnalysisRecord).
    const planEvent = `data: ${JSON.stringify({
      type: 'analysis_plan',
      plan,
    })}\n\n`;
    writer.write(planEvent);
    logger.info('[SSA:ChatHandler] analysis_plan pushed via SSE', {
      sessionId, workflowId: plan.workflow_id, totalSteps: plan.total_steps,
    });
    // 3. Stream the LLM's natural-language explanation of the plan.
    const planSummary = toolOrchestratorService.formatPlanForLLM(plan);
    const toolOutputs = [
      guardToolOutput,
      planSummary,
      '[系统指令] 你刚刚为用户制定了上述分析方案。请用自然语言向用户解释这个方案:包括为什么选这些方法、分析步骤的逻辑。不要重复列步骤编号和工具代码,要用用户能理解的语言说明。最后提示用户确认方案后即可执行。',
      '【禁止事项】不要预测、模拟或编造任何分析结果、数值或表格。方案只是计划R 引擎尚未执行,你不知道结果是什么。',
    ].filter(Boolean).join('\n\n');
    const messages = await conversationService.buildContext(
      sessionId, conversationId, 'analyze', toolOutputs,
    );
    const result = await conversationService.streamToSSE(messages, writer, {
      temperature: 0.5, maxTokens: 1200,
    });
    await conversationService.finalizeAssistantMessage(
      placeholderMessageId, result.content, result.thinking, result.tokens,
    );
    // 4. Push an ask_user confirmation card (reuses the Phase III AskUserService).
    const confirmQ = {
      inputType: 'confirm' as const,
      question: '请确认上述分析方案',
      context: `方案: ${plan.title},共 ${plan.total_steps} 个步骤`,
      options: [
        { id: 'confirm_plan', label: '确认执行', value: 'confirm_plan' },
        { id: 'change_method', label: '修改方案', value: 'change_method' },
      ],
      metadata: {
        workflowId: plan.workflow_id,
        planTitle: plan.title,
      },
    };
    const event = await askUserService.createQuestion(sessionId, confirmQ);
    writer.write(askUserService.formatSSE(event));
    return {
      messageId: placeholderMessageId,
      intent: 'analyze',
      success: true,
    };
  }

  // ────────────────────────────────────────────
  // discuss — discuss results (injects analysis-result context)
  // ────────────────────────────────────────────

  /**
   * Result-discussion turn: injects a summary of the last few successful
   * QPER trace steps so the LLM can discuss actual (not imagined) results.
   */
  private async handleDiscuss(
    sessionId: string,
    conversationId: string,
    writer: StreamWriter,
    placeholderMessageId: string,
    guardToolOutput?: string,
  ): Promise<HandleResult> {
    const blackboard = await sessionBlackboardService.get(sessionId);
    let toolOutputs = guardToolOutput || '';
    // Inject a QPER trace summary (last 5 successful steps only).
    if (blackboard?.qperTrace && blackboard.qperTrace.length > 0) {
      const traceItems = blackboard.qperTrace
        .filter(t => t.status === 'success')
        .slice(-5)
        .map(t => `- 步骤${t.stepIndex}: ${t.toolCode}${t.summary}`)
        .join('\n');
      if (traceItems) {
        toolOutputs = (toolOutputs ? toolOutputs + '\n\n' : '') +
          `最近分析结果:\n${traceItems}`;
      }
    }
    const messages = await conversationService.buildContext(
      sessionId, conversationId, 'discuss', toolOutputs || undefined,
    );
    const result = await conversationService.streamToSSE(messages, writer, {
      temperature: 0.7,
      maxTokens: 2000,
    });
    await conversationService.finalizeAssistantMessage(
      placeholderMessageId, result.content, result.thinking, result.tokens,
    );
    return {
      messageId: placeholderMessageId,
      intent: 'discuss',
      success: true,
    };
  }

  // ────────────────────────────────────────────
  // consult — method recommendation (Phase III: method_consult + ask_user)
  // ────────────────────────────────────────────

  /**
   * Method-consult turn: gets a structured recommendation, has the LLM
   * present it, and — when a primary method matched — pushes an ask_user
   * confirmation card.
   */
  private async handleConsult(
    sessionId: string,
    conversationId: string,
    writer: StreamWriter,
    placeholderMessageId: string,
    guardToolOutput?: string,
  ): Promise<HandleResult> {
    let toolOutputs = guardToolOutput || '';
    // 1. Ask MethodConsultService for a recommendation.
    const recommendation = await methodConsultService.recommend(sessionId);
    const recText = methodConsultService.formatForLLM(recommendation);
    toolOutputs = (toolOutputs ? toolOutputs + '\n\n' : '') + recText;
    logger.info('[SSA:ChatHandler] Method consult result', {
      sessionId,
      matched: recommendation.matched,
      primaryMethod: recommendation.primaryMethod?.code,
      matchScore: recommendation.matchScore,
      needsClarification: recommendation.needsClarification,
    });
    // 2. LLM generates the natural-language recommendation (P1: conclusion first + structured list).
    const messages = await conversationService.buildContext(
      sessionId, conversationId, 'consult', toolOutputs,
    );
    const result = await conversationService.streamToSSE(messages, writer, {
      temperature: 0.5,
      maxTokens: 1500,
    });
    await conversationService.finalizeAssistantMessage(
      placeholderMessageId, result.content, result.thinking, result.tokens,
    );
    // 3. If there is a definite recommendation, push an ask_user confirmation card.
    if (recommendation.matched && recommendation.primaryMethod) {
      const confirmQ = askUserService.buildMethodConfirmQuestion(
        recommendation.primaryMethod.name,
        recommendation.primaryMethod.code,
        recommendation.fallbackMethod?.name,
      );
      const event = await askUserService.createQuestion(sessionId, confirmQ);
      writer.write(askUserService.formatSSE(event));
    }
    return {
      messageId: placeholderMessageId,
      intent: 'consult',
      success: true,
    };
  }

  // ────────────────────────────────────────────
  // Agent channel entry (dual-channel architecture, Phase 1 skeleton)
  // ────────────────────────────────────────────

  /**
   * Agent-mode entry — three-step confirmation pipeline.
   *
   * State machine:
   *   new request        → agentGeneratePlan → plan_pending (awaiting user confirmation)
   *   user confirms plan → agentStreamCode   → code_pending (awaiting user confirmation)
   *   user confirms code → agentExecuteCode  → completed
   *
   * Confirmation arrives either via workspace buttons (metadata.agentAction)
   * or typed free-text parsed by parseAgentAction.
   */
  async handleAgentMode(
    sessionId: string,
    conversationId: string,
    userContent: string,
    writer: StreamWriter,
    placeholderMessageId: string,
    metadata?: Record<string, any>,
  ): Promise<HandleResult> {
    try {
      const agentAction = metadata?.agentAction as string | undefined;
      // 1. Workspace-pane button actions (confirm_plan / confirm_code / cancel).
      if (agentAction) {
        const activeExec = await (prisma as any).ssaAgentExecution.findFirst({
          where: { sessionId, status: { in: ['plan_pending', 'code_pending'] } },
          orderBy: { createdAt: 'desc' },
        });
        if (!activeExec) {
          // Button pressed but no pending execution — fall back to free chat.
          return await this.handleAgentChat(sessionId, conversationId, writer, placeholderMessageId, null);
        }
        if (agentAction === 'confirm_plan' && activeExec.status === 'plan_pending') {
          return await this.agentStreamCode(activeExec, sessionId, conversationId, writer, placeholderMessageId);
        }
        if (agentAction === 'confirm_code' && activeExec.status === 'code_pending') {
          return await this.agentExecuteCode(activeExec, sessionId, conversationId, writer, placeholderMessageId);
        }
        if (agentAction === 'cancel') {
          return await this.agentCancel(activeExec, sessionId, conversationId, writer, placeholderMessageId);
        }
      }
      // 2. Check for an execution awaiting confirmation (user typed the
      //    confirmation directly in the chat pane).
      const activeExec = await (prisma as any).ssaAgentExecution.findFirst({
        where: { sessionId, status: { in: ['plan_pending', 'code_pending'] } },
        orderBy: { createdAt: 'desc' },
      });
      if (activeExec) {
        const action = this.parseAgentAction(userContent);
        if (activeExec.status === 'plan_pending' && action === 'confirm') {
          return await this.agentStreamCode(activeExec, sessionId, conversationId, writer, placeholderMessageId);
        }
        if (activeExec.status === 'code_pending' && action === 'confirm') {
          return await this.agentExecuteCode(activeExec, sessionId, conversationId, writer, placeholderMessageId);
        }
        if (action === 'cancel') {
          return await this.agentCancel(activeExec, sessionId, conversationId, writer, placeholderMessageId);
        }
      }
      // 3. No pending confirmation — check whether this looks like an analysis request.
      const blackboard = await sessionBlackboardService.get(sessionId);
      const hasData = !!blackboard?.dataOverview;
      if (hasData && this.looksLikeAnalysisRequest(userContent)) {
        return await this.agentGeneratePlan(sessionId, conversationId, userContent, writer, placeholderMessageId);
      }
      return await this.handleAgentChat(sessionId, conversationId, writer, placeholderMessageId, blackboard);
    } catch (error: any) {
      logger.error('[SSA:ChatHandler] Agent mode error', { sessionId, error: error.message });
      await conversationService.markAssistantError(placeholderMessageId, error.message);
      return { messageId: placeholderMessageId, intent: 'analyze', success: false, error: error.message };
    }
  }

  /**
   * Keyword heuristic mapping free text to confirm / cancel / other.
   * NOTE(review): the broad '执行' keyword subsumes '执行代码' and is checked in
   * the same clause, so any message containing "执行" counts as confirm —
   * presumably intentional, but worth confirming against UX expectations.
   */
  private parseAgentAction(content: string): 'confirm' | 'cancel' | 'other' {
    const lc = content.toLowerCase();
    if (lc.includes('agent_confirm') || lc.includes('确认') || lc.includes('执行代码') || lc.includes('开始生成') || lc.includes('执行')) return 'confirm';
    if (lc.includes('agent_cancel') || lc.includes('取消')) return 'cancel';
    return 'other';
  }

  // ── Agent Step 1: generate analysis plan → await user confirmation ──

  /**
   * Creates an SsaAgentExecution row (status 'planning'), generates a plan,
   * persists it (status 'plan_pending'), pushes plan events over SSE, then
   * streams a short gaze-guiding reply directing the user to the workspace.
   */
  private async agentGeneratePlan(
    sessionId: string,
    conversationId: string,
    userContent: string,
    writer: StreamWriter,
    placeholderMessageId: string,
  ): Promise<HandleResult> {
    const sendEvent = (type: string, data: Record<string, any>) => {
      writer.write(`data: ${JSON.stringify({ type, ...data })}\n\n`);
    };
    const execution = await (prisma as any).ssaAgentExecution.create({
      data: { sessionId, query: userContent, status: 'planning' },
    });
    sendEvent('agent_planning', { executionId: execution.id, message: '正在制定分析计划...' });
    const conversationHistory = await conversationService.buildContext(sessionId, conversationId, 'analyze');
    const plan = await agentPlannerService.generatePlan(sessionId, userContent, conversationHistory);
    // planText stores the raw text; reviewResult (JSON) stores the structured
    // plan so it can be recovered in later steps.
    await (prisma as any).ssaAgentExecution.update({
      where: { id: execution.id },
      data: {
        planText: plan.rawText,
        reviewResult: JSON.parse(JSON.stringify(plan)),
        status: 'plan_pending',
      },
    });
    sendEvent('agent_plan_ready', {
      executionId: execution.id,
      plan: { title: plan.title, designType: plan.designType, steps: plan.steps },
      planText: plan.rawText,
    });
    // Short gaze-guiding line (Solution B): the left chat pane only gets a
    // brief hint — the full plan lives in the right workspace pane.
    const hint = [
      `[系统指令——严格遵守] 分析计划已生成(${plan.steps.length} 步),标题「${plan.title}」。`,
      '你只需回复 1-2 句话,引导用户去右侧工作区查看。',
      '示例回复:「我已为您拟定了详细的分析计划,请在右侧工作区核对并点击确认。」',
      '【铁律】禁止在对话中重复计划内容、禁止编造任何数值。回复不超过 2 句话。',
    ].join('\n');
    const msgs = await conversationService.buildContext(sessionId, conversationId, 'analyze', hint);
    const sr = await conversationService.streamToSSE(msgs, writer, { temperature: 0.3, maxTokens: 150 });
    await conversationService.finalizeAssistantMessage(placeholderMessageId, sr.content, sr.thinking, sr.tokens);
    return { messageId: placeholderMessageId, intent: 'analyze', success: true };
  }

  // ── Agent Step 2: stream code generation → await user confirmation ──

  /**
   * Streams R-code generation for a confirmed plan, persisting partial and
   * final code (status 'coding' → 'code_pending'), then streams a short
   * gaze-guiding reply.
   */
  private async agentStreamCode(
    execution: any,
    sessionId: string,
    conversationId: string,
    writer: StreamWriter,
    placeholderMessageId: string,
  ): Promise<HandleResult> {
    const sendEvent = (type: string, data: Record<string, any>) => {
      writer.write(`data: ${JSON.stringify({ type, ...data })}\n\n`);
    };
    await (prisma as any).ssaAgentExecution.update({
      where: { id: execution.id },
      data: { status: 'coding' },
    });
    sendEvent('code_generating', { executionId: execution.id, partialCode: '', message: '正在生成 R 代码...' });
    const plan = execution.reviewResult as any;
    const generated = await agentCoderService.generateCodeStream(
      sessionId, plan,
      (accumulated: string) => {
        // Relay each accumulated chunk to the frontend as it streams in.
        sendEvent('code_generating', { executionId: execution.id, partialCode: accumulated });
      },
    );
    await (prisma as any).ssaAgentExecution.update({
      where: { id: execution.id },
      data: { generatedCode: generated.code, status: 'code_pending' },
    });
    sendEvent('code_generated', {
      executionId: execution.id,
      code: generated.code,
      explanation: generated.explanation,
    });
    // Short gaze-guiding line.
    const hint = [
      `[系统指令——严格遵守] R 代码已生成(${generated.code.split('\n').length} 行),使用 ${generated.requiredPackages.join(', ') || '基础包'}`,
      '你只需回复 1-2 句话,引导用户去右侧工作区核对代码并点击执行。',
      '示例回复「R 代码已生成,请在右侧工作区核对代码并点击 "确认并执行"。」',
      '【铁律】禁止在对话中贴代码、禁止编造结果。回复不超过 2 句话。',
    ].join('\n');
    const msgs = await conversationService.buildContext(sessionId, conversationId, 'analyze', hint);
    const sr = await conversationService.streamToSSE(msgs, writer, { temperature: 0.3, maxTokens: 150 });
    await conversationService.finalizeAssistantMessage(placeholderMessageId, sr.content, sr.thinking, sr.tokens);
    return { messageId: placeholderMessageId, intent: 'analyze', success: true };
  }

  // ── Agent Step 3: execute code + retry loop ──

  /**
   * Executes the confirmed R code, retrying up to codeRunnerService.maxRetries
   * times with regenerated code fed the previous error + console tail.
   *
   * NOTE(review): when all attempts fail, success: true is still returned and
   * the placeholder assistant message is never finalized — presumably the
   * 'code_error' SSE event is considered the user-facing outcome; confirm this
   * doesn't leave a dangling placeholder message in the conversation.
   */
  private async agentExecuteCode(
    execution: any,
    sessionId: string,
    conversationId: string,
    writer: StreamWriter,
    placeholderMessageId: string,
  ): Promise<HandleResult> {
    const sendEvent = (type: string, data: Record<string, any>) => {
      writer.write(`data: ${JSON.stringify({ type, ...data })}\n\n`);
    };
    await (prisma as any).ssaAgentExecution.update({
      where: { id: execution.id },
      data: { status: 'executing' },
    });
    const plan = execution.reviewResult as any;
    let currentCode = execution.generatedCode as string;
    let lastError: string | null = null;
    // attempt 0 is the initial run; attempts 1..maxRetries use regenerated code.
    for (let attempt = 0; attempt <= codeRunnerService.maxRetries; attempt++) {
      sendEvent('code_executing', {
        executionId: execution.id,
        attempt: attempt + 1,
        message: attempt === 0 ? '正在执行 R 代码...' : `${attempt + 1} 次重试执行...`,
      });
      const execResult = await codeRunnerService.executeCode(sessionId, currentCode);
      if (execResult.success) {
        const durationMs = execResult.durationMs || 0;
        await (prisma as any).ssaAgentExecution.update({
          where: { id: execution.id },
          data: {
            executionResult: execResult as any,
            reportBlocks: execResult.reportBlocks as any,
            generatedCode: currentCode,
            status: 'completed',
            retryCount: attempt,
            durationMs,
          },
        });
        sendEvent('code_result', {
          executionId: execution.id,
          reportBlocks: execResult.reportBlocks,
          code: currentCode,
          durationMs,
        });
        // Stream a short LLM summary of the results.
        const hint = [
          `[系统指令] R 代码执行完成(${durationMs}ms生成 ${(execResult.reportBlocks || []).length} 个报告模块。`,
          '请简要解释结果的统计学意义。',
          '【禁止】不要编造数值(工作区已展示完整结果)。',
        ].join('\n');
        const msgs = await conversationService.buildContext(sessionId, conversationId, 'analyze', hint);
        const sr = await conversationService.streamToSSE(msgs, writer, { temperature: 0.5, maxTokens: 800 });
        await conversationService.finalizeAssistantMessage(placeholderMessageId, sr.content, sr.thinking, sr.tokens);
        return { messageId: placeholderMessageId, intent: 'analyze', success: true };
      }
      lastError = execResult.error || '执行失败';
      // R Docker's unboxedJSON may yield either an array or a scalar here —
      // normalize to an array before slicing the tail.
      const rawConsole = execResult.consoleOutput;
      const consoleArr = Array.isArray(rawConsole) ? rawConsole : (rawConsole ? [String(rawConsole)] : []);
      const consoleSnippet = consoleArr.slice(-20).join('\n');
      if (attempt < codeRunnerService.maxRetries) {
        const errorDetail = consoleSnippet
          ? `${lastError}\n\n--- R console output (last 20 lines) ---\n${consoleSnippet}`
          : lastError;
        sendEvent('code_error', {
          executionId: execution.id,
          message: lastError,
          consoleOutput: consoleSnippet || undefined,
          willRetry: true,
          retryCount: attempt + 1,
        });
        sendEvent('code_retry', {
          executionId: execution.id,
          retryCount: attempt + 1,
          message: `${attempt + 1} 次执行失败Agent 正在重新生成代码...`,
          previousError: lastError,
        });
        // Regenerate code, handing the coder the previous error and code.
        const retry = await agentCoderService.generateCodeStream(
          sessionId,
          plan,
          (accumulated) => {
            sendEvent('code_generating', {
              executionId: execution.id,
              partialCode: accumulated,
            });
          },
          errorDetail,
          currentCode,
        );
        currentCode = retry.code;
        await (prisma as any).ssaAgentExecution.update({
          where: { id: execution.id },
          data: { generatedCode: currentCode, retryCount: attempt + 1 },
        });
        sendEvent('code_generated', { executionId: execution.id, code: currentCode });
      }
    }
    // All attempts exhausted — persist the failure and notify the frontend.
    await (prisma as any).ssaAgentExecution.update({
      where: { id: execution.id },
      data: { status: 'error', errorMessage: lastError, retryCount: codeRunnerService.maxRetries },
    });
    sendEvent('code_error', {
      executionId: execution.id,
      message: `经过 ${codeRunnerService.maxRetries + 1} 次尝试仍然失败: ${lastError}`,
      willRetry: false,
    });
    return { messageId: placeholderMessageId, intent: 'analyze', success: true };
  }

  // ── Agent cancel ──

  /**
   * Marks the pending execution as cancelled (stored as status 'error' with a
   * user-cancel message) and streams a brief acknowledgement.
   */
  private async agentCancel(
    execution: any,
    sessionId: string,
    conversationId: string,
    writer: StreamWriter,
    placeholderMessageId: string,
  ): Promise<HandleResult> {
    await (prisma as any).ssaAgentExecution.update({
      where: { id: execution.id },
      data: { status: 'error', errorMessage: '用户取消' },
    });
    const msgs = await conversationService.buildContext(
      sessionId, conversationId, 'analyze', '[系统指令] 用户取消了当前分析流程。请简短回复确认取消。',
    );
    const sr = await conversationService.streamToSSE(msgs, writer, { temperature: 0.5, maxTokens: 200 });
    await conversationService.finalizeAssistantMessage(placeholderMessageId, sr.content, sr.thinking, sr.tokens);
    return { messageId: placeholderMessageId, intent: 'analyze', success: true };
  }

  // ── Agent free-form chat ──

  /**
   * Free discussion inside the Agent channel: injects a truncated blackboard
   * summary (when present) plus an Agent-mode system hint, then streams the
   * LLM reply.
   */
  private async handleAgentChat(
    sessionId: string,
    conversationId: string,
    writer: StreamWriter,
    placeholderMessageId: string,
    blackboard: any,
  ): Promise<HandleResult> {
    let toolOutputs = '';
    if (blackboard) {
      const truncated = tokenTruncationService.truncate(blackboard, { maxTokens: 2000, strategy: 'balanced' });
      const parts: string[] = [];
      if (truncated.overview) parts.push(`数据概览:\n${truncated.overview}`);
      if (truncated.variables) parts.push(`变量列表:\n${truncated.variables}`);
      if (truncated.pico) parts.push(`PICO 推断:\n${truncated.pico}`);
      if (parts.length > 0) toolOutputs = parts.join('\n\n');
    }
    const agentHint = [
      '[当前模式: Agent 代码生成通道]',
      '你是一位高级统计分析 Agent。你可以',
      '1. 与临床研究者自由讨论统计分析需求',
      '2. 帮助理解数据特征、研究设计、统计方法选择',
      '3. 当用户明确分析需求后,自动制定计划并生成 R 代码执行',
      '',
      '注意:禁止编造或模拟任何分析结果和数值。',
      blackboard?.dataOverview
        ? '用户已上传数据,你可以结合数据概览回答问题。当用户提出分析需求时,会自动触发分析流程。'
        : '用户尚未上传数据。请引导用户先上传研究数据文件。',
    ].join('\n');
    const fullToolOutputs = toolOutputs ? `${toolOutputs}\n\n${agentHint}` : agentHint;
    const messages = await conversationService.buildContext(sessionId, conversationId, 'analyze', fullToolOutputs);
    const result = await conversationService.streamToSSE(messages, writer, { temperature: 0.7, maxTokens: 2000 });
    await conversationService.finalizeAssistantMessage(placeholderMessageId, result.content, result.thinking, result.tokens);
    return { messageId: placeholderMessageId, intent: 'analyze', success: true };
  }

  /** Simple keyword heuristic: does the user message look like an analysis request? */
  private looksLikeAnalysisRequest(content: string): boolean {
    const kw = [
      '分析', '比较', '检验', '回归', '相关', '差异', '统计',
      '生存', '卡方', 'logistic', 't检验', 'anova',
      '基线', '特征表', '描述性', '预测', '影响因素',
      '帮我做', '帮我跑', '开始分析', '执行分析',
    ];
    const lc = content.toLowerCase();
    return kw.some(k => lc.includes(k));
  }

  // ────────────────────────────────────────────
  // ask_user response handling (Phase III)
  // ────────────────────────────────────────────

  /**
   * Handles the user's answer to a previously pushed ask_user card.
   * Branches on the selected option value: confirm_plan (Phase IV),
   * confirm / use_fallback / change_method (Phase III), or skip.
   */
  async handleAskUserResponse(
    sessionId: string,
    conversationId: string,
    response: AskUserResponse,
    writer: StreamWriter,
    placeholderMessageId: string,
  ): Promise<HandleResult> {
    // Read the pending metadata (contains workflowId) BEFORE clearing it.
    const pending = await askUserService.getPending(sessionId);
    const pendingMeta = pending?.metadata || {};
    await askUserService.clearPending(sessionId);
    if (response.action === 'skip') {
      // User skipped — generate a friendly acknowledgement.
      const messages = await conversationService.buildContext(
        sessionId, conversationId, 'chat',
        '[系统提示] 用户跳过了上一个确认问题。请友好地回应,表示随时可以继续。',
      );
      const result = await conversationService.streamToSSE(messages, writer, {
        temperature: 0.7,
        maxTokens: 500,
      });
      await conversationService.finalizeAssistantMessage(
        placeholderMessageId, result.content, result.thinking, result.tokens,
      );
      return { messageId: placeholderMessageId, intent: 'chat', success: true };
    }
    // The user picked a concrete option.
    const selectedValue = response.selectedValues?.[0];
    if (selectedValue === 'confirm_plan') {
      // Phase IV: plan confirmed → frontend opens the workspace; the user
      // manually clicks "execute" there.
      const workflowId = pendingMeta.workflowId || response.metadata?.workflowId || '';
      const messages = await conversationService.buildContext(
        sessionId, conversationId, 'analyze',
        [
          `[系统指令——严格遵守] 用户已确认分析方案workflow: ${workflowId})。`,
          '你只需回复一句简短的确认消息,例如:"好的,方案已确认。请在右侧工作区点击「开始执行分析」启动 R 引擎。"',
          '【铁律】禁止在此回复中生成任何分析结果、表格、P值、统计量、数值。',
          '你不是计算引擎,所有数值结果将由 R 统计引擎独立计算后返回。',
          '你的回复不得超过 2 句话。',
        ].join('\n'),
      );
      const result = await conversationService.streamToSSE(messages, writer, {
        temperature: 0.1, maxTokens: 150,
      });
      await conversationService.finalizeAssistantMessage(
        placeholderMessageId, result.content, result.thinking, result.tokens,
      );
      // Push the plan_confirmed event; the frontend uses it to trigger executeWorkflow.
      const confirmEvent = `data: ${JSON.stringify({
        type: 'plan_confirmed',
        workflowId,
      })}\n\n`;
      writer.write(confirmEvent);
      return { messageId: placeholderMessageId, intent: 'analyze', success: true };
    } else if (selectedValue === 'confirm') {
      // Phase III: recommended method confirmed → tell the user analysis can start.
      const messages = await conversationService.buildContext(
        sessionId, conversationId, 'analyze',
        '[系统指令] 用户已确认使用推荐的统计方法。请简要确认方案,告知用户可以在对话中说"开始分析"或在右侧面板触发执行。禁止生成任何数值或假设的分析结果。',
      );
      const result = await conversationService.streamToSSE(messages, writer, {
        temperature: 0.3,
        maxTokens: 500,
      });
      await conversationService.finalizeAssistantMessage(
        placeholderMessageId, result.content, result.thinking, result.tokens,
      );
      return { messageId: placeholderMessageId, intent: 'analyze', success: true };
    } else if (selectedValue === 'use_fallback') {
      // Switch to the fallback method.
      const messages = await conversationService.buildContext(
        sessionId, conversationId, 'consult',
        '[系统提示] 用户选择使用备选方案。请确认切换,并简要说明备选方案的适用场景。',
      );
      const result = await conversationService.streamToSSE(messages, writer, {
        temperature: 0.5,
        maxTokens: 800,
      });
      await conversationService.finalizeAssistantMessage(
        placeholderMessageId, result.content, result.thinking, result.tokens,
      );
      return { messageId: placeholderMessageId, intent: 'consult', success: true };
    } else if (selectedValue === 'change_method') {
      // User wants a different method → guide them to re-describe the need.
      const messages = await conversationService.buildContext(
        sessionId, conversationId, 'consult',
        '[系统提示] 用户不满意当前推荐,想换方法。请询问用户希望使用什么方法,或引导其更详细地描述分析需求。',
      );
      const result = await conversationService.streamToSSE(messages, writer, {
        temperature: 0.7,
        maxTokens: 800,
      });
      await conversationService.finalizeAssistantMessage(
        placeholderMessageId, result.content, result.thinking, result.tokens,
      );
      return { messageId: placeholderMessageId, intent: 'consult', success: true };
    }
    // Anything else: fall back to plain chat.
    return await this.handleChat(sessionId, conversationId, writer, placeholderMessageId, 'chat');
  }
}
export const chatHandlerService = new ChatHandlerService();