feat(aia): Protocol Agent streaming + editable state panel + protocol generation plan

Day 2 Development (2026-01-24):

Backend Enhancements:
- Implement SSE streaming in ProtocolAgentController using createStreamingService
- Add data condensation via LLM in ProtocolOrchestrator.handleProtocolSync
- Support stage editing without resetting progress
- Add explicit JSON output format for each stage in system prompt
- Create independent seed script for Protocol Agent (seed-protocol-agent.ts)

Frontend Improvements:
- Integrate useAIStream hook for typewriter effect in ChatArea
- Add MarkdownContent component for basic Markdown rendering
- Implement StageEditModal for editing stage data (scientific question, PICO, etc.)
- Add edit button to StageCard (visible on hover)
- Fix routing paths from /aia to /ai-qa
- Enhance CSS with full-screen layout and Markdown styles

New Documentation:
- One-click protocol generation development plan (v1.1)
- Editor selection evaluation (Novel vs BlockNote vs Tiptap)
- Novel fork strategy for AI-native editing

Technical Decisions:
- Choose Novel (Fork) as protocol editor for AI-first design
- Two-stage progressive generation: summary in chat, full protocol in editor
- 10-day development plan for protocol generation feature

Code Stats:
- Backend: 3 files modified, 1 new file
- Frontend: 9 files modified, 2 new files
- Docs: 3 new files

Status: Streaming and editable features working, protocol generation pending
This commit is contained in:
2026-01-24 23:06:33 +08:00
parent 596f2dfc02
commit 4d7d97ca19
18 changed files with 2708 additions and 192 deletions

View File

@@ -10,6 +10,9 @@ import { PrismaClient } from '@prisma/client';
import { ProtocolOrchestrator } from '../services/ProtocolOrchestrator.js';
import { LLMServiceInterface } from '../../services/BaseAgentOrchestrator.js';
import { ProtocolStageCode } from '../../types/index.js';
import { createStreamingService } from '../../../../common/streaming/index.js';
import type { OpenAIMessage } from '../../../../common/streaming/index.js';
import { logger } from '../../../../common/logging/index.js';
// 请求类型定义
interface SendMessageBody {
@@ -38,46 +41,95 @@ interface GetContextParams {
export class ProtocolAgentController {
private orchestrator: ProtocolOrchestrator;
private prisma: PrismaClient;
/**
 * @param prisma     shared Prisma client used here for message/conversation persistence
 * @param llmService LLM backend passed through to the ProtocolOrchestrator
 */
constructor(prisma: PrismaClient, llmService: LLMServiceInterface) {
this.prisma = prisma;
this.orchestrator = new ProtocolOrchestrator({ prisma, llmService });
}
/**
* 发送消息
* 发送消息(流式输出)
* POST /api/aia/protocol-agent/message
*
* 使用通用 StreamingService 实现打字机效果
*/
async sendMessage(
request: FastifyRequest<{ Body: SendMessageBody }>,
reply: FastifyReply
): Promise<void> {
const { conversationId, content } = request.body;
const userId = (request as any).user?.userId;
if (!userId) {
reply.code(401).send({ error: 'Unauthorized' });
return;
}
if (!conversationId || !content) {
reply.code(400).send({ error: 'Missing required fields: conversationId, content' });
return;
}
try {
const { conversationId, content, messageId } = request.body;
const userId = (request as any).user?.userId;
// 1. 确保上下文存在
const contextService = this.orchestrator.getContextService();
const context = await contextService.getOrCreateContext(conversationId, userId);
// 2. 构建包含上下文的消息
const messages = await this.buildMessagesWithContext(conversationId, content, context);
// 3. 保存用户消息到数据库
await this.prisma.message.create({
data: {
conversationId,
role: 'user',
content,
},
});
if (!userId) {
reply.code(401).send({ error: 'Unauthorized' });
return;
}
if (!conversationId || !content) {
reply.code(400).send({ error: 'Missing required fields: conversationId, content' });
return;
}
const response = await this.orchestrator.handleMessage({
conversationId,
// 4. 使用通用 StreamingService 流式输出
const streamingService = createStreamingService(reply, {
model: 'deepseek-v3',
temperature: 0.7,
maxTokens: 4096,
enableDeepThinking: false,
userId,
content,
messageId,
conversationId,
});
reply.send({
success: true,
data: response,
await streamingService.streamGenerate(messages, {
onComplete: async (fullContent, thinkingContent) => {
// 5. 保存 AI 回复到数据库
await this.prisma.message.create({
data: {
conversationId,
role: 'assistant',
content: fullContent,
thinkingContent: thinkingContent || null,
model: 'deepseek-v3',
},
});
// 6. 更新对话时间
await this.prisma.conversation.update({
where: { id: conversationId },
data: { updatedAt: new Date() },
});
logger.info('[ProtocolAgent] 消息发送完成', {
conversationId,
stage: context.currentStage,
hasThinking: !!thinkingContent,
});
},
onError: (error) => {
logger.error('[ProtocolAgent] 流式生成失败', { error, conversationId });
},
});
} catch (error) {
console.error('[ProtocolAgentController] sendMessage error:', error);
logger.error('[ProtocolAgentController] sendMessage error:', error);
reply.code(500).send({
success: false,
error: error instanceof Error ? error.message : 'Internal server error',
@@ -85,6 +137,165 @@ export class ProtocolAgentController {
}
}
/**
 * Build the OpenAI-style message list for one turn:
 * [stage-aware system prompt, ...recent history (chronological), current user message].
 *
 * @param conversationId conversation whose history is loaded from Prisma
 * @param userContent    the user's new message (appended last; not yet persisted when this runs)
 * @param context        ProtocolContextData-like object; read-only here, forwarded to buildSystemPrompt
 * @returns messages ready to hand to the streaming LLM call
 */
private async buildMessagesWithContext(
  conversationId: string,
  userContent: string,
  context: any
): Promise<OpenAIMessage[]> {
  // Load the MOST RECENT 20 messages. The previous `asc` + take(20) query
  // returned the oldest 20, so long conversations lost all recent turns.
  // Fetch newest-first, then reverse back into chronological order.
  const recentNewestFirst = await this.prisma.message.findMany({
    where: { conversationId },
    orderBy: { createdAt: 'desc' },
    take: 20,
  });
  const historyMessages = recentNewestFirst.reverse();

  // System prompt carries the current stage and all confirmed stage data.
  const systemPrompt = this.buildSystemPrompt(context);
  const messages: OpenAIMessage[] = [
    { role: 'system', content: systemPrompt },
  ];

  for (const msg of historyMessages) {
    messages.push({
      // NOTE(review): assumes stored roles are only 'user'/'assistant' —
      // confirm no 'system' rows are ever persisted for a conversation.
      role: msg.role as 'user' | 'assistant',
      content: msg.content,
    });
  }

  // The current turn goes last so the model answers it.
  messages.push({ role: 'user', content: userContent });
  return messages;
}
/**
 * Build the stage-aware system prompt: expert persona, progress, all
 * confirmed stage data, and the <extracted_data> output format for the
 * current stage (from getStageOutputFormat).
 *
 * @param context ProtocolContextData-like object; reads currentStage,
 *                completedStages and the per-stage data fields
 * @returns full system prompt (Chinese Markdown — model/user-facing text)
 */
private buildSystemPrompt(context: any): string {
  // Display names for the five protocol stages, in pipeline order.
  const stageNames: Record<string, string> = {
    scientific_question: '科学问题梳理',
    pico: 'PICO要素',
    study_design: '研究设计',
    sample_size: '样本量计算',
    endpoints: '观察指标',
  };
  // Fall back to the raw code so an unknown stage never renders as "undefined".
  const currentStageName = stageNames[context.currentStage] || context.currentStage;
  // Summaries of confirmed data (read from ProtocolContextData fields).
  // A stage contributes only when it is BOTH completed and has data.
  let completedDataSummary = '';
  const completedStages = context.completedStages || [];
  if (completedStages.includes('scientific_question') && context.scientificQuestion) {
    completedDataSummary += `\n\n### 已确认的科学问题\n${JSON.stringify(context.scientificQuestion, null, 2)}`;
  }
  if (completedStages.includes('pico') && context.pico) {
    completedDataSummary += `\n\n### 已确认的PICO要素\n${JSON.stringify(context.pico, null, 2)}`;
  }
  if (completedStages.includes('study_design') && context.studyDesign) {
    completedDataSummary += `\n\n### 已确认的研究设计\n${JSON.stringify(context.studyDesign, null, 2)}`;
  }
  if (completedStages.includes('sample_size') && context.sampleSize) {
    completedDataSummary += `\n\n### 已确认的样本量\n${JSON.stringify(context.sampleSize, null, 2)}`;
  }
  if (completedStages.includes('endpoints') && context.endpoints) {
    completedDataSummary += `\n\n### 已确认的观察指标\n${JSON.stringify(context.endpoints, null, 2)}`;
  }
  // Derive the denominator from the stage table instead of a magic 5,
  // so adding a stage keeps the progress figure correct.
  const totalStages = Object.keys(stageNames).length;
  const progress = Math.round((completedStages.length / totalStages) * 100);
  // Stage-specific <extracted_data> format instructions.
  const stageOutputFormat = this.getStageOutputFormat(context.currentStage);
  // Fixes vs. previous version: unknown completed stages fall back to their
  // raw code (|| s), and the previously unclosed 「 bracket in the
  // "每次对话只关注当前阶段" line is now closed with 」.
  return `你是一位资深的临床研究方法学专家,正在帮助医生设计临床研究方案。
## 当前状态
- **当前阶段**: ${currentStageName}
- **已完成阶段**: ${completedStages.map((s: string) => stageNames[s] || s).join(', ') || '无'}
- **进度**: ${progress}%
## 已收集的数据${completedDataSummary || '\n暂无已确认的数据'}
## 你的任务
1. **只围绕「${currentStageName}」阶段与用户对话**,不要跨阶段讨论
2. 引导用户提供当前阶段所需的完整信息
3. 当信息收集完整时,先用文字总结,然后**必须**在回复末尾输出结构化数据
## 当前阶段「${currentStageName}」的输出格式
当信息完整时,**必须**在回复末尾添加以下格式的数据提取标签:
${stageOutputFormat}
## 重要提示
- 只有当用户提供了足够的信息后才输出 <extracted_data> 标签
- 输出的 JSON 必须是有效格式
- 每次对话只关注当前阶段「${currentStageName}」
- 回复使用 Markdown 格式,简洁专业`;
}
/**
 * Stage-specific instructions telling the model how to emit the
 * <extracted_data> tag once a stage's information is complete.
 *
 * @param stageCode protocol stage identifier (e.g. 'pico')
 * @returns a format snippet embedded verbatim into the system prompt;
 *          unknown stages get a generic key/value example
 */
private getStageOutputFormat(stageCode: string): string {
  // Generic fallback for stages without a dedicated format.
  const genericFormat = `<extracted_data>
{
"key": "value"
}
</extracted_data>`;

  // One entry per known stage; content is emitted into the prompt as-is.
  const stageFormats: Record<string, string> = {
    scientific_question: `<extracted_data>
{
"content": "一句话科学问题不超过50字"
}
</extracted_data>`,
    pico: `<extracted_data>
{
"population": "研究人群不超过20字",
"intervention": "干预措施不超过20字",
"comparison": "对照组不超过20字",
"outcome": "结局指标不超过20字"
}
</extracted_data>`,
    study_design: `<extracted_data>
{
"studyType": "研究类型,如:回顾性队列研究、前瞻性队列研究、随机对照试验等",
"design": ["设计特征1", "设计特征2"]
}
</extracted_data>`,
    sample_size: `<extracted_data>
{
"sampleSize": 样本量数字,
"calculation": {
"alpha": 显著性水平如0.05,
"power": 检验效能如0.8,
"effectSize": "效应量描述"
}
}
</extracted_data>`,
    endpoints: `<extracted_data>
{
"outcomes": {
"primary": ["主要结局指标1", "主要结局指标2"],
"secondary": ["次要结局指标1"],
"safety": ["安全性指标"]
},
"confounders": ["混杂因素1", "混杂因素2"]
}
</extracted_data>`,
  };

  return stageFormats[stageCode] ?? genericFormat;
}
/**
* 同步阶段数据
* POST /api/aia/protocol-agent/sync

View File

@@ -127,39 +127,63 @@ export class ProtocolOrchestrator extends BaseAgentOrchestrator {
/**
* 处理Protocol同步请求
* 支持两种场景1. 首次同步从对话中提取2. 编辑更新(用户手动修改)
*/
async handleProtocolSync(
conversationId: string,
userId: string,
stageCode: string,
data: Record<string, unknown>
data: Record<string, unknown>,
isEdit: boolean = false // 是否是编辑更新
): Promise<{
success: boolean;
context: ProtocolContextData;
nextStage?: ProtocolStageCode;
message?: string;
condensedData?: Record<string, unknown>;
}> {
const stage = stageCode as ProtocolStageCode;
// 获取当前上下文
const existingContext = await this.contextService.getContext(conversationId);
const isAlreadyCompleted = existingContext?.completedStages.includes(stage);
let finalData: Record<string, unknown>;
if (isEdit || isAlreadyCompleted) {
// 编辑模式:直接使用用户提供的数据,不再凝练
finalData = data;
} else {
// 首次同步:使用 LLM 凝练数据
finalData = await this.condenseStageData(stage, data);
}
// 保存阶段数据
await this.contextService.updateStageData(conversationId, stage, {
...data,
...finalData,
confirmed: true,
confirmedAt: new Date(),
});
// 获取下一阶段
const currentIndex = STAGE_ORDER.indexOf(stage);
const nextStage = currentIndex < STAGE_ORDER.length - 1
? STAGE_ORDER[currentIndex + 1]
: undefined;
let context: ProtocolContextData;
let nextStage: ProtocolStageCode | undefined;
// 标记当前阶段完成,更新到下一阶段
const context = await this.contextService.completeStage(
conversationId,
stage,
nextStage
);
if (isAlreadyCompleted) {
// 已完成的阶段:只更新数据,不改变当前阶段
context = await this.contextService.getContext(conversationId) as ProtocolContextData;
} else {
// 首次完成:标记完成并进入下一阶段
const currentIndex = STAGE_ORDER.indexOf(stage);
nextStage = currentIndex < STAGE_ORDER.length - 1
? STAGE_ORDER[currentIndex + 1]
: undefined;
context = await this.contextService.completeStage(
conversationId,
stage,
nextStage
);
}
// 检查是否所有阶段都已完成
const allCompleted = this.contextService.isAllStagesCompleted(context);
@@ -168,14 +192,104 @@ export class ProtocolOrchestrator extends BaseAgentOrchestrator {
success: true,
context,
nextStage,
message: allCompleted
? '🎉 所有核心要素已完成!您可以点击「一键生成研究方案」生成完整方案。'
: nextStage
? `已同步${STAGE_NAMES[stage]},进入${STAGE_NAMES[nextStage]}阶段`
: `已同步${STAGE_NAMES[stage]}`,
condensedData: finalData,
message: isAlreadyCompleted
? `✅ 已更新「${STAGE_NAMES[stage]}`
: allCompleted
? '🎉 所有核心要素已完成!您可以点击「一键生成研究方案」生成完整方案。'
: nextStage
? `已同步${STAGE_NAMES[stage]},进入${STAGE_NAMES[nextStage]}阶段`
: `已同步${STAGE_NAMES[stage]}`,
};
}
/**
 * Condense raw stage data into a compact canonical shape via the LLM.
 *
 * Called on first-time stage sync (edits skip condensation and keep user
 * input verbatim). Best-effort: on any failure — no prompt for the stage,
 * no JSON in the LLM reply, a failed call, or a JSON parse error — the
 * original `data` is returned unchanged and nothing is thrown.
 *
 * @param stageCode which protocol stage the data belongs to
 * @param data      raw extracted/user-provided stage data
 * @returns the condensed JSON object, or `data` on any fallback path
 */
private async condenseStageData(
stageCode: ProtocolStageCode,
data: Record<string, unknown>
): Promise<Record<string, unknown>> {
// Per-stage condensation prompts; each demands JSON-only output and
// embeds the raw data inline.
const condensePrompts: Record<ProtocolStageCode, string> = {
scientific_question: `请将以下科学问题内容凝练成一句话不超过50字保留核心要点
原始内容:
${JSON.stringify(data, null, 2)}
要求:
- 输出格式:{ "content": "一句话科学问题" }
- 只输出 JSON不要其他内容`,
pico: `请将以下 PICO 要素凝练成简短描述:
原始内容:
${JSON.stringify(data, null, 2)}
要求:
- 每个要素不超过20字
- 输出格式:{ "population": "...", "intervention": "...", "comparison": "...", "outcome": "..." }
- 只输出 JSON不要其他内容`,
study_design: `请将以下研究设计凝练成关键标签:
原始内容:
${JSON.stringify(data, null, 2)}
要求:
- 输出格式:{ "studyType": "研究类型", "design": ["特征1", "特征2"] }
- 只输出 JSON不要其他内容`,
sample_size: `请提取样本量关键数据:
原始内容:
${JSON.stringify(data, null, 2)}
要求:
- 输出格式:{ "sampleSize": 数字, "calculation": { "alpha": 数字, "power": 数字 } }
- 只输出 JSON不要其他内容`,
endpoints: `请将以下观察指标凝练成简短列表:
原始内容:
${JSON.stringify(data, null, 2)}
要求:
- 每个指标不超过10字
- 输出格式:{ "baseline": {...}, "exposure": {...}, "outcomes": {...}, "confounders": [...] }
- 只输出 JSON不要其他内容`,
};
try {
const prompt = condensePrompts[stageCode];
if (!prompt) {
// Unknown stage: nothing to condense.
return data;
}
// Low temperature + small budget: we want deterministic, terse JSON.
const response = await this.llmService.chat({
messages: [
{ role: 'system', content: '你是一位专业的临床研究方法学专家,擅长提炼和凝练研究要素。请严格按照要求的 JSON 格式输出。' },
{ role: 'user', content: prompt },
],
temperature: 0.3,
maxTokens: 500,
});
// Extract the JSON object from the reply. NOTE(review): the regex is
// greedy (first '{' to last '}'), which tolerates Markdown fences but
// would break if the model emitted text after the closing brace.
const jsonMatch = response.content.match(/\{[\s\S]*\}/);
if (jsonMatch) {
const condensed = JSON.parse(jsonMatch[0]);
return condensed;
}
return data;
} catch (error) {
console.error('[ProtocolOrchestrator] condenseStageData error:', error);
// Condensation failed — fall back to the original data rather than
// blocking the sync.
return data;
}
}
/**
* 获取Protocol上下文服务
*/

View File

@@ -24,6 +24,7 @@ import type { Agent, AgentStage } from '../types/index.js';
* 用于从 PromptService 获取对应的提示词
*/
const AGENT_TO_PROMPT_CODE: Record<string, string> = {
'PROTOCOL_AGENT': 'PROTOCOL_SYSTEM', // Protocol Agent使用自己的Prompt系统
'TOPIC_01': 'AIA_SCIENTIFIC_QUESTION',
'TOPIC_02': 'AIA_PICO_ANALYSIS',
'TOPIC_03': 'AIA_TOPIC_EVALUATION',
@@ -39,9 +40,22 @@ const AGENT_TO_PROMPT_CODE: Record<string, string> = {
// ==================== 智能体配置 ====================
/**
* 12个智能体配置(与前端保持一致)
* 13个智能体配置(与前端保持一致)
* 包含12个传统智能体 + 1个Protocol Agent
*/
const AGENTS: Agent[] = [
// Protocol Agent: 全流程研究方案制定
{
id: 'PROTOCOL_AGENT',
name: '全流程研究方案制定',
description: '一站式完成研究方案核心要素科学问题→PICO→研究设计→样本量→观察指标支持一键生成完整方案。',
icon: '🚀',
stage: 'protocol',
color: '#6366F1',
systemPrompt: `你是研究方案制定助手,将引导用户系统地完成临床研究方案的核心要素设计。`,
welcomeMessage: '您好!我是研究方案制定助手。让我们开始制定您的研究方案吧!',
},
// Phase 1: 选题优化智能体
{
id: 'TOPIC_01',