feat(asl): Complete Deep Research V2.0 core development
Backend: - Add SSE streaming client (unifuncsSseClient) replacing async polling - Add paragraph-based reasoning parser with mergeConsecutiveThinking - Add requirement expansion service (DeepSeek-V3 PICOS+MeSH) - Add Word export service with Pandoc, inline hyperlinks, reference link expansion - Add deep research V2 worker with 2s log flush and Chinese source prompt - Add 5 curated data sources config (PubMed/ClinicalTrials/Cochrane/CNKI/MedJournals) - Add 4 API endpoints (generate-requirement/tasks/task-status/export-word) - Update Prisma schema with 6 new V2.0 fields on AslResearchTask - Add DB migration for V2.0 fields - Simplify ASL_DEEP_RESEARCH_EXPANSION prompt (remove strategy section) Frontend: - Add waterfall-flow DeepResearchPage (phase 0-4 progressive reveal) - Add LandingView, SetupPanel, StrategyConfirm, AgentTerminal, ResultsView - Add react-markdown + remark-gfm for report rendering - Add custom link component showing visible URLs after references - Add useDeepResearchTask polling hook - Add deep research TypeScript types Tests: - Add E2E test, smoke test, and Chinese data source test scripts Docs: - Update ASL module status (v2.0 - core features complete) - Update system status (v6.1 - ASL V2.0 milestone) - Update Unifuncs DeepSearch API guide (v2.0 - SSE mode + Chinese source results) - Update module auth specification (test script guidelines) - Update V2.0 development plan Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
@@ -0,0 +1,10 @@
|
||||
-- Deep Research V2.0: Add 6 new fields to research_tasks
|
||||
-- Backward compatible: all new columns are nullable
|
||||
|
||||
ALTER TABLE "asl_schema"."research_tasks"
|
||||
ADD COLUMN IF NOT EXISTS "target_sources" JSONB,
|
||||
ADD COLUMN IF NOT EXISTS "confirmed_requirement" TEXT,
|
||||
ADD COLUMN IF NOT EXISTS "ai_intent_summary" JSONB,
|
||||
ADD COLUMN IF NOT EXISTS "execution_logs" JSONB,
|
||||
ADD COLUMN IF NOT EXISTS "synthesis_report" TEXT,
|
||||
ADD COLUMN IF NOT EXISTS "result_list" JSONB;
|
||||
@@ -1,100 +0,0 @@
|
||||
-- =====================================================
|
||||
-- Phase 2A: SSA 智能化核心 - 数据库迁移脚本
|
||||
-- 日期: 2026-02-20
|
||||
-- 描述: 添加工作流表和数据画像字段
|
||||
-- 注意: ssa_sessions.id 是 TEXT 类型(存储 UUID 字符串)
|
||||
-- =====================================================
|
||||
|
||||
-- 1. 给 ssa_sessions 表添加 data_profile 字段(如果不存在)
|
||||
ALTER TABLE ssa_schema.ssa_sessions
|
||||
ADD COLUMN IF NOT EXISTS data_profile JSONB;
|
||||
|
||||
COMMENT ON COLUMN ssa_schema.ssa_sessions.data_profile IS 'Python Tool C 生成的数据画像 (Phase 2A)';
|
||||
|
||||
-- 2. 创建 ssa_workflows 表(多步骤分析流程)
|
||||
CREATE TABLE IF NOT EXISTS ssa_schema.ssa_workflows (
|
||||
id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::TEXT,
|
||||
session_id TEXT NOT NULL,
|
||||
message_id TEXT,
|
||||
status VARCHAR(20) NOT NULL DEFAULT 'pending',
|
||||
total_steps INTEGER NOT NULL,
|
||||
completed_steps INTEGER NOT NULL DEFAULT 0,
|
||||
workflow_plan JSONB NOT NULL,
|
||||
reasoning TEXT,
|
||||
created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT NOW(),
|
||||
started_at TIMESTAMP WITHOUT TIME ZONE,
|
||||
completed_at TIMESTAMP WITHOUT TIME ZONE,
|
||||
|
||||
CONSTRAINT fk_ssa_workflow_session
|
||||
FOREIGN KEY (session_id)
|
||||
REFERENCES ssa_schema.ssa_sessions(id)
|
||||
ON DELETE CASCADE
|
||||
);
|
||||
|
||||
-- ssa_workflows 索引
|
||||
CREATE INDEX IF NOT EXISTS idx_ssa_workflow_session
|
||||
ON ssa_schema.ssa_workflows(session_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_ssa_workflow_status
|
||||
ON ssa_schema.ssa_workflows(status);
|
||||
|
||||
-- ssa_workflows 字段注释
|
||||
COMMENT ON TABLE ssa_schema.ssa_workflows IS 'SSA 多步骤分析工作流 (Phase 2A)';
|
||||
COMMENT ON COLUMN ssa_schema.ssa_workflows.status IS 'pending | running | completed | partial | error';
|
||||
COMMENT ON COLUMN ssa_schema.ssa_workflows.workflow_plan IS 'LLM 生成的原始工作流计划 JSON';
|
||||
COMMENT ON COLUMN ssa_schema.ssa_workflows.reasoning IS 'LLM 规划理由说明';
|
||||
|
||||
-- 3. 创建 ssa_workflow_steps 表(流程中的每个步骤)
|
||||
CREATE TABLE IF NOT EXISTS ssa_schema.ssa_workflow_steps (
|
||||
id TEXT PRIMARY KEY DEFAULT gen_random_uuid()::TEXT,
|
||||
workflow_id TEXT NOT NULL,
|
||||
step_order INTEGER NOT NULL,
|
||||
tool_code VARCHAR(50) NOT NULL,
|
||||
tool_name VARCHAR(100) NOT NULL,
|
||||
status VARCHAR(20) NOT NULL DEFAULT 'pending',
|
||||
input_params JSONB,
|
||||
guardrail_checks JSONB,
|
||||
output_result JSONB,
|
||||
error_info JSONB,
|
||||
execution_ms INTEGER,
|
||||
started_at TIMESTAMP WITHOUT TIME ZONE,
|
||||
completed_at TIMESTAMP WITHOUT TIME ZONE,
|
||||
|
||||
CONSTRAINT fk_ssa_workflow_step_workflow
|
||||
FOREIGN KEY (workflow_id)
|
||||
REFERENCES ssa_schema.ssa_workflows(id)
|
||||
ON DELETE CASCADE
|
||||
);
|
||||
|
||||
-- ssa_workflow_steps 索引
|
||||
CREATE INDEX IF NOT EXISTS idx_ssa_workflow_step_workflow
|
||||
ON ssa_schema.ssa_workflow_steps(workflow_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_ssa_workflow_step_status
|
||||
ON ssa_schema.ssa_workflow_steps(status);
|
||||
|
||||
-- ssa_workflow_steps 字段注释
|
||||
COMMENT ON TABLE ssa_schema.ssa_workflow_steps IS 'SSA 工作流单步执行记录 (Phase 2A)';
|
||||
COMMENT ON COLUMN ssa_schema.ssa_workflow_steps.status IS 'pending | running | success | warning | error | skipped';
|
||||
COMMENT ON COLUMN ssa_schema.ssa_workflow_steps.guardrail_checks IS 'R Service JIT 护栏检验结果 (正态性、方差齐性等)';
|
||||
COMMENT ON COLUMN ssa_schema.ssa_workflow_steps.output_result IS '工具执行结果 (已裁剪,符合 LLM 上下文限制)';
|
||||
COMMENT ON COLUMN ssa_schema.ssa_workflow_steps.error_info IS '错误信息 (用于容错管道的部分成功场景)';
|
||||
|
||||
-- =====================================================
|
||||
-- 验证脚本
|
||||
-- =====================================================
|
||||
SELECT 'ssa_sessions.data_profile 字段' as item,
|
||||
CASE WHEN EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_schema = 'ssa_schema' AND table_name = 'ssa_sessions' AND column_name = 'data_profile'
|
||||
) THEN '✅ 已创建' ELSE '❌ 未创建' END as status;
|
||||
|
||||
SELECT 'ssa_workflows 表' as item,
|
||||
CASE WHEN EXISTS (
|
||||
SELECT 1 FROM information_schema.tables
|
||||
WHERE table_schema = 'ssa_schema' AND table_name = 'ssa_workflows'
|
||||
) THEN '✅ 已创建' ELSE '❌ 未创建' END as status;
|
||||
|
||||
SELECT 'ssa_workflow_steps 表' as item,
|
||||
CASE WHEN EXISTS (
|
||||
SELECT 1 FROM information_schema.tables
|
||||
WHERE table_schema = 'ssa_schema' AND table_name = 'ssa_workflow_steps'
|
||||
) THEN '✅ 已创建' ELSE '❌ 未创建' END as status;
|
||||
@@ -477,7 +477,7 @@ model AslFulltextScreeningTask {
|
||||
@@schema("asl_schema")
|
||||
}
|
||||
|
||||
/// 智能文献检索任务(DeepSearch)
|
||||
/// 智能文献检索任务(DeepSearch V1.x + V2.0)
|
||||
model AslResearchTask {
|
||||
id String @id @default(uuid())
|
||||
|
||||
@@ -486,23 +486,23 @@ model AslResearchTask {
|
||||
userId String @map("user_id")
|
||||
|
||||
// 检索输入
|
||||
query String // 用户的自然语言查询
|
||||
filters Json? // 🔜 后续:高级筛选 { yearFrom, yearTo, articleTypes }
|
||||
query String // 用户的自然语言查询(V1.x 原始输入 / V2.0 Step 1 粗略想法)
|
||||
filters Json? // 高级筛选 { yearRange, targetCount, requireOpenAccess }
|
||||
|
||||
// unifuncs 任务
|
||||
externalTaskId String? @map("external_task_id")
|
||||
|
||||
// 状态
|
||||
status String @default("pending") // pending/processing/completed/failed
|
||||
// 状态: draft → pending → running → completed / failed
|
||||
status String @default("pending")
|
||||
errorMessage String? @map("error_message")
|
||||
|
||||
// 结果
|
||||
// V1.x 结果字段(保留向后兼容)
|
||||
resultCount Int? @map("result_count")
|
||||
rawResult String? @map("raw_result") @db.Text
|
||||
reasoningContent String? @map("reasoning_content") @db.Text // AI思考过程
|
||||
literatures Json? // 解析后的文献列表
|
||||
reasoningContent String? @map("reasoning_content") @db.Text
|
||||
literatures Json?
|
||||
|
||||
// 统计(🔜 后续展示)
|
||||
// 统计
|
||||
tokenUsage Json? @map("token_usage")
|
||||
searchCount Int? @map("search_count")
|
||||
readCount Int? @map("read_count")
|
||||
@@ -513,6 +513,15 @@ model AslResearchTask {
|
||||
updatedAt DateTime @updatedAt @map("updated_at")
|
||||
completedAt DateTime? @map("completed_at")
|
||||
|
||||
// ── V2.0 新增字段 ──────────────────────────────
|
||||
targetSources Json? @map("target_sources") // 选中的数据源 ["https://pubmed.ncbi.nlm.nih.gov/", ...]
|
||||
confirmedRequirement String? @map("confirmed_requirement") @db.Text // 用户核验后的自然语言检索指令书
|
||||
aiIntentSummary Json? @map("ai_intent_summary") // PICOS + MeSH 结构化摘要
|
||||
executionLogs Json? @map("execution_logs") // 终端日志数组 [{type, title, text, ts}]
|
||||
synthesisReport String? @map("synthesis_report") @db.Text // AI综合报告(Markdown)
|
||||
resultList Json? @map("result_list") // 结构化文献元数据列表
|
||||
|
||||
// ── 索引 ────────────────────────────
|
||||
@@index([projectId], map: "idx_research_tasks_project_id")
|
||||
@@index([userId], map: "idx_research_tasks_user_id")
|
||||
@@index([status], map: "idx_research_tasks_status")
|
||||
|
||||
@@ -55,7 +55,7 @@ const RVW_FALLBACKS: Record<string, FallbackPrompt> = {
|
||||
};
|
||||
|
||||
/**
|
||||
* ASL 模块兜底 Prompt(预留)
|
||||
* ASL 模块兜底 Prompt
|
||||
*/
|
||||
const ASL_FALLBACKS: Record<string, FallbackPrompt> = {
|
||||
ASL_SCREENING: {
|
||||
@@ -64,6 +64,49 @@ const ASL_FALLBACKS: Record<string, FallbackPrompt> = {
|
||||
请根据提供的标准对文献进行筛选,输出JSON格式的结果。`,
|
||||
modelConfig: { model: 'deepseek-v3', temperature: 0.2 },
|
||||
},
|
||||
|
||||
ASL_DEEP_RESEARCH_EXPANSION: {
|
||||
content: `你是一位经验丰富的医学信息官,擅长将研究者的模糊想法转化为精准的文献检索需求指令。
|
||||
|
||||
## 任务
|
||||
根据用户输入的粗略研究想法,生成一份简洁的深度文献检索指令书。
|
||||
|
||||
## 输出规则
|
||||
1. 自然语言风格:像写邮件一样,口语化但专业,方便研究者直接阅读和编辑
|
||||
2. PICOS 拆解:明确 Population / Intervention / Comparison / Outcome / Study Design
|
||||
3. MeSH 扩展:为关键术语补充 MeSH 同义词(用括号标注英文 MeSH 术语)
|
||||
4. 研究设计偏好:若用户未指定,默认优先 RCT、Systematic Review/Meta-Analysis、Cohort Study
|
||||
5. 不要输出"检索策略建议"章节
|
||||
6. 不要使用 Markdown 加粗标记(即不要用 ** 符号)
|
||||
7. 不得自行添加约束:不要擅自限定"仅开放获取"或"仅英文文献"
|
||||
|
||||
## 用户输入
|
||||
- 研究想法:{{originalQuery}}
|
||||
- 选择的数据源:{{targetSources}}
|
||||
- 时间范围:{{yearRange}}
|
||||
- 目标数量:{{targetCount}}
|
||||
|
||||
## 输出格式
|
||||
请严格按以下两部分输出,不要添加额外内容:
|
||||
|
||||
### Part 1: 自然语言检索指令书
|
||||
(简洁的检索需求描述,包含研究背景、PICOS要素、MeSH术语,不要包含检索策略建议)
|
||||
|
||||
### Part 2: 结构化摘要(JSON)
|
||||
\`\`\`json
|
||||
{
|
||||
"objective": "研究目标一句话描述",
|
||||
"population": "研究人群",
|
||||
"intervention": "干预措施(含英文MeSH)",
|
||||
"comparison": "对照组",
|
||||
"outcome": "主要结局指标",
|
||||
"studyDesign": ["RCT", "Meta-analysis"],
|
||||
"meshTerms": ["MeSH术语1", "MeSH术语2"],
|
||||
"condition": "疾病/状况"
|
||||
}
|
||||
\`\`\``,
|
||||
modelConfig: { model: 'deepseek-v3', temperature: 0.4 },
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
@@ -0,0 +1,179 @@
|
||||
/**
|
||||
* 中文数据源专项测试
|
||||
*
|
||||
* 只搜索 CNKI + 中华医学期刊网,验证 Unifuncs API 能否检索中文文献。
|
||||
*
|
||||
* 运行: npx tsx src/modules/asl/__tests__/deep-research-chinese-sources.ts
|
||||
*/
|
||||
|
||||
const UNIFUNCS_BASE_URL = 'https://api.unifuncs.com/deepsearch/v1';
|
||||
const UNIFUNCS_API_KEY = process.env.UNIFUNCS_API_KEY || 'sk-2fNwqUH73elGq0aDKJEM4ReqP7Ry0iqHo4OXyidDe2WpQ9XQ';
|
||||
|
||||
const CHINESE_SOURCES = [
|
||||
'https://www.cnki.net/',
|
||||
'https://medjournals.cn/',
|
||||
];
|
||||
|
||||
const TEST_QUERIES = [
|
||||
'2型糖尿病患者SGLT2抑制剂的肾脏保护作用',
|
||||
'非小细胞肺癌免疫治疗的中国临床研究进展',
|
||||
];
|
||||
|
||||
async function runTest(query: string, domainScope: string[]) {
|
||||
console.log('\n' + '='.repeat(80));
|
||||
console.log(`查询: ${query}`);
|
||||
console.log(`数据源: ${domainScope.join(', ')}`);
|
||||
console.log('='.repeat(80));
|
||||
|
||||
// --- Test 1: SSE 流式模式 ---
|
||||
console.log('\n--- SSE 流式模式测试 ---');
|
||||
try {
|
||||
const payload = {
|
||||
model: 's2',
|
||||
messages: [{ role: 'user', content: query }],
|
||||
stream: true,
|
||||
introduction: '你是一名专业的中国临床研究文献检索专家,擅长从中国学术数据库中检索中文医学文献。请使用中文关键词进行检索。',
|
||||
max_depth: 10,
|
||||
domain_scope: domainScope,
|
||||
reference_style: 'link',
|
||||
output_prompt: '请使用中文输出。列出所有检索到的文献,包含标题、作者、期刊、年份和链接。如果文献是中文的,请保留中文信息。',
|
||||
};
|
||||
|
||||
const response = await fetch(`${UNIFUNCS_BASE_URL}/chat/completions`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${UNIFUNCS_API_KEY}`,
|
||||
'Content-Type': 'application/json',
|
||||
'Accept': 'text/event-stream',
|
||||
},
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const text = await response.text();
|
||||
console.error(`❌ SSE 请求失败 HTTP ${response.status}: ${text}`);
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(`✅ SSE 连接成功 (HTTP ${response.status})`);
|
||||
|
||||
const reader = (response.body as any).getReader() as ReadableStreamDefaultReader<Uint8Array>;
|
||||
const decoder = new TextDecoder();
|
||||
let buffer = '';
|
||||
let reasoningContent = '';
|
||||
let content = '';
|
||||
let chunkCount = 0;
|
||||
let reasoningChunks = 0;
|
||||
let contentChunks = 0;
|
||||
const startTime = Date.now();
|
||||
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
buffer += decoder.decode(value, { stream: true });
|
||||
const lines = buffer.split('\n');
|
||||
buffer = lines.pop() || '';
|
||||
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed || trimmed.startsWith(':')) continue;
|
||||
|
||||
if (trimmed.startsWith('data: ')) {
|
||||
const data = trimmed.slice(6);
|
||||
if (data === '[DONE]') {
|
||||
console.log('\n✅ 流式传输完成 [DONE]');
|
||||
break;
|
||||
}
|
||||
|
||||
try {
|
||||
const json = JSON.parse(data);
|
||||
const delta = json.choices?.[0]?.delta;
|
||||
chunkCount++;
|
||||
|
||||
if (delta?.reasoning_content) {
|
||||
reasoningContent += delta.reasoning_content;
|
||||
reasoningChunks++;
|
||||
if (reasoningChunks % 50 === 0) {
|
||||
process.stdout.write(`\r reasoning chunks: ${reasoningChunks}, content chunks: ${contentChunks}`);
|
||||
}
|
||||
} else if (delta?.content) {
|
||||
content += delta.content;
|
||||
contentChunks++;
|
||||
if (contentChunks % 20 === 0) {
|
||||
process.stdout.write(`\r reasoning chunks: ${reasoningChunks}, content chunks: ${contentChunks}`);
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Skip unparseable
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
reader.releaseLock();
|
||||
const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
|
||||
|
||||
console.log(`\n\n📊 统计:`);
|
||||
console.log(` 耗时: ${elapsed}s`);
|
||||
console.log(` 总 chunks: ${chunkCount}`);
|
||||
console.log(` reasoning chunks: ${reasoningChunks} (${reasoningContent.length} chars)`);
|
||||
console.log(` content chunks: ${contentChunks} (${content.length} chars)`);
|
||||
|
||||
// 分析结果
|
||||
console.log(`\n📝 Reasoning 内容(前 500 字):`);
|
||||
console.log(reasoningContent.slice(0, 500));
|
||||
|
||||
console.log(`\n📝 Content 结果(前 1000 字):`);
|
||||
console.log(content.slice(0, 1000));
|
||||
|
||||
// 检查是否包含中文内容
|
||||
const chineseCharCount = (content.match(/[\u4e00-\u9fa5]/g) || []).length;
|
||||
const hasChineseLinks = content.includes('cnki.net') || content.includes('medjournals.cn');
|
||||
const hasPubMedLinks = content.includes('pubmed.ncbi.nlm.nih.gov');
|
||||
|
||||
console.log(`\n🔍 中文分析:`);
|
||||
console.log(` 中文字符数: ${chineseCharCount}`);
|
||||
console.log(` 包含 CNKI 链接: ${hasChineseLinks ? '✅ 是' : '❌ 否'}`);
|
||||
console.log(` 包含 PubMed 链接: ${hasPubMedLinks ? '⚠️ 是(非中文源)' : '✅ 否'}`);
|
||||
|
||||
// 统计所有 URL
|
||||
const urls = content.match(/https?:\/\/[^\s)]+/g) || [];
|
||||
console.log(` 结果中的链接 (共 ${urls.length} 个):`);
|
||||
const domainCounts: Record<string, number> = {};
|
||||
for (const url of urls) {
|
||||
try {
|
||||
const domain = new URL(url).hostname;
|
||||
domainCounts[domain] = (domainCounts[domain] || 0) + 1;
|
||||
} catch { /* skip */ }
|
||||
}
|
||||
for (const [domain, count] of Object.entries(domainCounts).sort((a, b) => b[1] - a[1])) {
|
||||
console.log(` ${domain}: ${count} 个`);
|
||||
}
|
||||
|
||||
} catch (err: any) {
|
||||
console.error(`❌ SSE 测试失败: ${err.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
async function main() {
|
||||
console.log('🔬 中文数据源专项测试');
|
||||
console.log(`API Key: ${UNIFUNCS_API_KEY.slice(0, 10)}...`);
|
||||
console.log(`测试数据源: ${CHINESE_SOURCES.join(', ')}`);
|
||||
|
||||
for (const query of TEST_QUERIES) {
|
||||
await runTest(query, CHINESE_SOURCES);
|
||||
}
|
||||
|
||||
// 额外测试: 同时包含中英文数据源
|
||||
console.log('\n\n' + '🔬'.repeat(20));
|
||||
console.log('附加测试: 中英文混合数据源');
|
||||
await runTest(
|
||||
'他汀类药物在心血管疾病一级预防中的最新RCT证据',
|
||||
['https://pubmed.ncbi.nlm.nih.gov/', ...CHINESE_SOURCES]
|
||||
);
|
||||
|
||||
console.log('\n\n✅ 所有测试完成');
|
||||
}
|
||||
|
||||
main().catch(console.error);
|
||||
345
backend/src/modules/asl/__tests__/deep-research-v2-e2e.ts
Normal file
345
backend/src/modules/asl/__tests__/deep-research-v2-e2e.ts
Normal file
@@ -0,0 +1,345 @@
|
||||
/**
|
||||
* Deep Research V2.0 — 端到端集成测试
|
||||
*
|
||||
* 测试全流程:
|
||||
* Step 1: 登录获取 Token
|
||||
* Step 2: 获取数据源列表
|
||||
* Step 3: 需求扩写(LLM 调用)
|
||||
* Step 4: 查看 draft 任务
|
||||
* Step 5: HITL 确认 → 启动执行
|
||||
* Step 6: 轮询任务直到 completed/failed
|
||||
* Step 7: 验证结果完整性
|
||||
* Step 8: Word 导出(可选,需要 Pandoc 微服务)
|
||||
* Step 9: 清理测试数据
|
||||
*
|
||||
* 运行方式:
|
||||
* npx tsx src/modules/asl/__tests__/deep-research-v2-e2e.ts
|
||||
*
|
||||
* 前置条件:
|
||||
* - 后端服务运行在 localhost:3001
|
||||
* - PostgreSQL 运行中
|
||||
* - UNIFUNCS_API_KEY 环境变量已设置
|
||||
* - (可选)Python 微服务运行在 localhost:8000(Word 导出)
|
||||
*
|
||||
* 预计耗时:2-5 分钟(取决于 Unifuncs 搜索深度)
|
||||
* 预计成本:约 ¥0.05-0.2(LLM 需求扩写 + Unifuncs DeepSearch)
|
||||
*/
|
||||
|
||||
// ─── 配置 ───────────────────────────────────────
|
||||
|
||||
const BASE_URL = process.env.TEST_BASE_URL || 'http://localhost:3001';
|
||||
const API_PREFIX = `${BASE_URL}/api/v1`;
|
||||
|
||||
const TEST_PHONE = process.env.TEST_PHONE || '13800000001';
|
||||
const TEST_PASSWORD = process.env.TEST_PASSWORD || '123456';
|
||||
|
||||
const TEST_QUERY = '他汀类药物在心血管疾病一级预防中的最新 RCT 证据';
|
||||
|
||||
const MAX_POLL_ATTEMPTS = 60;
|
||||
const POLL_INTERVAL_MS = 5000;
|
||||
|
||||
const SKIP_WORD_EXPORT = process.env.SKIP_WORD_EXPORT === 'true';
|
||||
const SKIP_CLEANUP = process.env.SKIP_CLEANUP === 'true';
|
||||
|
||||
// ─── 工具函数 ───────────────────────────────────
|
||||
|
||||
const sleep = (ms: number) => new Promise(r => setTimeout(r, ms));
|
||||
|
||||
let authToken = '';
|
||||
|
||||
async function api(
|
||||
path: string,
|
||||
options: RequestInit = {}
|
||||
): Promise<{ status: number; data: any }> {
|
||||
const url = `${API_PREFIX}${path}`;
|
||||
const res = await fetch(url, {
|
||||
...options,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
...(authToken ? { Authorization: `Bearer ${authToken}` } : {}),
|
||||
...(options.headers || {}),
|
||||
},
|
||||
});
|
||||
|
||||
const contentType = res.headers.get('content-type') || '';
|
||||
let data: any;
|
||||
|
||||
if (contentType.includes('application/json')) {
|
||||
data = await res.json();
|
||||
} else if (contentType.includes('application/vnd.openxmlformats')) {
|
||||
const buf = await res.arrayBuffer();
|
||||
data = { _binary: true, byteLength: buf.byteLength };
|
||||
} else {
|
||||
data = await res.text();
|
||||
}
|
||||
|
||||
return { status: res.status, data };
|
||||
}
|
||||
|
||||
function assert(condition: boolean, message: string) {
|
||||
if (!condition) {
|
||||
throw new Error(`❌ 断言失败: ${message}`);
|
||||
}
|
||||
console.log(` ✅ ${message}`);
|
||||
}
|
||||
|
||||
function printDivider(title: string) {
|
||||
console.log(`\n${'═'.repeat(70)}`);
|
||||
console.log(` ${title}`);
|
||||
console.log(`${'═'.repeat(70)}\n`);
|
||||
}
|
||||
|
||||
// ─── 测试流程 ───────────────────────────────────
|
||||
|
||||
async function runE2E() {
|
||||
console.log('🧪 Deep Research V2.0 端到端集成测试');
|
||||
console.log(`⏰ 时间: ${new Date().toLocaleString('zh-CN')}`);
|
||||
console.log(`📍 后端: ${BASE_URL}`);
|
||||
console.log(`📝 查询: ${TEST_QUERY}`);
|
||||
console.log('');
|
||||
|
||||
let taskId: string | null = null;
|
||||
const startTime = Date.now();
|
||||
|
||||
try {
|
||||
// ═══════════ Step 1: 登录 ═══════════
|
||||
printDivider('Step 1: 登录获取 Token');
|
||||
|
||||
const loginRes = await api('/auth/login/password', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({ phone: TEST_PHONE, password: TEST_PASSWORD }),
|
||||
});
|
||||
|
||||
if (loginRes.status !== 200 || !loginRes.data.success) {
|
||||
console.log('⚠️ 登录返回:', JSON.stringify(loginRes.data, null, 2));
|
||||
throw new Error(
|
||||
`登录失败 (HTTP ${loginRes.status}): ${loginRes.data.error || loginRes.data.message || '未知错误'}\n` +
|
||||
`请确认测试账号存在:phone=${TEST_PHONE}, password=${TEST_PASSWORD}\n` +
|
||||
`或通过环境变量指定:TEST_PHONE=xxx TEST_PASSWORD=yyy`
|
||||
);
|
||||
}
|
||||
|
||||
authToken = loginRes.data.data.tokens?.accessToken || loginRes.data.data.accessToken || loginRes.data.data.token;
|
||||
assert(!!authToken, `获取到 Token (${authToken.slice(0, 20)}...)`);
|
||||
|
||||
// ═══════════ Step 2: 获取数据源列表 ═══════════
|
||||
printDivider('Step 2: 获取数据源列表');
|
||||
|
||||
const sourcesRes = await api('/asl/research/data-sources');
|
||||
assert(sourcesRes.status === 200, `HTTP 200 (实际: ${sourcesRes.status})`);
|
||||
assert(sourcesRes.data.success === true, 'success=true');
|
||||
|
||||
const sources = sourcesRes.data.data;
|
||||
assert(Array.isArray(sources), `返回数组 (${sources.length} 个数据源)`);
|
||||
assert(sources.length >= 3, `至少 3 个数据源`);
|
||||
|
||||
console.log('\n 数据源列表:');
|
||||
sources.forEach((s: any) => {
|
||||
console.log(` ${s.defaultChecked ? '☑' : '☐'} [${s.category}] ${s.label} → ${s.domainScope}`);
|
||||
});
|
||||
|
||||
const defaultIds = sources.filter((s: any) => s.defaultChecked).map((s: any) => s.domainScope);
|
||||
console.log(`\n 默认选中: ${defaultIds.join(', ')}`);
|
||||
|
||||
// ═══════════ Step 3: 需求扩写 ═══════════
|
||||
printDivider('Step 3: 需求扩写(LLM 调用)');
|
||||
console.log(' ⏳ 调用 LLM 生成检索指令书,请稍候...\n');
|
||||
|
||||
const expandStart = Date.now();
|
||||
const expandRes = await api('/asl/research/generate-requirement', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({
|
||||
originalQuery: TEST_QUERY,
|
||||
targetSources: defaultIds,
|
||||
filters: { yearRange: '近5年', targetCount: '约20篇' },
|
||||
}),
|
||||
});
|
||||
|
||||
const expandMs = Date.now() - expandStart;
|
||||
assert(expandRes.status === 200, `HTTP 200 (实际: ${expandRes.status})`);
|
||||
assert(expandRes.data.success === true, 'success=true');
|
||||
|
||||
const expandData = expandRes.data.data;
|
||||
taskId = expandData.taskId;
|
||||
assert(!!taskId, `创建 draft 任务: ${taskId}`);
|
||||
assert(!!expandData.generatedRequirement, `生成检索指令书 (${expandData.generatedRequirement.length} 字)`);
|
||||
assert(!!expandData.intentSummary, 'PICOS 结构化摘要已生成');
|
||||
|
||||
console.log(`\n ⏱️ 耗时: ${(expandMs / 1000).toFixed(1)}s`);
|
||||
console.log(`\n 📋 PICOS 摘要:`);
|
||||
const summary = expandData.intentSummary;
|
||||
console.log(` 目标: ${summary.objective || '—'}`);
|
||||
console.log(` P: ${summary.population || '—'}`);
|
||||
console.log(` I: ${summary.intervention || '—'}`);
|
||||
console.log(` C: ${summary.comparison || '—'}`);
|
||||
console.log(` O: ${summary.outcome || '—'}`);
|
||||
console.log(` 研究设计: ${(summary.studyDesign || []).join(', ')}`);
|
||||
console.log(` MeSH: ${(summary.meshTerms || []).join(', ')}`);
|
||||
|
||||
console.log(`\n 📝 指令书预览 (前 200 字):`);
|
||||
console.log(` ${expandData.generatedRequirement.slice(0, 200).replace(/\n/g, '\n ')}...`);
|
||||
|
||||
// ═══════════ Step 4: 查看 draft 任务 ═══════════
|
||||
printDivider('Step 4: 查看 draft 状态任务');
|
||||
|
||||
const draftRes = await api(`/asl/research/tasks/${taskId}`);
|
||||
assert(draftRes.status === 200, `HTTP 200`);
|
||||
assert(draftRes.data.data.status === 'draft', `状态为 draft`);
|
||||
assert(draftRes.data.data.query === TEST_QUERY, `query 匹配`);
|
||||
|
||||
// ═══════════ Step 5: HITL 确认 → 启动 ═══════════
|
||||
printDivider('Step 5: HITL 确认 → 启动执行');
|
||||
|
||||
const confirmedReq = expandData.generatedRequirement;
|
||||
|
||||
const executeRes = await api(`/asl/research/tasks/${taskId}/execute`, {
|
||||
method: 'PUT',
|
||||
body: JSON.stringify({ confirmedRequirement: confirmedReq }),
|
||||
});
|
||||
|
||||
assert(executeRes.status === 200, `HTTP 200 (实际: ${executeRes.status})`);
|
||||
assert(executeRes.data.success === true, '任务已入队');
|
||||
console.log(' 🚀 Deep Research 任务已启动!');
|
||||
|
||||
// ═══════════ Step 6: 轮询 ═══════════
|
||||
printDivider('Step 6: 轮询任务进度');
|
||||
|
||||
let lastLogCount = 0;
|
||||
let finalStatus = '';
|
||||
|
||||
for (let i = 1; i <= MAX_POLL_ATTEMPTS; i++) {
|
||||
await sleep(POLL_INTERVAL_MS);
|
||||
|
||||
const pollRes = await api(`/asl/research/tasks/${taskId}`);
|
||||
const task = pollRes.data.data;
|
||||
|
||||
const logs = task.executionLogs || [];
|
||||
const newLogs = logs.slice(lastLogCount);
|
||||
|
||||
if (newLogs.length > 0) {
|
||||
newLogs.forEach((log: any) => {
|
||||
const icon: Record<string, string> = {
|
||||
thinking: '💭', searching: '🔍', reading: '📖',
|
||||
analyzing: '🧪', summary: '📊', info: 'ℹ️',
|
||||
};
|
||||
console.log(` ${icon[log.type] || '•'} [${log.title}] ${log.text.slice(0, 80)}`);
|
||||
});
|
||||
lastLogCount = logs.length;
|
||||
}
|
||||
|
||||
console.log(` [${i}/${MAX_POLL_ATTEMPTS}] status=${task.status} | logs=${logs.length} | elapsed=${((Date.now() - startTime) / 1000).toFixed(0)}s`);
|
||||
|
||||
if (task.status === 'completed' || task.status === 'failed') {
|
||||
finalStatus = task.status;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert(finalStatus === 'completed', `任务完成 (实际: ${finalStatus || 'timeout'})`);
|
||||
|
||||
// ═══════════ Step 7: 验证结果 ═══════════
|
||||
printDivider('Step 7: 验证结果完整性');
|
||||
|
||||
const resultRes = await api(`/asl/research/tasks/${taskId}`);
|
||||
const result = resultRes.data.data;
|
||||
|
||||
assert(result.status === 'completed', '状态: completed');
|
||||
assert(!!result.synthesisReport, `AI 综合报告已生成 (${(result.synthesisReport || '').length} 字)`);
|
||||
assert(!!result.completedAt, `completedAt 已设置`);
|
||||
|
||||
const hasResultList = Array.isArray(result.resultList) && result.resultList.length > 0;
|
||||
if (hasResultList) {
|
||||
console.log(` ✅ 结构化文献列表: ${result.resultList.length} 篇`);
|
||||
console.log('\n 📚 文献样例 (前 3 篇):');
|
||||
result.resultList.slice(0, 3).forEach((item: any, i: number) => {
|
||||
console.log(` ${i + 1}. ${item.title || '(无标题)'}`);
|
||||
if (item.authors) console.log(` 作者: ${item.authors}`);
|
||||
if (item.journal) console.log(` 期刊: ${item.journal}`);
|
||||
if (item.pmid) console.log(` PMID: ${item.pmid}`);
|
||||
});
|
||||
} else {
|
||||
console.log(' ⚠️ 结构化文献列表为空(降级到报告展示模式)');
|
||||
}
|
||||
|
||||
const logs = result.executionLogs || [];
|
||||
console.log(`\n 📊 执行日志统计: ${logs.length} 条`);
|
||||
const typeCounts: Record<string, number> = {};
|
||||
logs.forEach((l: any) => { typeCounts[l.type] = (typeCounts[l.type] || 0) + 1; });
|
||||
Object.entries(typeCounts).forEach(([type, count]) => {
|
||||
console.log(` ${type}: ${count}`);
|
||||
});
|
||||
|
||||
console.log(`\n 📝 报告预览 (前 300 字):`);
|
||||
console.log(` ${result.synthesisReport.slice(0, 300).replace(/\n/g, '\n ')}...`);
|
||||
|
||||
// ═══════════ Step 8: Word 导出 ═══════════
|
||||
if (!SKIP_WORD_EXPORT) {
|
||||
printDivider('Step 8: Word 导出(需要 Pandoc 微服务)');
|
||||
|
||||
try {
|
||||
const exportRes = await api(`/asl/research/tasks/${taskId}/export-word`);
|
||||
|
||||
if (exportRes.status === 200 && exportRes.data._binary) {
|
||||
assert(true, `Word 导出成功 (${exportRes.data.byteLength} bytes)`);
|
||||
} else {
|
||||
console.log(` ⚠️ Word 导出返回异常 (HTTP ${exportRes.status})`);
|
||||
console.log(` 这通常是因为 Python 微服务(Pandoc)未运行,可忽略。`);
|
||||
}
|
||||
} catch (e: any) {
|
||||
console.log(` ⚠️ Word 导出跳过: ${e.message}`);
|
||||
console.log(` 如需测试,请启动 Python 微服务: cd extraction-service && python app.py`);
|
||||
}
|
||||
} else {
|
||||
console.log('\n ⏭️ 跳过 Word 导出测试 (SKIP_WORD_EXPORT=true)');
|
||||
}
|
||||
|
||||
// ═══════════ Step 9: 清理 ═══════════
|
||||
if (!SKIP_CLEANUP && taskId) {
|
||||
printDivider('Step 9: 清理测试数据');
|
||||
try {
|
||||
const { PrismaClient } = await import('@prisma/client');
|
||||
const prisma = new PrismaClient();
|
||||
await prisma.aslResearchTask.delete({ where: { id: taskId } });
|
||||
await prisma.$disconnect();
|
||||
console.log(` 🗑️ 已删除测试任务: ${taskId}`);
|
||||
} catch (e: any) {
|
||||
console.log(` ⚠️ 清理失败: ${e.message} (可手动删除)`);
|
||||
}
|
||||
} else {
|
||||
console.log(`\n ⏭️ 保留测试数据 taskId=${taskId}`);
|
||||
}
|
||||
|
||||
// ═══════════ 总结 ═══════════
|
||||
printDivider('🎉 测试通过!');
|
||||
const totalMs = Date.now() - startTime;
|
||||
console.log(` 总耗时: ${(totalMs / 1000).toFixed(1)}s`);
|
||||
console.log(` 任务 ID: ${taskId}`);
|
||||
console.log(` 综合报告: ${(result.synthesisReport || '').length} 字`);
|
||||
console.log(` 文献数量: ${hasResultList ? result.resultList.length : 0} 篇`);
|
||||
console.log(` 执行日志: ${logs.length} 条`);
|
||||
console.log('');
|
||||
|
||||
} catch (error: any) {
|
||||
printDivider('💥 测试失败');
|
||||
console.error(` 错误: ${error.message}`);
|
||||
console.error(` 任务 ID: ${taskId || '未创建'}`);
|
||||
console.error(` 耗时: ${((Date.now() - startTime) / 1000).toFixed(1)}s`);
|
||||
console.error('');
|
||||
|
||||
if (error.message.includes('登录失败')) {
|
||||
console.error(' 💡 提示: 请检查测试账号配置');
|
||||
console.error(' 可通过环境变量指定: TEST_PHONE=xxx TEST_PASSWORD=yyy npx tsx ...');
|
||||
}
|
||||
|
||||
if (error.message.includes('fetch failed') || error.message.includes('ECONNREFUSED')) {
|
||||
console.error(' 💡 提示: 后端服务未启动,请先运行:');
|
||||
console.error(' cd backend && npm run dev');
|
||||
}
|
||||
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// ─── 入口 ───────────────────────────────────────
|
||||
|
||||
runE2E();
|
||||
161
backend/src/modules/asl/__tests__/deep-research-v2-smoke.ts
Normal file
161
backend/src/modules/asl/__tests__/deep-research-v2-smoke.ts
Normal file
@@ -0,0 +1,161 @@
|
||||
/**
|
||||
* Deep Research V2.0 — 冒烟测试(Smoke Test)
|
||||
*
|
||||
* 仅测试 API 接口连通性和基本参数校验,
|
||||
* 不调用 LLM / Unifuncs,无外部依赖,几秒完成。
|
||||
*
|
||||
* 运行方式:
|
||||
* npx tsx src/modules/asl/__tests__/deep-research-v2-smoke.ts
|
||||
*
|
||||
* 前置条件:
|
||||
* - 后端服务运行在 localhost:3001
|
||||
* - PostgreSQL 运行中
|
||||
*/
|
||||
|
||||
const BASE_URL = process.env.TEST_BASE_URL || 'http://localhost:3001';
|
||||
const API_PREFIX = `${BASE_URL}/api/v1`;
|
||||
|
||||
const TEST_PHONE = process.env.TEST_PHONE || '13800000001';
|
||||
const TEST_PASSWORD = process.env.TEST_PASSWORD || '123456';
|
||||
|
||||
let authToken = '';
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
async function api(path: string, options: RequestInit = {}): Promise<{ status: number; data: any }> {
|
||||
const res = await fetch(`${API_PREFIX}${path}`, {
|
||||
...options,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
...(authToken ? { Authorization: `Bearer ${authToken}` } : {}),
|
||||
...(options.headers || {}),
|
||||
},
|
||||
});
|
||||
|
||||
const contentType = res.headers.get('content-type') || '';
|
||||
const data = contentType.includes('json') ? await res.json() : await res.text();
|
||||
return { status: res.status, data };
|
||||
}
|
||||
|
||||
function check(ok: boolean, label: string) {
|
||||
if (ok) {
|
||||
console.log(` ✅ ${label}`);
|
||||
passed++;
|
||||
} else {
|
||||
console.log(` ❌ ${label}`);
|
||||
failed++;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Smoke-test driver: logs in, then probes each Deep Research V2.0 endpoint
 * for connectivity and basic parameter validation. Exits non-zero when any
 * check fails or the backend is unreachable.
 */
async function run() {
  console.log('🔥 Deep Research V2.0 冒烟测试\n');

  // ─── 1. Login (obtain bearer token for the authenticated checks) ───
  console.log('[1] 登录');
  try {
    const res = await api('/auth/login/password', {
      method: 'POST',
      body: JSON.stringify({ phone: TEST_PHONE, password: TEST_PASSWORD }),
    });
    const ok = res.status === 200 && res.data.success;
    check(ok, `POST /auth/login/password → ${res.status}`);
    if (ok) {
      // Token location varies across auth response shapes; try each in turn.
      authToken = res.data.data.tokens?.accessToken || res.data.data.accessToken || res.data.data.token;
    } else {
      console.log('   ⚠️ 登录失败,后续需要认证的测试将跳过');
      console.log(`   返回: ${JSON.stringify(res.data).slice(0, 200)}`);
    }
  } catch (e: any) {
    // fetch rejected — backend is not reachable at all; abort immediately.
    check(false, `连接后端失败: ${e.message}`);
    console.log('\n💡 请先启动后端: cd backend && npm run dev\n');
    process.exit(1);
  }

  // ─── 2. GET /data-sources (curated source list) ───
  console.log('\n[2] GET /asl/research/data-sources');
  {
    const res = await api('/asl/research/data-sources');
    check(res.status === 200, `HTTP ${res.status} === 200`);
    check(res.data.success === true, 'success=true');
    check(Array.isArray(res.data.data) && res.data.data.length >= 3, `返回 ${res.data.data?.length} 个数据源`);

    const hasPubmed = res.data.data?.some((s: any) => s.id === 'pubmed');
    check(hasPubmed, 'PubMed 存在于数据源列表');

    const defaultChecked = res.data.data?.filter((s: any) => s.defaultChecked);
    check(defaultChecked?.length >= 1, `默认选中 ${defaultChecked?.length} 个`);
  }

  // ─── 3. POST /generate-requirement (parameter validation only) ───
  console.log('\n[3] POST /asl/research/generate-requirement — 参数校验');
  {
    // Empty query must be rejected before any LLM work happens.
    const res = await api('/asl/research/generate-requirement', {
      method: 'POST',
      body: JSON.stringify({ originalQuery: '' }),
    });
    check(res.status === 400, `空 query → HTTP ${res.status} === 400`);
  }

  {
    // Missing query field must be rejected too.
    const res = await api('/asl/research/generate-requirement', {
      method: 'POST',
      body: JSON.stringify({}),
    });
    check(res.status === 400, `缺少 query → HTTP ${res.status} === 400`);
  }

  // ─── 4. PUT /tasks/:taskId/execute (parameter validation) ───
  console.log('\n[4] PUT /asl/research/tasks/:taskId/execute — 参数校验');
  {
    const res = await api('/asl/research/tasks/nonexistent-id/execute', {
      method: 'PUT',
      body: JSON.stringify({ confirmedRequirement: 'test' }),
    });
    check(res.status === 404, `不存在的 taskId → HTTP ${res.status} === 404`);
  }

  {
    // Requirement is validated before the task lookup, so any id works here.
    const res = await api('/asl/research/tasks/some-id/execute', {
      method: 'PUT',
      body: JSON.stringify({ confirmedRequirement: '' }),
    });
    check(res.status === 400, `空 confirmedRequirement → HTTP ${res.status} === 400`);
  }

  // ─── 5. GET /tasks/:taskId (nonexistent task) ───
  console.log('\n[5] GET /asl/research/tasks/:taskId — 不存在');
  {
    const res = await api('/asl/research/tasks/nonexistent-id');
    check(res.status === 404, `不存在 → HTTP ${res.status} === 404`);
  }

  // ─── 6. GET /tasks/:taskId/export-word (nonexistent task) ───
  console.log('\n[6] GET /asl/research/tasks/:taskId/export-word — 不存在');
  {
    // Export currently surfaces a service error (500) for missing tasks;
    // 404 is also accepted so a stricter controller keeps the test green.
    const res = await api('/asl/research/tasks/nonexistent-id/export-word');
    check(res.status === 500 || res.status === 404, `不存在 → HTTP ${res.status}`);
  }

  // ─── 7. Unauthenticated access (token temporarily removed) ───
  console.log('\n[7] 未认证访问(无 Token)');
  {
    const savedToken = authToken;
    authToken = '';

    const res = await api('/asl/research/data-sources');
    check(res.status === 401, `无 Token → HTTP ${res.status} === 401`);

    authToken = savedToken;
  }

  // ─── Summary ───
  console.log(`\n${'═'.repeat(50)}`);
  console.log(` 🏁 冒烟测试完成: ${passed} 通过, ${failed} 失败 (共 ${passed + failed})`);
  console.log(`${'═'.repeat(50)}\n`);

  if (failed > 0) {
    process.exit(1);
  }
}

run();
|
||||
75
backend/src/modules/asl/config/dataSources.ts
Normal file
75
backend/src/modules/asl/config/dataSources.ts
Normal file
@@ -0,0 +1,75 @@
|
||||
/**
|
||||
* Deep Research V2.0 — 精选数据源配置
|
||||
*
|
||||
* 基于 Unifuncs API 18 站实测结果(2026-02-22)精选的 5 个数据源。
|
||||
* 前端 SetupPanel 直接消费此配置渲染 Checkbox 列表。
|
||||
*/
|
||||
|
||||
export interface DataSourceConfig {
|
||||
id: string;
|
||||
label: string;
|
||||
labelEn: string;
|
||||
domainScope: string;
|
||||
category: 'english' | 'chinese';
|
||||
defaultChecked: boolean;
|
||||
note?: string;
|
||||
}
|
||||
|
||||
export const DEEP_RESEARCH_DATA_SOURCES: DataSourceConfig[] = [
|
||||
{
|
||||
id: 'pubmed',
|
||||
label: 'PubMed',
|
||||
labelEn: 'PubMed',
|
||||
domainScope: 'https://pubmed.ncbi.nlm.nih.gov/',
|
||||
category: 'english',
|
||||
defaultChecked: true,
|
||||
},
|
||||
{
|
||||
id: 'clinicaltrials',
|
||||
label: 'ClinicalTrials.gov',
|
||||
labelEn: 'ClinicalTrials.gov',
|
||||
domainScope: 'https://clinicaltrials.gov/',
|
||||
category: 'english',
|
||||
defaultChecked: false,
|
||||
},
|
||||
{
|
||||
id: 'cochrane',
|
||||
label: 'Cochrane Library',
|
||||
labelEn: 'Cochrane Library',
|
||||
domainScope: 'https://www.cochranelibrary.com/',
|
||||
category: 'english',
|
||||
defaultChecked: false,
|
||||
},
|
||||
{
|
||||
id: 'cnki',
|
||||
label: '中国知网 CNKI',
|
||||
labelEn: 'CNKI',
|
||||
domainScope: 'https://www.cnki.net/',
|
||||
category: 'chinese',
|
||||
defaultChecked: false,
|
||||
},
|
||||
{
|
||||
id: 'medjournals',
|
||||
label: '中华医学期刊网',
|
||||
labelEn: 'Chinese Medical Journals',
|
||||
domainScope: 'https://medjournals.cn/',
|
||||
category: 'chinese',
|
||||
defaultChecked: false,
|
||||
},
|
||||
];
|
||||
|
||||
export function getDefaultDataSources(): string[] {
|
||||
return DEEP_RESEARCH_DATA_SOURCES
|
||||
.filter(ds => ds.defaultChecked)
|
||||
.map(ds => ds.domainScope);
|
||||
}
|
||||
|
||||
export function getDomainScopes(ids: string[]): string[] {
|
||||
return DEEP_RESEARCH_DATA_SOURCES
|
||||
.filter(ds => ids.includes(ds.id))
|
||||
.map(ds => ds.domainScope);
|
||||
}
|
||||
|
||||
export function hasEnglishOnlySource(ids: string[]): boolean {
|
||||
return ids.includes('clinicaltrials');
|
||||
}
|
||||
210
backend/src/modules/asl/controllers/deepResearchController.ts
Normal file
210
backend/src/modules/asl/controllers/deepResearchController.ts
Normal file
@@ -0,0 +1,210 @@
|
||||
/**
|
||||
* Deep Research V2.0 Controller
|
||||
*
|
||||
* 新增端点(保留 V1.x 不动):
|
||||
* - POST /generate-requirement — 需求扩写
|
||||
* - PUT /tasks/:taskId/execute — 启动异步执行
|
||||
* - GET /tasks/:taskId — 状态 + 日志 + 结果
|
||||
*/
|
||||
|
||||
import { FastifyRequest, FastifyReply } from 'fastify';
|
||||
import { prisma } from '../../../config/database.js';
|
||||
import { jobQueue } from '../../../common/jobs/index.js';
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
import { requirementExpansionService } from '../services/requirementExpansionService.js';
|
||||
import { wordExportService } from '../services/wordExportService.js';
|
||||
import { DEEP_RESEARCH_DATA_SOURCES } from '../config/dataSources.js';
|
||||
|
||||
// ─── Request types ──────────────────────────────

/** Body of POST /research/generate-requirement. */
interface GenerateRequirementBody {
  originalQuery: string;
  // Data-source ids from DEEP_RESEARCH_DATA_SOURCES; defaults applied when omitted.
  targetSources?: string[];
  // Optional search constraints forwarded to the expansion prompt.
  filters?: {
    yearRange?: string;
    targetCount?: string;
    requireOpenAccess?: boolean;
  };
}

/** Body of PUT /research/tasks/:taskId/execute. */
interface ExecuteBody {
  confirmedRequirement: string;
}

/** Route params for all /research/tasks/:taskId endpoints. */
interface TaskParams {
  taskId: string;
}
|
||||
|
||||
// ─── POST /research/generate-requirement ────────
|
||||
|
||||
export async function generateRequirement(
|
||||
request: FastifyRequest<{ Body: GenerateRequirementBody }>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const userId = request.user?.userId;
|
||||
if (!userId) {
|
||||
return reply.code(401).send({ success: false, error: '用户未认证' });
|
||||
}
|
||||
|
||||
const { originalQuery, targetSources, filters } = request.body;
|
||||
|
||||
if (!originalQuery?.trim()) {
|
||||
return reply.code(400).send({ success: false, error: '请输入研究想法' });
|
||||
}
|
||||
|
||||
const projectId = 'default';
|
||||
|
||||
const result = await requirementExpansionService.generateRequirement({
|
||||
projectId,
|
||||
userId,
|
||||
originalQuery: originalQuery.trim(),
|
||||
targetSources,
|
||||
filters,
|
||||
});
|
||||
|
||||
return reply.send({ success: true, data: result });
|
||||
} catch (error: any) {
|
||||
logger.error('[DeepResearchController] generateRequirement failed', {
|
||||
error: error.message,
|
||||
});
|
||||
return reply.code(500).send({ success: false, error: error.message });
|
||||
}
|
||||
}
|
||||
|
||||
// ─── PUT /research/tasks/:taskId/execute ────────

/**
 * Confirm the requirement and hand a draft task to the async worker.
 * Guards: auth → non-empty requirement → task exists → caller owns it →
 * task is still in 'draft'. On success the task moves to 'pending' and a
 * job is queued for the V2 worker.
 */
export async function executeTask(
  request: FastifyRequest<{ Params: TaskParams; Body: ExecuteBody }>,
  reply: FastifyReply
) {
  try {
    const userId = request.user?.userId;
    if (!userId) {
      return reply.code(401).send({ success: false, error: '用户未认证' });
    }

    const { taskId } = request.params;
    const { confirmedRequirement } = request.body;

    // Validate the body before touching the database.
    if (!confirmedRequirement?.trim()) {
      return reply.code(400).send({ success: false, error: '检索指令不能为空' });
    }

    const task = await prisma.aslResearchTask.findUnique({
      where: { id: taskId },
    });

    if (!task) {
      return reply.code(404).send({ success: false, error: '任务不存在' });
    }

    // Ownership check: only the creator may launch the task.
    if (task.userId !== userId) {
      return reply.code(403).send({ success: false, error: '无权操作此任务' });
    }

    // Only draft tasks may be started; prevents double execution.
    if (task.status !== 'draft') {
      return reply.code(400).send({
        success: false,
        error: `任务状态为 ${task.status},只有 draft 状态可启动`,
      });
    }

    // Persist the confirmed requirement and mark the task pending
    // before queueing, so the worker always sees consistent state.
    await prisma.aslResearchTask.update({
      where: { id: taskId },
      data: {
        confirmedRequirement: confirmedRequirement.trim(),
        status: 'pending',
      },
    });

    await jobQueue.push('asl_deep_research_v2', { taskId });

    logger.info('[DeepResearchController] Task pushed to queue', { taskId });

    return reply.send({ success: true });
  } catch (error: any) {
    logger.error('[DeepResearchController] executeTask failed', {
      error: error.message,
    });
    return reply.code(500).send({ success: false, error: error.message });
  }
}
|
||||
|
||||
// ─── GET /research/tasks/:taskId ────────────────
|
||||
|
||||
export async function getTask(
|
||||
request: FastifyRequest<{ Params: TaskParams }>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const { taskId } = request.params;
|
||||
|
||||
const task = await prisma.aslResearchTask.findUnique({
|
||||
where: { id: taskId },
|
||||
});
|
||||
|
||||
if (!task) {
|
||||
return reply.code(404).send({ success: false, error: '任务不存在' });
|
||||
}
|
||||
|
||||
return reply.send({
|
||||
success: true,
|
||||
data: {
|
||||
taskId: task.id,
|
||||
status: task.status,
|
||||
query: task.query,
|
||||
targetSources: task.targetSources,
|
||||
confirmedRequirement: task.confirmedRequirement,
|
||||
aiIntentSummary: task.aiIntentSummary,
|
||||
executionLogs: task.executionLogs || [],
|
||||
synthesisReport: task.synthesisReport,
|
||||
resultList: task.resultList,
|
||||
resultCount: task.resultCount,
|
||||
errorMessage: task.errorMessage,
|
||||
createdAt: task.createdAt,
|
||||
completedAt: task.completedAt,
|
||||
},
|
||||
});
|
||||
} catch (error: any) {
|
||||
logger.error('[DeepResearchController] getTask failed', {
|
||||
error: error.message,
|
||||
});
|
||||
return reply.code(500).send({ success: false, error: error.message });
|
||||
}
|
||||
}
|
||||
|
||||
// ─── GET /research/tasks/:taskId/export-word ────
|
||||
|
||||
export async function exportWord(
|
||||
request: FastifyRequest<{ Params: TaskParams }>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const { taskId } = request.params;
|
||||
|
||||
const { buffer, filename } = await wordExportService.exportTaskToWord(taskId);
|
||||
|
||||
reply
|
||||
.header('Content-Type', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document')
|
||||
.header('Content-Disposition', `attachment; filename*=UTF-8''${encodeURIComponent(filename)}`)
|
||||
.send(buffer);
|
||||
} catch (error: any) {
|
||||
logger.error('[DeepResearchController] exportWord failed', {
|
||||
error: error.message,
|
||||
});
|
||||
return reply.code(500).send({ success: false, error: error.message });
|
||||
}
|
||||
}
|
||||
|
||||
// ─── GET /research/data-sources ─────────────────
|
||||
|
||||
export async function getDataSources(
|
||||
_request: FastifyRequest,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
return reply.send({
|
||||
success: true,
|
||||
data: DEEP_RESEARCH_DATA_SOURCES,
|
||||
});
|
||||
}
|
||||
@@ -8,6 +8,7 @@ import * as literatureController from '../controllers/literatureController.js';
|
||||
import * as screeningController from '../controllers/screeningController.js';
|
||||
import * as fulltextScreeningController from '../fulltext-screening/controllers/FulltextScreeningController.js';
|
||||
import * as researchController from '../controllers/researchController.js';
|
||||
import * as deepResearchController from '../controllers/deepResearchController.js';
|
||||
import { authenticate, requireModule } from '../../../common/auth/auth.middleware.js';
|
||||
|
||||
export async function aslRoutes(fastify: FastifyInstance) {
|
||||
@@ -79,16 +80,33 @@ export async function aslRoutes(fastify: FastifyInstance) {
|
||||
// 导出Excel
|
||||
fastify.get('/fulltext-screening/tasks/:taskId/export', { preHandler: [authenticate, requireModule('ASL')] }, fulltextScreeningController.exportExcel);
|
||||
|
||||
// ==================== 智能文献检索路由 (DeepSearch) ====================
|
||||
// ==================== 智能文献检索路由 (DeepSearch V1.x — 保留兼容) ====================
|
||||
|
||||
// SSE 流式检索(推荐,实时显示思考过程)
|
||||
// SSE 流式检索
|
||||
fastify.post('/research/stream', { preHandler: [authenticate, requireModule('ASL')] }, researchController.streamSearch);
|
||||
|
||||
// 创建检索任务(异步模式,备用)
|
||||
// 创建检索任务(V1.x 异步模式)
|
||||
fastify.post('/research/tasks', { preHandler: [authenticate, requireModule('ASL')] }, researchController.createTask);
|
||||
|
||||
// 获取任务状态(轮询)
|
||||
// 获取任务状态(V1.x 轮询)
|
||||
fastify.get('/research/tasks/:taskId/status', { preHandler: [authenticate, requireModule('ASL')] }, researchController.getTaskStatus);
|
||||
|
||||
// ==================== Deep Research V2.0 路由 ====================
|
||||
|
||||
// 获取可选数据源列表
|
||||
fastify.get('/research/data-sources', { preHandler: [authenticate, requireModule('ASL')] }, deepResearchController.getDataSources);
|
||||
|
||||
// 需求扩写(PICOS + MeSH)
|
||||
fastify.post('/research/generate-requirement', { preHandler: [authenticate, requireModule('ASL')] }, deepResearchController.generateRequirement);
|
||||
|
||||
// 启动异步执行
|
||||
fastify.put('/research/tasks/:taskId/execute', { preHandler: [authenticate, requireModule('ASL')] }, deepResearchController.executeTask);
|
||||
|
||||
// V2.0 任务详情(状态 + 日志 + 结果)
|
||||
fastify.get('/research/tasks/:taskId', { preHandler: [authenticate, requireModule('ASL')] }, deepResearchController.getTask);
|
||||
|
||||
// V2.0 导出 Word
|
||||
fastify.get('/research/tasks/:taskId/export-word', { preHandler: [authenticate, requireModule('ASL')] }, deepResearchController.exportWord);
|
||||
}
|
||||
|
||||
|
||||
|
||||
180
backend/src/modules/asl/services/requirementExpansionService.ts
Normal file
180
backend/src/modules/asl/services/requirementExpansionService.ts
Normal file
@@ -0,0 +1,180 @@
|
||||
/**
|
||||
* Deep Research V2.0 — 需求扩写服务
|
||||
*
|
||||
* 职责:
|
||||
* 1. 通过 Prompt 管理服务获取 ASL_DEEP_RESEARCH_EXPANSION 模板
|
||||
* 2. 调用 LLM 将用户粗略想法扩写为 PICOS 结构化检索指令
|
||||
* 3. 解析 LLM 输出为 generatedRequirement + intentSummary
|
||||
* 4. 创建 DB 记录(status=draft)
|
||||
*/
|
||||
|
||||
import { prisma } from '../../../config/database.js';
|
||||
import { getPromptService } from '../../../common/prompt/index.js';
|
||||
import { LLMFactory } from '../../../common/llm/adapters/LLMFactory.js';
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
import { getDefaultDataSources, hasEnglishOnlySource } from '../config/dataSources.js';
|
||||
import type { Message } from '../../../common/llm/adapters/types.js';
|
||||
|
||||
/** Input for one requirement-expansion run. */
export interface GenerateRequirementInput {
  projectId: string;
  userId: string;
  // The user's rough research idea, pre-trimmed by the controller.
  originalQuery: string;
  // Data-source selection; defaults applied by the service when omitted.
  targetSources?: string[];
  // Optional constraints interpolated into the expansion prompt.
  filters?: {
    yearRange?: string;
    targetCount?: string;
    requireOpenAccess?: boolean;
  };
}

/** Structured PICOS summary parsed from the LLM's JSON block. */
export interface IntentSummary {
  objective: string;
  population: string;
  intervention: string;
  comparison: string;
  outcome: string;
  studyDesign: string[];
  meshTerms: string[];
  condition: string;
}

/** Result returned to the controller: draft task id + expansion output. */
export interface GenerateRequirementResult {
  taskId: string;
  generatedRequirement: string;
  intentSummary: IntentSummary;
}
|
||||
|
||||
class RequirementExpansionService {
|
||||
|
||||
async generateRequirement(input: GenerateRequirementInput): Promise<GenerateRequirementResult> {
|
||||
const {
|
||||
projectId,
|
||||
userId,
|
||||
originalQuery,
|
||||
targetSources,
|
||||
filters,
|
||||
} = input;
|
||||
|
||||
const sources = targetSources && targetSources.length > 0
|
||||
? targetSources
|
||||
: getDefaultDataSources();
|
||||
|
||||
logger.info('[RequirementExpansion] Starting expansion', {
|
||||
userId,
|
||||
queryLength: originalQuery.length,
|
||||
sources,
|
||||
});
|
||||
|
||||
const promptService = getPromptService(prisma);
|
||||
const rendered = await promptService.get(
|
||||
'ASL_DEEP_RESEARCH_EXPANSION',
|
||||
{
|
||||
originalQuery,
|
||||
targetSources: sources.join(', '),
|
||||
yearRange: filters?.yearRange || '不限',
|
||||
targetCount: filters?.targetCount || '全面检索',
|
||||
},
|
||||
{ userId }
|
||||
);
|
||||
|
||||
const adapter = LLMFactory.getAdapter(
|
||||
(rendered.modelConfig.model as any) || 'deepseek-v3'
|
||||
);
|
||||
|
||||
const messages: Message[] = [
|
||||
{ role: 'system', content: rendered.content },
|
||||
{ role: 'user', content: originalQuery },
|
||||
];
|
||||
|
||||
const llmResponse = await adapter.chat(messages, {
|
||||
temperature: rendered.modelConfig.temperature ?? 0.4,
|
||||
maxTokens: rendered.modelConfig.maxTokens ?? 4096,
|
||||
});
|
||||
|
||||
const rawOutput = llmResponse.content;
|
||||
|
||||
const { requirement, intentSummary } = this.parseOutput(rawOutput);
|
||||
|
||||
const task = await prisma.aslResearchTask.create({
|
||||
data: {
|
||||
projectId,
|
||||
userId,
|
||||
query: originalQuery,
|
||||
status: 'draft',
|
||||
targetSources: sources as any,
|
||||
confirmedRequirement: requirement,
|
||||
aiIntentSummary: intentSummary as any,
|
||||
filters: filters as any,
|
||||
},
|
||||
});
|
||||
|
||||
logger.info('[RequirementExpansion] Task created', {
|
||||
taskId: task.id,
|
||||
meshTerms: intentSummary.meshTerms?.length || 0,
|
||||
});
|
||||
|
||||
return {
|
||||
taskId: task.id,
|
||||
generatedRequirement: requirement,
|
||||
intentSummary,
|
||||
};
|
||||
}
|
||||
|
||||
private parseOutput(raw: string): {
|
||||
requirement: string;
|
||||
intentSummary: IntentSummary;
|
||||
} {
|
||||
let requirement = '';
|
||||
let intentSummary: IntentSummary = {
|
||||
objective: '',
|
||||
population: '',
|
||||
intervention: '',
|
||||
comparison: '',
|
||||
outcome: '',
|
||||
studyDesign: [],
|
||||
meshTerms: [],
|
||||
condition: '',
|
||||
};
|
||||
|
||||
const part1Match = raw.match(
|
||||
/### Part 1[::][^\n]*\n([\s\S]*?)(?=### Part 2|$)/i
|
||||
);
|
||||
if (part1Match) {
|
||||
requirement = part1Match[1].trim();
|
||||
} else {
|
||||
const jsonBlockStart = raw.indexOf('```json');
|
||||
if (jsonBlockStart > 0) {
|
||||
requirement = raw.slice(0, jsonBlockStart).trim();
|
||||
} else {
|
||||
requirement = raw.trim();
|
||||
}
|
||||
}
|
||||
|
||||
const jsonMatch = raw.match(/```json\s*([\s\S]*?)```/);
|
||||
if (jsonMatch) {
|
||||
try {
|
||||
let cleaned = jsonMatch[1].trim();
|
||||
cleaned = cleaned.replace(/,\s*([}\]])/g, '$1');
|
||||
const parsed = JSON.parse(cleaned);
|
||||
intentSummary = {
|
||||
objective: parsed.objective || '',
|
||||
population: parsed.population || '',
|
||||
intervention: parsed.intervention || '',
|
||||
comparison: parsed.comparison || '',
|
||||
outcome: parsed.outcome || '',
|
||||
studyDesign: Array.isArray(parsed.studyDesign) ? parsed.studyDesign : [],
|
||||
meshTerms: Array.isArray(parsed.meshTerms) ? parsed.meshTerms : [],
|
||||
condition: parsed.condition || '',
|
||||
};
|
||||
} catch (e) {
|
||||
logger.warn('[RequirementExpansion] Failed to parse intent summary JSON', {
|
||||
error: (e as Error).message,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return { requirement, intentSummary };
|
||||
}
|
||||
}
|
||||
|
||||
export const requirementExpansionService = new RequirementExpansionService();
|
||||
155
backend/src/modules/asl/services/unifuncsAsyncClient.ts
Normal file
155
backend/src/modules/asl/services/unifuncsAsyncClient.ts
Normal file
@@ -0,0 +1,155 @@
|
||||
/**
|
||||
* Unifuncs DeepSearch 异步客户端
|
||||
*
|
||||
* 封装 create_task / query_task 两个 API。
|
||||
* Worker 通过此客户端与 Unifuncs 服务交互。
|
||||
*/
|
||||
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
|
||||
// Unifuncs DeepSearch endpoint and credentials (key read once at module load).
const UNIFUNCS_BASE_URL = 'https://api.unifuncs.com/deepsearch/v1';
const UNIFUNCS_API_KEY = process.env.UNIFUNCS_API_KEY;

/** Parameters for create_task; optional fields get client-side defaults. */
export interface CreateTaskParams {
  query: string;
  // System-style instruction for the search agent; defaults to the built-in one.
  introduction?: string;
  maxDepth?: number;
  domainScope?: string[];
  domainBlacklist?: string[];
  referenceStyle?: 'link' | 'character';
  generateSummary?: boolean;
  outputPrompt?: string;
}

/** Unifuncs create_task response envelope (code === 0 means success). */
export interface CreateTaskResponse {
  code: number;
  message: string;
  data: {
    task_id: string;
    status: string;
    created_at: string;
  };
}

/** Unifuncs query_task response envelope; result present once completed. */
export interface QueryTaskResponse {
  code: number;
  message: string;
  data: {
    task_id: string;
    status: 'pending' | 'running' | 'completed' | 'failed';
    result?: {
      content: string;
      reasoning_content: string;
    };
    created_at: string;
    updated_at: string;
    progress?: {
      current: number;
      total: number;
      message: string;
    };
    statistics?: {
      iterations: number;
      search_count: number;
      read_count: number;
      token_usage?: {
        prompt_tokens: number;
        completion_tokens: number;
        total_tokens: number;
      };
    };
  };
}
|
||||
|
||||
class UnifuncsAsyncClient {
|
||||
private apiKey: string;
|
||||
private baseUrl: string;
|
||||
|
||||
constructor() {
|
||||
this.apiKey = UNIFUNCS_API_KEY || '';
|
||||
this.baseUrl = UNIFUNCS_BASE_URL;
|
||||
}
|
||||
|
||||
async createTask(params: CreateTaskParams): Promise<CreateTaskResponse> {
|
||||
if (!this.apiKey) {
|
||||
throw new Error('UNIFUNCS_API_KEY not configured');
|
||||
}
|
||||
|
||||
const payload = {
|
||||
model: 's2',
|
||||
messages: [{ role: 'user', content: params.query }],
|
||||
introduction: params.introduction || this.defaultIntroduction(),
|
||||
max_depth: params.maxDepth ?? 25,
|
||||
domain_scope: params.domainScope || ['https://pubmed.ncbi.nlm.nih.gov/'],
|
||||
domain_blacklist: params.domainBlacklist || [],
|
||||
reference_style: params.referenceStyle || 'link',
|
||||
generate_summary: params.generateSummary ?? true,
|
||||
...(params.outputPrompt ? { output_prompt: params.outputPrompt } : {}),
|
||||
};
|
||||
|
||||
logger.info('[UnifuncsClient] Creating task', {
|
||||
queryLen: params.query.length,
|
||||
domainScope: payload.domain_scope,
|
||||
maxDepth: payload.max_depth,
|
||||
});
|
||||
|
||||
const res = await fetch(`${this.baseUrl}/create_task`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${this.apiKey}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
if (!res.ok) {
|
||||
const text = await res.text();
|
||||
throw new Error(`create_task HTTP ${res.status}: ${text}`);
|
||||
}
|
||||
|
||||
const json = await res.json() as CreateTaskResponse;
|
||||
|
||||
if (json.code !== 0) {
|
||||
throw new Error(`create_task error: ${json.message}`);
|
||||
}
|
||||
|
||||
logger.info('[UnifuncsClient] Task created', {
|
||||
externalTaskId: json.data.task_id,
|
||||
});
|
||||
|
||||
return json;
|
||||
}
|
||||
|
||||
async queryTask(taskId: string): Promise<QueryTaskResponse> {
|
||||
if (!this.apiKey) {
|
||||
throw new Error('UNIFUNCS_API_KEY not configured');
|
||||
}
|
||||
|
||||
const params = new URLSearchParams({ task_id: taskId });
|
||||
|
||||
const res = await fetch(`${this.baseUrl}/query_task?${params.toString()}`, {
|
||||
headers: {
|
||||
'Authorization': `Bearer ${this.apiKey}`,
|
||||
},
|
||||
});
|
||||
|
||||
if (!res.ok) {
|
||||
const text = await res.text();
|
||||
throw new Error(`query_task HTTP ${res.status}: ${text}`);
|
||||
}
|
||||
|
||||
const json = await res.json() as QueryTaskResponse;
|
||||
|
||||
if (json.code !== 0) {
|
||||
throw new Error(`query_task error: ${json.message}`);
|
||||
}
|
||||
|
||||
return json;
|
||||
}
|
||||
|
||||
private defaultIntroduction(): string {
|
||||
return '你是一名专业的临床研究文献检索专家,擅长从多个学术数据库中检索高质量的医学文献。请根据用户的检索需求,系统性地搜索并返回相关文献的详细信息。';
|
||||
}
|
||||
}
|
||||
|
||||
export const unifuncsAsyncClient = new UnifuncsAsyncClient();
|
||||
122
backend/src/modules/asl/services/unifuncsSseClient.ts
Normal file
122
backend/src/modules/asl/services/unifuncsSseClient.ts
Normal file
@@ -0,0 +1,122 @@
|
||||
/**
|
||||
* Unifuncs DeepSearch SSE 流式客户端
|
||||
*
|
||||
* 通过 OpenAI 兼容协议的 SSE 流获取实时 reasoning_content 和 content。
|
||||
* Worker 消费此流可实现逐条实时写入 executionLogs。
|
||||
*/
|
||||
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
|
||||
// Unifuncs DeepSearch endpoint and credentials; the key is read at module
// load, and a missing key fails fast inside streamDeepSearch.
const UNIFUNCS_BASE_URL = 'https://api.unifuncs.com/deepsearch/v1';
const UNIFUNCS_API_KEY = process.env.UNIFUNCS_API_KEY;

/** Parameters for one DeepSearch SSE run; optionals get client-side defaults. */
export interface SseStreamParams {
  query: string;
  introduction?: string;
  maxDepth?: number;
  domainScope?: string[];
  domainBlacklist?: string[];
  referenceStyle?: 'link' | 'character';
  outputPrompt?: string;
}

/** One parsed stream event: incremental reasoning/content text, or terminal markers. */
export interface SseChunk {
  type: 'reasoning' | 'content' | 'done' | 'error';
  text: string;
}
|
||||
|
||||
/**
|
||||
* 以 AsyncGenerator 形式逐 chunk 返回 Unifuncs SSE 流数据
|
||||
*/
|
||||
export async function* streamDeepSearch(
|
||||
params: SseStreamParams
|
||||
): AsyncGenerator<SseChunk> {
|
||||
const apiKey = UNIFUNCS_API_KEY;
|
||||
if (!apiKey) throw new Error('UNIFUNCS_API_KEY not configured');
|
||||
|
||||
const payload: Record<string, any> = {
|
||||
model: 's2',
|
||||
messages: [{ role: 'user', content: params.query }],
|
||||
stream: true,
|
||||
introduction: params.introduction || defaultIntroduction(),
|
||||
max_depth: params.maxDepth ?? 25,
|
||||
domain_scope: params.domainScope || ['https://pubmed.ncbi.nlm.nih.gov/'],
|
||||
domain_blacklist: params.domainBlacklist || [],
|
||||
reference_style: params.referenceStyle || 'link',
|
||||
};
|
||||
|
||||
if (params.outputPrompt) {
|
||||
payload.output_prompt = params.outputPrompt;
|
||||
}
|
||||
|
||||
logger.info('[UnifuncsSse] Starting SSE stream', {
|
||||
queryLen: params.query.length,
|
||||
domainScope: payload.domain_scope,
|
||||
maxDepth: payload.max_depth,
|
||||
});
|
||||
|
||||
const response = await fetch(`${UNIFUNCS_BASE_URL}/chat/completions`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${apiKey}`,
|
||||
'Content-Type': 'application/json',
|
||||
'Accept': 'text/event-stream',
|
||||
},
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const text = await response.text();
|
||||
throw new Error(`SSE request failed HTTP ${response.status}: ${text}`);
|
||||
}
|
||||
|
||||
if (!response.body) {
|
||||
throw new Error('Response body is null');
|
||||
}
|
||||
|
||||
const reader = (response.body as any).getReader() as ReadableStreamDefaultReader<Uint8Array>;
|
||||
const decoder = new TextDecoder();
|
||||
let buffer = '';
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
buffer += decoder.decode(value, { stream: true });
|
||||
const lines = buffer.split('\n');
|
||||
buffer = lines.pop() || '';
|
||||
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed || trimmed.startsWith(':')) continue;
|
||||
|
||||
if (trimmed.startsWith('data: ')) {
|
||||
const data = trimmed.slice(6);
|
||||
if (data === '[DONE]') {
|
||||
yield { type: 'done' as const, text: '' };
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const json = JSON.parse(data);
|
||||
const delta = json.choices?.[0]?.delta;
|
||||
if (delta?.reasoning_content) {
|
||||
yield { type: 'reasoning' as const, text: delta.reasoning_content };
|
||||
} else if (delta?.content) {
|
||||
yield { type: 'content' as const, text: delta.content };
|
||||
}
|
||||
} catch {
|
||||
// Skip unparseable chunks
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
reader.releaseLock();
|
||||
}
|
||||
}
|
||||
|
||||
/** Built-in agent persona used when the caller supplies no introduction. */
function defaultIntroduction(): string {
  return '你是一名专业的临床研究文献检索专家,擅长从多个学术数据库中检索高质量的医学文献。请根据用户的检索需求,系统性地搜索并返回相关文献的详细信息。';
}
|
||||
140
backend/src/modules/asl/services/wordExportService.ts
Normal file
140
backend/src/modules/asl/services/wordExportService.ts
Normal file
@@ -0,0 +1,140 @@
|
||||
/**
|
||||
* Deep Research V2.0 — Word 导出服务
|
||||
*
|
||||
* 将 synthesisReport + resultList 拼接为完整 Markdown,
|
||||
* 调用 Python 微服务(Pandoc)转换为 .docx。
|
||||
*
|
||||
* 文献清单中标题直接作为超链接,确保 Word 中可点击。
|
||||
*/
|
||||
|
||||
import axios from 'axios';
|
||||
import { prisma } from '../../../config/database.js';
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
import type { LiteratureItem } from '../utils/resultParser.js';
|
||||
|
||||
const EXTRACTION_SERVICE_URL = process.env.EXTRACTION_SERVICE_URL || 'http://localhost:8000';
|
||||
|
||||
class WordExportService {
|
||||
|
||||
  /**
   * Build and return a .docx export for a completed task.
   *
   * Loads the task, renders the full Markdown (report + literature list),
   * converts it via the Pandoc microservice, and derives a filesystem-safe
   * filename from the query.
   *
   * @throws when the task is missing or not yet completed.
   */
  async exportTaskToWord(taskId: string): Promise<{
    buffer: Buffer;
    filename: string;
  }> {
    const task = await prisma.aslResearchTask.findUnique({
      where: { id: taskId },
    });

    if (!task) throw new Error('任务不存在');
    if (task.status !== 'completed') throw new Error('任务尚未完成');

    const markdown = this.buildMarkdown(
      task.query,
      task.synthesisReport,
      task.resultList as LiteratureItem[] | null,
      task.completedAt,
    );

    const docxBuffer = await this.convertToDocx(markdown, task.query);

    // Keep only CJK + alphanumerics (max 30 chars) so the filename is safe
    // across platforms; date suffix disambiguates repeated exports.
    const safeQuery = task.query.replace(/[^\u4e00-\u9fa5a-zA-Z0-9]/g, '').slice(0, 30);
    const dateStr = new Date().toISOString().slice(0, 10).replace(/-/g, '');
    const filename = `DeepResearch_${safeQuery}_${dateStr}.docx`;

    return { buffer: docxBuffer, filename };
  }
|
||||
|
||||
  /**
   * Assemble the export Markdown: header, synthesis report (with reference
   * links expanded so URLs are visible in Word), then a numbered literature
   * list where each title is a clickable hyperlink.
   */
  private buildMarkdown(
    query: string,
    report: string | null,
    resultList: LiteratureItem[] | null,
    completedAt: Date | null,
  ): string {
    const parts: string[] = [];

    parts.push(`# Deep Research 报告\n`);
    parts.push(`检索主题: ${query}\n`);
    parts.push(`生成时间: ${completedAt ? new Date(completedAt).toLocaleString('zh-CN') : new Date().toLocaleString('zh-CN')}\n`);
    parts.push('---\n');

    if (report) {
      parts.push('## 综合分析报告\n');
      // Strip bold markers — Pandoc's heading styles already carry emphasis.
      let cleaned = report.replace(/\*\*([^*]+)\*\*/g, '$1');
      cleaned = this.expandReferenceLinks(cleaned);
      parts.push(cleaned);
      parts.push('\n');
    }

    if (resultList && resultList.length > 0) {
      parts.push('---\n');
      parts.push(`## 文献清单(共 ${resultList.length} 篇)\n`);

      resultList.forEach((item, idx) => {
        const title = item.title || '(无标题)';
        // Fall back to a PubMed URL when only a PMID is available.
        const url = item.url || (item.pmid ? `https://pubmed.ncbi.nlm.nih.gov/${item.pmid}/` : '');

        // Title rendered as a hyperlink so it stays clickable in Word.
        const titleLine = url ? `[${title}](${url})` : title;
        parts.push(`### ${idx + 1}. ${titleLine}\n`);

        const details: string[] = [];
        if (item.authors) details.push(`作者: ${item.authors}`);
        if (item.journal) details.push(`期刊: ${item.journal}`);
        if (item.year) details.push(`年份: ${item.year}`);
        if (item.studyType) details.push(`研究类型: ${item.studyType}`);
        if (item.pmid) details.push(`PMID: ${item.pmid}`);

        if (details.length > 0) {
          parts.push(details.join(' | '));
        }

        // Plain-text URL as well, for readers of the printed document.
        if (url) {
          parts.push(`\n链接: ${url}`);
        }

        parts.push('\n');
      });
    }

    parts.push('---\n');
    parts.push('*本报告由 AI Clinical Research 平台 Deep Research 引擎自动生成*\n');

    return parts.join('\n');
  }
|
||||
|
||||
/**
|
||||
* 将 [[N]](url) 格式的引用链接展开为 [N] url 形式,
|
||||
* 使 Word 中引用旁边可见完整 URL。
|
||||
*/
|
||||
private expandReferenceLinks(text: string): string {
|
||||
return text.replace(
|
||||
/\[\[(\d+)\]\]\((https?:\/\/[^\s)]+)\)/g,
|
||||
'[$1]($2) ($2)'
|
||||
);
|
||||
}
|
||||
|
||||
private async convertToDocx(markdown: string, title: string): Promise<Buffer> {
|
||||
try {
|
||||
logger.info('[WordExport] Converting Markdown → Word');
|
||||
|
||||
const response = await axios.post(
|
||||
`${EXTRACTION_SERVICE_URL}/api/convert/docx`,
|
||||
{
|
||||
content: markdown,
|
||||
use_template: true,
|
||||
title: `Deep Research: ${title.slice(0, 50)}`,
|
||||
},
|
||||
{
|
||||
responseType: 'arraybuffer',
|
||||
timeout: 30000,
|
||||
}
|
||||
);
|
||||
|
||||
logger.info(`[WordExport] Conversion success, size: ${response.data.length} bytes`);
|
||||
return Buffer.from(response.data);
|
||||
} catch (error) {
|
||||
logger.error('[WordExport] Conversion failed:', error);
|
||||
throw new Error(`Word 转换失败: ${error instanceof Error ? error.message : 'Unknown error'}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const wordExportService = new WordExportService();
|
||||
115
backend/src/modules/asl/utils/reasoningParser.ts
Normal file
115
backend/src/modules/asl/utils/reasoningParser.ts
Normal file
@@ -0,0 +1,115 @@
|
||||
/**
|
||||
* Reasoning Content 解析器
|
||||
*
|
||||
* 将 Unifuncs 返回的 reasoning_content 增量文本解析为结构化日志条目。
|
||||
*
|
||||
* 核心策略:按段落(\n\n)拆分,同一段落内的思考内容合并为一条日志,
|
||||
* 只有 搜索/阅读/分析 等动作才单独成条。
|
||||
*/
|
||||
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
|
||||
export interface ExecutionLogEntry {
|
||||
type: 'thinking' | 'searching' | 'reading' | 'analyzing' | 'summary' | 'info';
|
||||
title: string;
|
||||
text: string;
|
||||
ts: string;
|
||||
}
|
||||
|
||||
const SEARCH_PATTERN = /(?:搜索|searching|search(?:ing)?\s+for|查找|检索|looking\s+for)[::\s]+(.+)/i;
|
||||
const READ_PATTERN = /(?:阅读|reading|read(?:ing)?|访问|打开|visiting|open(?:ing)?)\s*[::\s]*(https?:\/\/\S+|\S+\.(?:com|org|net|gov|cn)\S*)/i;
|
||||
const ANALYZE_PATTERN = /(?:分析|analyz|发现|总结|归纳|结论|found|result|finding|conclud|summariz)/i;
|
||||
|
||||
/**
|
||||
* 将增量 reasoning 文本解析为段落级日志条目。
|
||||
* 连续的思考行合并为一段,动作行(搜索/阅读/分析)独立成条。
|
||||
*/
|
||||
export function parseReasoningIncrement(
|
||||
newText: string,
|
||||
_previousLength: number
|
||||
): ExecutionLogEntry[] {
|
||||
if (!newText) return [];
|
||||
|
||||
const entries: ExecutionLogEntry[] = [];
|
||||
const now = new Date().toISOString();
|
||||
|
||||
const paragraphs = newText.split(/\n{2,}/);
|
||||
|
||||
for (const para of paragraphs) {
|
||||
const lines = para.split('\n').filter(l => l.trim());
|
||||
if (lines.length === 0) continue;
|
||||
|
||||
let thinkingBuf: string[] = [];
|
||||
|
||||
const flushThinking = () => {
|
||||
if (thinkingBuf.length === 0) return;
|
||||
const text = thinkingBuf.join('').slice(0, 800);
|
||||
if (text.length > 10) {
|
||||
entries.push({ type: 'thinking', title: '思考', text, ts: now });
|
||||
}
|
||||
thinkingBuf = [];
|
||||
};
|
||||
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed) continue;
|
||||
|
||||
const searchMatch = trimmed.match(SEARCH_PATTERN);
|
||||
if (searchMatch) {
|
||||
flushThinking();
|
||||
entries.push({ type: 'searching', title: '搜索', text: searchMatch[1].trim(), ts: now });
|
||||
continue;
|
||||
}
|
||||
|
||||
const readMatch = trimmed.match(READ_PATTERN);
|
||||
if (readMatch) {
|
||||
flushThinking();
|
||||
entries.push({ type: 'reading', title: '阅读页面', text: readMatch[1].trim(), ts: now });
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ANALYZE_PATTERN.test(trimmed) && trimmed.length > 20) {
|
||||
flushThinking();
|
||||
entries.push({ type: 'analyzing', title: '分析', text: trimmed.slice(0, 500), ts: now });
|
||||
continue;
|
||||
}
|
||||
|
||||
thinkingBuf.push(trimmed);
|
||||
}
|
||||
|
||||
flushThinking();
|
||||
}
|
||||
|
||||
return entries;
|
||||
}
|
||||
|
||||
/**
|
||||
* 合并连续的同类型(thinking)条目为一段。
|
||||
* 在 Worker 写入 DB 前调用,减少碎片化。
|
||||
*/
|
||||
export function mergeConsecutiveThinking(entries: ExecutionLogEntry[]): ExecutionLogEntry[] {
|
||||
if (entries.length <= 1) return entries;
|
||||
|
||||
const merged: ExecutionLogEntry[] = [];
|
||||
let current = { ...entries[0] };
|
||||
|
||||
for (let i = 1; i < entries.length; i++) {
|
||||
if (entries[i].type === 'thinking' && current.type === 'thinking') {
|
||||
current.text = (current.text + ' ' + entries[i].text).slice(0, 800);
|
||||
} else {
|
||||
merged.push(current);
|
||||
current = { ...entries[i] };
|
||||
}
|
||||
}
|
||||
merged.push(current);
|
||||
|
||||
return merged;
|
||||
}
|
||||
|
||||
/**
|
||||
* 从完整的 reasoning_content 一次性提取摘要级日志
|
||||
*/
|
||||
export function parseFullReasoning(fullText: string): ExecutionLogEntry[] {
|
||||
if (!fullText) return [];
|
||||
return parseReasoningIncrement(fullText, 0);
|
||||
}
|
||||
113
backend/src/modules/asl/utils/resultParser.ts
Normal file
113
backend/src/modules/asl/utils/resultParser.ts
Normal file
@@ -0,0 +1,113 @@
|
||||
/**
|
||||
* Deep Research V2.0 — 结果解析器
|
||||
*
|
||||
* 职责:
|
||||
* 1. 从 Unifuncs content 中切割出 synthesisReport + resultList
|
||||
* 2. safeParseJsonList: 4 层防崩溃 JSON 解析
|
||||
*/
|
||||
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
|
||||
export interface LiteratureItem {
  // Article title; may be '' when only a bare link could be recovered (see parseContent fallback).
  title: string;
  // Author string, e.g. "First Author et al." — wording comes from the LLM output.
  authors?: string;
  journal?: string;
  // Publication year; upstream JSON may deliver it as a number or a string.
  year?: number | string;
  doi?: string;
  // PubMed identifier (digits only).
  pmid?: string;
  // Canonical link to the article (PubMed or the source database).
  url?: string;
  // Abstract excerpt — presumably truncated around 200 chars per the output prompt; not enforced here.
  abstract?: string;
  // Study design label such as "RCT"; free text from the model.
  studyType?: string;
}
|
||||
|
||||
/**
|
||||
* 从 Unifuncs 返回的 content 中拆分报告和文献列表
|
||||
*/
|
||||
export function parseContent(content: string): {
|
||||
synthesisReport: string;
|
||||
resultList: LiteratureItem[] | null;
|
||||
} {
|
||||
if (!content) {
|
||||
return { synthesisReport: '', resultList: null };
|
||||
}
|
||||
|
||||
const jsonBlockMatch = content.match(/```json\s*([\s\S]*?)```/);
|
||||
|
||||
if (jsonBlockMatch) {
|
||||
const beforeJson = content.slice(0, content.indexOf('```json')).trim();
|
||||
const jsonRaw = jsonBlockMatch[1];
|
||||
|
||||
const resultList = safeParseJsonList(jsonRaw);
|
||||
|
||||
const afterJsonEnd = content.indexOf('```', content.indexOf('```json') + 7) + 3;
|
||||
const afterJson = content.slice(afterJsonEnd).trim();
|
||||
|
||||
const synthesisReport = (beforeJson + (afterJson ? '\n\n' + afterJson : '')).trim();
|
||||
|
||||
return { synthesisReport: synthesisReport || content, resultList };
|
||||
}
|
||||
|
||||
const links = extractPubMedLinks(content);
|
||||
if (links.length > 0) {
|
||||
const resultList: LiteratureItem[] = links.map(url => ({
|
||||
title: '',
|
||||
url,
|
||||
pmid: extractPmidFromUrl(url) || undefined,
|
||||
}));
|
||||
return { synthesisReport: content, resultList };
|
||||
}
|
||||
|
||||
return { synthesisReport: content, resultList: null };
|
||||
}
|
||||
|
||||
/**
|
||||
* 4 层防崩溃 JSON 解析
|
||||
*/
|
||||
export function safeParseJsonList(raw: string | null): LiteratureItem[] | null {
|
||||
if (!raw) return null;
|
||||
|
||||
let cleaned = raw.replace(/```json\s*/gi, '').replace(/```\s*/g, '');
|
||||
|
||||
cleaned = cleaned.replace(/,\s*([}\]])/g, '$1');
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(cleaned);
|
||||
return Array.isArray(parsed) ? parsed : [parsed];
|
||||
} catch {
|
||||
logger.warn('[resultParser] Standard JSON.parse failed, trying regex extraction');
|
||||
}
|
||||
|
||||
const objects: any[] = [];
|
||||
const regex = /\{[^{}]*\}/g;
|
||||
let match;
|
||||
while ((match = regex.exec(cleaned)) !== null) {
|
||||
try {
|
||||
objects.push(JSON.parse(match[0]));
|
||||
} catch {
|
||||
// skip unparseable fragment
|
||||
}
|
||||
}
|
||||
|
||||
if (objects.length > 0) {
|
||||
logger.info('[resultParser] Regex extraction recovered items', { count: objects.length });
|
||||
return objects;
|
||||
}
|
||||
|
||||
logger.warn('[resultParser] All parsing strategies failed');
|
||||
return null;
|
||||
}
|
||||
|
||||
function extractPubMedLinks(content: string): string[] {
|
||||
const linkSet = new Set<string>();
|
||||
const pattern = /https?:\/\/pubmed\.ncbi\.nlm\.nih\.gov\/(\d+)\/?/gi;
|
||||
let match;
|
||||
while ((match = pattern.exec(content)) !== null) {
|
||||
linkSet.add(`https://pubmed.ncbi.nlm.nih.gov/${match[1]}/`);
|
||||
}
|
||||
return Array.from(linkSet);
|
||||
}
|
||||
|
||||
function extractPmidFromUrl(url: string): string | null {
|
||||
const m = url.match(/pubmed\.ncbi\.nlm\.nih\.gov\/(\d+)/);
|
||||
return m ? m[1] : null;
|
||||
}
|
||||
223
backend/src/modules/asl/workers/deepResearchV2Worker.ts
Normal file
223
backend/src/modules/asl/workers/deepResearchV2Worker.ts
Normal file
@@ -0,0 +1,223 @@
|
||||
/**
|
||||
* Deep Research V2.0 Worker — SSE 流式架构
|
||||
*
|
||||
* 核心流程:
|
||||
* 1. 从 DB 读取任务(含 confirmedRequirement + targetSources)
|
||||
* 2. 通过 SSE 流连接 Unifuncs OpenAI 兼容接口
|
||||
* 3. 实时读取 reasoning_content 增量 → 解析为 executionLogs → 每 2s 刷写 DB
|
||||
* 4. 读取 content 增量 → 累积
|
||||
* 5. 流结束后解析 content → synthesisReport + resultList
|
||||
* 6. 超时保护: 15 分钟
|
||||
*/
|
||||
|
||||
import { prisma } from '../../../config/database.js';
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
import { streamDeepSearch } from '../services/unifuncsSseClient.js';
|
||||
import { parseReasoningIncrement, mergeConsecutiveThinking, type ExecutionLogEntry } from '../utils/reasoningParser.js';
|
||||
import { parseContent } from '../utils/resultParser.js';
|
||||
|
||||
// Hard ceiling on a single deep-research run (15 min); the stream loop throws once exceeded.
const MAX_DURATION_MS = 15 * 60 * 1000;
// Minimum spacing between executionLogs database flushes while the SSE stream is active.
const LOG_FLUSH_INTERVAL_MS = 2000;
|
||||
|
||||
export async function processDeepResearchV2(job: { data: { taskId: string } }) {
|
||||
const { taskId } = job.data;
|
||||
|
||||
logger.info('[DeepResearchV2Worker] Starting', { taskId });
|
||||
|
||||
const task = await prisma.aslResearchTask.findUnique({
|
||||
where: { id: taskId },
|
||||
});
|
||||
|
||||
if (!task) {
|
||||
logger.error('[DeepResearchV2Worker] Task not found', { taskId });
|
||||
return;
|
||||
}
|
||||
|
||||
const searchQuery = task.confirmedRequirement || task.query;
|
||||
const targetSources = (task.targetSources as string[]) || ['https://pubmed.ncbi.nlm.nih.gov/'];
|
||||
|
||||
try {
|
||||
await prisma.aslResearchTask.update({
|
||||
where: { id: taskId },
|
||||
data: { status: 'running', executionLogs: [] as any },
|
||||
});
|
||||
|
||||
const hasChinese = targetSources.some(
|
||||
s => s.includes('cnki') || s.includes('medjournals')
|
||||
);
|
||||
const outputPrompt = buildOutputPrompt(hasChinese);
|
||||
|
||||
const allLogs: ExecutionLogEntry[] = [{
|
||||
type: 'info',
|
||||
title: '任务已提交',
|
||||
text: '正在连接深度检索引擎...',
|
||||
ts: new Date().toISOString(),
|
||||
}];
|
||||
await updateLogs(taskId, allLogs);
|
||||
|
||||
const startTime = Date.now();
|
||||
let reasoningAccumulated = '';
|
||||
let contentAccumulated = '';
|
||||
let lastFlushTime = Date.now();
|
||||
let previousReasoningLength = 0;
|
||||
let pendingLogEntries: ExecutionLogEntry[] = [];
|
||||
let contentStarted = false;
|
||||
|
||||
const stream = streamDeepSearch({
|
||||
query: searchQuery,
|
||||
domainScope: targetSources,
|
||||
maxDepth: 25,
|
||||
referenceStyle: 'link',
|
||||
outputPrompt,
|
||||
});
|
||||
|
||||
allLogs.push({
|
||||
type: 'info',
|
||||
title: '连接成功',
|
||||
text: 'AI 开始深度检索,正在分析检索策略...',
|
||||
ts: new Date().toISOString(),
|
||||
});
|
||||
await updateLogs(taskId, allLogs);
|
||||
|
||||
for await (const chunk of stream) {
|
||||
if (Date.now() - startTime > MAX_DURATION_MS) {
|
||||
throw new Error('Task exceeded 15-minute timeout');
|
||||
}
|
||||
|
||||
if (chunk.type === 'reasoning') {
|
||||
reasoningAccumulated += chunk.text;
|
||||
|
||||
const newText = reasoningAccumulated.slice(previousReasoningLength);
|
||||
if (newText.length > 200) {
|
||||
const newEntries = parseReasoningIncrement(newText, previousReasoningLength);
|
||||
if (newEntries.length > 0) {
|
||||
pendingLogEntries.push(...newEntries);
|
||||
}
|
||||
previousReasoningLength = reasoningAccumulated.length;
|
||||
}
|
||||
|
||||
if (pendingLogEntries.length > 0 && Date.now() - lastFlushTime >= LOG_FLUSH_INTERVAL_MS) {
|
||||
const merged = mergeConsecutiveThinking(pendingLogEntries);
|
||||
allLogs.push(...merged);
|
||||
pendingLogEntries = [];
|
||||
await updateLogs(taskId, allLogs);
|
||||
lastFlushTime = Date.now();
|
||||
}
|
||||
} else if (chunk.type === 'content') {
|
||||
contentAccumulated += chunk.text;
|
||||
|
||||
if (pendingLogEntries.length > 0) {
|
||||
allLogs.push(...pendingLogEntries);
|
||||
pendingLogEntries = [];
|
||||
}
|
||||
|
||||
if (!contentStarted) {
|
||||
contentStarted = true;
|
||||
allLogs.push({
|
||||
type: 'summary',
|
||||
title: '思考完毕',
|
||||
text: '正在生成综合分析报告...',
|
||||
ts: new Date().toISOString(),
|
||||
});
|
||||
await updateLogs(taskId, allLogs);
|
||||
}
|
||||
} else if (chunk.type === 'done') {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pendingLogEntries.length > 0) {
|
||||
const remaining = reasoningAccumulated.slice(previousReasoningLength);
|
||||
if (remaining.length > 0) {
|
||||
const lastEntries = parseReasoningIncrement(remaining, previousReasoningLength);
|
||||
pendingLogEntries.push(...lastEntries);
|
||||
}
|
||||
allLogs.push(...mergeConsecutiveThinking(pendingLogEntries));
|
||||
pendingLogEntries = [];
|
||||
}
|
||||
|
||||
allLogs.push({
|
||||
type: 'summary',
|
||||
title: '检索完成',
|
||||
text: 'AI 深度检索已完成,正在整理结果...',
|
||||
ts: new Date().toISOString(),
|
||||
});
|
||||
await updateLogs(taskId, allLogs);
|
||||
|
||||
const { synthesisReport, resultList } = parseContent(contentAccumulated);
|
||||
|
||||
await prisma.aslResearchTask.update({
|
||||
where: { id: taskId },
|
||||
data: {
|
||||
status: 'completed',
|
||||
rawResult: contentAccumulated,
|
||||
reasoningContent: reasoningAccumulated,
|
||||
synthesisReport,
|
||||
resultList: resultList as any,
|
||||
resultCount: resultList?.length || 0,
|
||||
executionLogs: allLogs as any,
|
||||
completedAt: new Date(),
|
||||
},
|
||||
});
|
||||
|
||||
logger.info('[DeepResearchV2Worker] Completed', {
|
||||
taskId,
|
||||
resultCount: resultList?.length || 0,
|
||||
reasoningLen: reasoningAccumulated.length,
|
||||
contentLen: contentAccumulated.length,
|
||||
duration: Date.now() - startTime,
|
||||
});
|
||||
|
||||
} catch (error: any) {
|
||||
logger.error('[DeepResearchV2Worker] Failed', {
|
||||
taskId,
|
||||
error: error.message,
|
||||
});
|
||||
|
||||
await prisma.aslResearchTask.update({
|
||||
where: { id: taskId },
|
||||
data: {
|
||||
status: 'failed',
|
||||
errorMessage: error.message,
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function buildOutputPrompt(hasChinese: boolean): string {
|
||||
let prompt = `请按以下格式输出:
|
||||
|
||||
## 综合分析报告
|
||||
(对检索到的文献进行综合分析,包括研究现状、主要发现、研究趋势等。引用文献时请使用内联链接格式 [标题](url),不要使用脚注式引用。)
|
||||
|
||||
## 文献清单
|
||||
请以 JSON 数组格式输出所有检索到的文献元数据:
|
||||
\`\`\`json
|
||||
[
|
||||
{
|
||||
"title": "文献标题",
|
||||
"authors": "第一作者 et al.",
|
||||
"journal": "期刊名",
|
||||
"year": 2024,
|
||||
"doi": "10.xxxx/xxxxx",
|
||||
"pmid": "12345678",
|
||||
"url": "文献完整链接",
|
||||
"abstract": "摘要前200字...",
|
||||
"studyType": "RCT"
|
||||
}
|
||||
]
|
||||
\`\`\``;
|
||||
|
||||
if (hasChinese) {
|
||||
prompt += `\n\n重要提示:用户选择了中文数据库(CNKI/中华医学期刊网),请务必使用中文关键词检索中文文献,确保结果中包含中文文献。如有中文文献,请保留中文标题和作者信息,url 使用原始中文数据库链接。`;
|
||||
}
|
||||
|
||||
return prompt;
|
||||
}
|
||||
|
||||
async function updateLogs(taskId: string, logs: ExecutionLogEntry[]) {
|
||||
await prisma.aslResearchTask.update({
|
||||
where: { id: taskId },
|
||||
data: { executionLogs: logs as any },
|
||||
});
|
||||
}
|
||||
@@ -8,6 +8,7 @@
|
||||
import { jobQueue } from '../../../common/jobs/index.js';
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
import { researchService } from '../services/researchService.js';
|
||||
import { processDeepResearchV2 } from './deepResearchV2Worker.js';
|
||||
import type { Job } from '../../../common/jobs/types.js';
|
||||
|
||||
/**
|
||||
@@ -79,4 +80,18 @@ export function registerResearchWorker() {
|
||||
});
|
||||
|
||||
logger.info('[ResearchWorker] ✅ Worker registered: asl_research_execute');
|
||||
|
||||
// ── Deep Research V2.0 Worker ──
|
||||
jobQueue.process<{ taskId: string }>('asl_deep_research_v2', async (job: Job<{ taskId: string }>) => {
|
||||
logger.info('[ResearchWorker] Starting V2 deep research', {
|
||||
jobId: job.id,
|
||||
taskId: job.data.taskId,
|
||||
});
|
||||
|
||||
await processDeepResearchV2(job);
|
||||
|
||||
return { success: true, taskId: job.data.taskId };
|
||||
});
|
||||
|
||||
logger.info('[ResearchWorker] ✅ Worker registered: asl_deep_research_v2');
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# AIclinicalresearch 系统当前状态与开发指南
|
||||
|
||||
> **文档版本:** v6.0
|
||||
> **文档版本:** v6.1
|
||||
> **创建日期:** 2025-11-28
|
||||
> **维护者:** 开发团队
|
||||
> **最后更新:** 2026-02-22
|
||||
> **最后更新:** 2026-02-23
|
||||
> **🎉 重大里程碑:**
|
||||
> - **🆕 2026-02-22:ASL Deep Research V2.0 开发计划确认 + Unifuncs API 网站覆盖测试完成!** 18站点实测,9个一级可用,ClinicalTrials.gov 专项验证通过
|
||||
> - **🆕 2026-02-23:ASL Deep Research V2.0 核心功能完成!** SSE 实时流 + 段落化思考 + 瀑布流 UI + Markdown 渲染 + 引用链接可见 + Word 导出 + 中文数据源
|
||||
> - **🆕 2026-02-22:SSA Phase I-IV 开发完成!** Session 黑板 + 对话层 LLM + 方法咨询 + 对话驱动分析,E2E 107/107 通过
|
||||
> - **2026-02-21:SSA QPER 智能化主线闭环完成!** Q→P→E→R 四层架构全部开发完成,端到端 40/40 测试通过
|
||||
> - **2026-02-20:SSA Phase 2A 前端集成完成!** 多步骤工作流端到端 + V11 UI联调 + Block-based 架构共识
|
||||
@@ -26,11 +26,12 @@
|
||||
> - **2026-01-24:Protocol Agent 框架完成!** 可复用Agent框架+5阶段对话流程
|
||||
> - **2026-01-22:OSS 存储集成完成!** 阿里云 OSS 正式接入平台基础层
|
||||
>
|
||||
> **🆕 最新进展(ASL V2.0 + SSA Phase I-IV 2026-02-22):**
|
||||
> - ✅ **🎉 ASL Deep Research V2.0 开发计划确认** — 四步瀑布流 + 异步模式 + HITL + 多站点搜索 + Word 导出
|
||||
> - ✅ **Unifuncs API 网站覆盖测试完成** — 18 站点实测,9 个一级可用(PubMed/ClinicalTrials/NCBI/Scholar/Cochrane/CNKI/SinoMed/GeenMedical/维普)
|
||||
> - ✅ **ClinicalTrials.gov 专项验证通过** — 英文查询 + max_depth≥10,120s 获取 38 个 NCT 编号链接
|
||||
> - ✅ **DeepSearch 通用能力指南发布** — `docs/02-通用能力层/04-DeepResearch引擎/`
|
||||
> **🆕 最新进展(ASL V2.0 核心完成 2026-02-23):**
|
||||
> - ✅ **🎉 ASL Deep Research V2.0 核心功能完成** — SSE 流式架构 + 瀑布流 UI + HITL + 5 精选数据源 + Word 导出
|
||||
> - ✅ **SSE 流式替代轮询** — 实时推送 AI 思考过程(reasoning_content),段落化日志聚合
|
||||
> - ✅ **Markdown 渲染 + 引用链接可见化** — react-markdown 正确渲染报告,`[6]` 后显示完整 URL
|
||||
> - ✅ **中文数据源专项测试** — CNKI/中华医学期刊网 domain_scope 有效,混合源建议分批搜索
|
||||
> - ✅ **DeepSearch 通用能力指南 v2.0** — `docs/02-通用能力层/04-DeepResearch引擎/`
|
||||
> - ✅ **🎉 SSA Phase I-IV 全部开发完成** — Session 黑板 + 意图路由器 + 对话层 LLM + 方法咨询 + AskUser 标准化 + 对话驱动分析 + QPER 集成
|
||||
> - ✅ **SSA E2E 测试全部通过** — Phase I 31/31 + Phase II 38/38 + Phase III 13/13 + Phase IV 25/25 = 共 107 项
|
||||
>
|
||||
@@ -70,7 +71,7 @@
|
||||
|---------|---------|---------|---------|---------|--------|
|
||||
| **AIA** | AI智能问答 | 12个智能体 + Protocol Agent(全流程方案) | ⭐⭐⭐⭐⭐ | 🎉 **V3.1 MVP完整交付(90%)** - 一键生成+Word导出 | **P0** |
|
||||
| **PKB** | 个人知识库 | RAG问答、私人文献库 | ⭐⭐⭐ | 🎉 **Dify已替换!自研RAG上线(95%)** | P1 |
|
||||
| **ASL** | AI智能文献 | 文献筛选、Deep Research、证据图谱 | ⭐⭐⭐⭐⭐ | 🚀 **V2.0 计划确认(65%)** - Unifuncs 18站实测 + 5天开发计划 | **P0** |
|
||||
| **ASL** | AI智能文献 | 文献筛选、Deep Research、证据图谱 | ⭐⭐⭐⭐⭐ | 🎉 **V2.0 核心完成(80%)** - SSE流式+瀑布流UI+HITL+Word导出+中文数据源 | **P0** |
|
||||
| **DC** | 数据清洗整理 | ETL + 医学NER(百万行级数据) | ⭐⭐⭐⭐⭐ | ✅ **Tool B完成 + Tool C 99%(异步架构+性能优化-99%+多指标转换+7大功能)** | **P0** |
|
||||
| **IIT** | IIT Manager Agent | AI驱动IIT研究助手 - 双脑架构+REDCap集成 | ⭐⭐⭐⭐⭐ | 🎉 **事件级质控V3.1完成(设计100%,代码60%)** | **P0** |
|
||||
| **SSA** | 智能统计分析 | **QPER架构** + 四层七工具 + 对话层LLM + 意图路由器 | ⭐⭐⭐⭐⭐ | 🎉 **Phase I-IV 开发完成** — QPER闭环 + Session黑板 + 意图路由 + 对话LLM + 方法咨询 + 对话驱动分析,E2E 107/107 | **P1** |
|
||||
@@ -158,9 +159,39 @@
|
||||
|
||||
---
|
||||
|
||||
## 🚀 当前开发状态(2026-02-22)
|
||||
## 🚀 当前开发状态(2026-02-23)
|
||||
|
||||
### 🎉 最新进展:SSA Phase I-IV 开发完成(2026-02-22)
|
||||
### 🎉 最新进展:ASL Deep Research V2.0 核心功能完成(2026-02-23)
|
||||
|
||||
#### ✅ ASL Deep Research V2.0 核心开发完成
|
||||
|
||||
**重大里程碑:从 V1.x 单一 PubMed SSE 搜索升级为多数据源、实时流式、段落化思考的完整深度检索系统!**
|
||||
|
||||
| 功能 | 技术实现 | 状态 |
|
||||
|------|---------|------|
|
||||
| SSE 流式架构 | unifuncsSseClient → OpenAI Compatible SSE,替代 create_task/query_task 轮询 | ✅ |
|
||||
| 段落化思考日志 | reasoningParser 按段落聚合 + mergeConsecutiveThinking,200+ 字符批量解析 | ✅ |
|
||||
| 引用链接可见化 | react-markdown 自定义 `<a>` 组件 + Word 端 expandReferenceLinks() | ✅ |
|
||||
| 瀑布流 UI | phase 0-4 渐进展开,已完成步骤折叠为摘要卡片 | ✅ |
|
||||
| LLM 需求扩写 | DeepSeek-V3 PICOS+MeSH 结构化扩写,Prompt 管理系统可配置 | ✅ |
|
||||
| HITL 策略确认 | 用户可编辑/保存/确认检索指令,单列布局 + PICOS 摘要卡片 | ✅ |
|
||||
| Markdown 报告渲染 | react-markdown + remark-gfm,正确渲染标题/链接/列表/加粗 | ✅ |
|
||||
| Word 导出 | Pandoc 微服务,文献标题内嵌超链接,引用展开为可见 URL | ✅ |
|
||||
| 中文数据源 | CNKI/中华医学期刊网动态 prompt + 专项测试脚本验证 | ✅ |
|
||||
| 5 精选数据源 | PubMed/ClinicalTrials.gov/Cochrane/CNKI/中华医学期刊网 | ✅ |
|
||||
| 数据库扩展 | 6 个新字段(targetSources/confirmedRequirement/aiIntentSummary/executionLogs/synthesisReport/resultList) | ✅ |
|
||||
|
||||
**关键技术决策**:
|
||||
- ✅ **SSE 替代轮询**:解决"等很久才一股脑显示"问题,reasoning_content 实时推送
|
||||
- ✅ **段落化日志**:从逐行碎片到连贯段落,思考过程可读性大幅提升
|
||||
- ✅ **引用链接可见**:`[6]` 后追加完整 URL,方便复制分享(Web+Word 双端)
|
||||
|
||||
**相关文档**:
|
||||
- 模块状态:`docs/03-业务模块/ASL-AI智能文献/00-模块当前状态与开发指南.md`
|
||||
- 开发计划:`docs/03-业务模块/ASL-AI智能文献/04-开发计划/07-Deep Research V2.0 开发计划.md`
|
||||
- API 指南:`docs/02-通用能力层/04-DeepResearch引擎/01-Unifuncs DeepSearch API 使用指南.md`
|
||||
|
||||
### 🎉 SSA Phase I-IV 开发完成(2026-02-22)
|
||||
|
||||
#### ✅ SSA 智能对话与工具体系 Phase I-IV 全部完成(2026-02-22)
|
||||
|
||||
@@ -1462,7 +1493,7 @@ npm run dev # http://localhost:3000
|
||||
|
||||
### 模块完成度
|
||||
- ✅ **已完成**:AIA V2.0(85%,核心功能完成)、平台基础层(100%)、RVW(95%)、通用能力层升级(100%)、**PKB(95%,Dify已替换)** 🎉
|
||||
- 🚧 **开发中**:**ASL(65%,V2.0 计划确认 + 18站Unifuncs测试完成)**、DC(Tool C 98%,Tool B后端100%,Tool B前端0%)、IIT(60%,Phase 1.5完成)、**SSA(QPER主线100% + Phase I-IV 全部完成,E2E 107/107,Phase VI 待启动)** 🎉
|
||||
- 🚧 **开发中**:**ASL(80%,🎉 V2.0 核心功能完成:SSE流式+瀑布流UI+HITL+Word导出+中文数据源)**、DC(Tool C 98%,Tool B后端100%,Tool B前端0%)、IIT(60%,Phase 1.5完成)、**SSA(QPER主线100% + Phase I-IV 全部完成,E2E 107/107,Phase VI 待启动)** 🎉
|
||||
- 📋 **未开始**:ST
|
||||
|
||||
### 部署完成度
|
||||
@@ -1612,9 +1643,9 @@ if (items.length >= 50) {
|
||||
|
||||
---
|
||||
|
||||
**文档版本**:v5.9
|
||||
**最后更新**:2026-02-22
|
||||
**本次更新**:SSA Phase I-IV 全部开发完成,E2E 107/107 通过,开发计划 v1.8
|
||||
**文档版本**:v6.1
|
||||
**最后更新**:2026-02-23
|
||||
**本次更新**:ASL Deep Research V2.0 核心功能完成(SSE流式+段落化思考+引用链接可见化+瀑布流UI+Word导出+中文数据源)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -1,21 +1,23 @@
|
||||
# Unifuncs DeepSearch API 使用指南
|
||||
|
||||
> **文档版本:** v1.0
|
||||
> **文档版本:** v2.0
|
||||
> **创建日期:** 2026-02-22
|
||||
> **最后更新:** 2026-02-23
|
||||
> **维护者:** 开发团队
|
||||
> **文档目的:** 指导业务模块正确使用 Unifuncs DeepSearch API,明确可用网站与最佳策略
|
||||
> **文档目的:** 指导业务模块正确使用 Unifuncs DeepSearch API,明确可用网站、调用模式与最佳策略
|
||||
|
||||
---
|
||||
|
||||
## 1. 概述
|
||||
|
||||
Unifuncs DeepSearch 是一个 AI 驱动的深度搜索引擎,可以在指定的网站范围内自动搜索、阅读和汇总信息。在本平台中,它作为**通用能力层**的一部分,为文献检索、临床试验查找等场景提供底层搜索能力。
|
||||
Unifuncs DeepSearch 是一个 AI 驱动的深度搜索引擎,可以在指定的网站范围内自动搜索、阅读和汇总信息。在本平台中,它作为通用能力层的一部分,为文献检索、临床试验查找等场景提供底层搜索能力。
|
||||
|
||||
### 核心能力
|
||||
- 自然语言输入 → AI 自动生成搜索策略
|
||||
- 多轮迭代搜索(最大深度可配置)
|
||||
- 自动阅读网页内容并提取关键信息
|
||||
- 返回结构化结果 + 综合报告
|
||||
- 支持 SSE 实时流式 + 异步轮询两种调用模式
|
||||
|
||||
### API 基础信息
|
||||
|
||||
@@ -28,108 +30,92 @@ Unifuncs DeepSearch 是一个 AI 驱动的深度搜索引擎,可以在指定
|
||||
|
||||
---
|
||||
|
||||
## 2. 网站覆盖能力(2026-02-22 实测)
|
||||
## 2. 两种调用模式
|
||||
|
||||
### 2.1 测试条件
|
||||
### 2.1 SSE 流式模式(V2.0 采用,推荐)
|
||||
|
||||
- **查询**:他汀类药物预防心血管疾病的随机对照试验和Meta分析,近5年高质量研究
|
||||
- **配置**:max_depth=5,异步模式(create_task + query_task)
|
||||
- **ClinicalTrials.gov 专项**:4 种策略对比测试,max_depth=5~15
|
||||
|
||||
### 2.2 可用性分级
|
||||
|
||||
#### 一级:确认可搜索(返回站内直接链接)
|
||||
|
||||
| 站点 | 域名 | 类型 | 站内链接数 | 搜索/阅读 | 最佳策略 |
|
||||
|------|------|------|-----------|-----------|---------|
|
||||
| **PubMed** | pubmed.ncbi.nlm.nih.gov | 英文 | 28 | 9/29 | 中/英文查询均可,效果最佳 |
|
||||
| **NCBI/PMC** | www.ncbi.nlm.nih.gov | 英文 | 18 | 24/19 | 含 PMC 全文链接 |
|
||||
| **ClinicalTrials.gov** | clinicaltrials.gov | 英文 | 38 | 6/24 | **必须英文查询**,max_depth≥10 |
|
||||
| **Google Scholar** | scholar.google.com | 英文 | 10 | 22/26 | 跨库聚合搜索 |
|
||||
| **CBM/SinoMed** | www.sinomed.ac.cn | 中文 | 9 | 17/12 | 中文生物医学文献数据库 |
|
||||
| **中国知网 CNKI** | www.cnki.net | 中文 | 7 | 40/6 | 中文核心期刊 |
|
||||
| **GeenMedical** | www.geenmedical.com | 英文 | 5 | 38/3 | 医学搜索聚合引擎 |
|
||||
| **Cochrane Library** | www.cochranelibrary.com | 英文 | 4 | 38/12 | 系统综述金标准 |
|
||||
| **维普 VIP** | www.cqvip.com | 中文 | 1 | 33/3 | 可用但链接较少 |
|
||||
|
||||
#### 二级:可到达但链接间接(搜索到内容,但返回链接不指向该站点域名)
|
||||
|
||||
| 站点 | 域名 | 类型 | 其他链接数 | 说明 |
|
||||
|------|------|------|-----------|------|
|
||||
| 中华医学期刊网 | medjournals.cn | 中文 | 12 | 搜索活跃(41次),内容丰富但链接跳转 |
|
||||
| 万方数据 | www.wanfangdata.com.cn | 中文 | 7 | 搜索活跃(42次),链接可能转跳 |
|
||||
| 中国临床试验注册中心 | www.chictr.org.cn | 中文 | 7 | 有内容产出,链接指向其他站 |
|
||||
| 中国中医药数据库 | cintmed.cintcm.cn | 中文 | 22 | 内容最丰富(8631字),链接非直达 |
|
||||
| Scopus | www.scopus.com | 英文 | 15 | 付费墙限制,内容来自外部引用 |
|
||||
| Embase | www.embase.com | 英文 | 14 | 需机构登录 |
|
||||
| Web of Science | www.webofscience.com | 英文 | 6 | 付费墙限制 |
|
||||
|
||||
#### 三级:不可用或受限
|
||||
|
||||
| 站点 | 域名 | 说明 |
|
||||
|------|------|------|
|
||||
| Ovid | ovidsp.ovid.com | 仅搜索未读取内容,需机构登录 |
|
||||
| NSTL | www.nstl.gov.cn | 搜索到但无有效内容和链接 |
|
||||
|
||||
### 2.3 关键发现
|
||||
|
||||
1. **付费库无法穿透**:Unifuncs 只能访问公开可达的网页内容,不支持传入用户名密码。Web of Science、Embase、Scopus、Ovid 等需要机构 IP 或账号登录的库无法直接搜索。
|
||||
|
||||
2. **ClinicalTrials.gov 必须用英文**:该站点为纯英文网站,中文查询效率极低。使用英文查询 + max_depth≥10 时,可稳定返回 30+ 个 NCT 编号和链接。
|
||||
|
||||
3. **中文库表现不一**:CNKI 和 SinoMed 效果较好,能直接返回站内链接;万方和中华医学期刊网可到达但链接不直达。
|
||||
|
||||
---
|
||||
|
||||
## 3. 两种调用模式
|
||||
|
||||
### 3.1 OpenAI 兼容协议(流式,适合实时展示)
|
||||
通过 OpenAI 兼容协议的 `POST /chat/completions` 端点,设置 `stream: true`,以 SSE 事件流实时返回 AI 思考过程和最终结果。
|
||||
|
||||
```typescript
|
||||
import OpenAI from 'openai';
|
||||
|
||||
const client = new OpenAI({
|
||||
baseURL: 'https://api.unifuncs.com/deepsearch/v1',
|
||||
apiKey: process.env.UNIFUNCS_API_KEY,
|
||||
});
|
||||
|
||||
const stream = await client.chat.completions.create({
|
||||
const response = await fetch('https://api.unifuncs.com/deepsearch/v1/chat/completions', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${UNIFUNCS_API_KEY}`,
|
||||
'Content-Type': 'application/json',
|
||||
'Accept': 'text/event-stream',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: 's2',
|
||||
messages: [{ role: 'user', content: query }],
|
||||
stream: true,
|
||||
introduction: '你是一名专业的临床研究文献检索专家',
|
||||
max_depth: 15,
|
||||
max_depth: 25,
|
||||
domain_scope: ['https://pubmed.ncbi.nlm.nih.gov/'],
|
||||
domain_blacklist: [],
|
||||
reference_style: 'link',
|
||||
} as any);
|
||||
output_prompt: '请输出结构化报告和文献列表',
|
||||
}),
|
||||
});
|
||||
|
||||
for await (const chunk of stream) {
|
||||
const delta = chunk.choices[0]?.delta;
|
||||
if ((delta as any)?.reasoning_content) {
|
||||
// AI 思考过程(逐字流式)
|
||||
// 解析 SSE 事件流
|
||||
const reader = response.body.getReader();
|
||||
const decoder = new TextDecoder();
|
||||
let buffer = '';
|
||||
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
buffer += decoder.decode(value, { stream: true });
|
||||
const lines = buffer.split('\n');
|
||||
buffer = lines.pop() || '';
|
||||
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
if (trimmed.startsWith('data: ')) {
|
||||
const data = trimmed.slice(6);
|
||||
if (data === '[DONE]') return;
|
||||
const json = JSON.parse(data);
|
||||
const delta = json.choices?.[0]?.delta;
|
||||
if (delta?.reasoning_content) {
|
||||
// AI 思考过程(逐字流式,实时展示)
|
||||
}
|
||||
if (delta?.content) {
|
||||
// 最终结果内容(逐字流式)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**优点:** 实时展示 AI 思考过程,用户体验好
|
||||
**缺点:** 连接不稳定,离开页面任务丢失,长任务容易超时
|
||||
**SSE 流的数据特征(2026-02-23 实测):**
|
||||
|
||||
### 3.2 异步模式(推荐用于 V2.0)
|
||||
| 指标 | 典型值 |
|
||||
|------|--------|
|
||||
| reasoning chunks | 2000~4000+ 个(实时逐条返回) |
|
||||
| content chunks | 500~1600+ 个 |
|
||||
| reasoning 总字符数 | 10000~13000 字符 |
|
||||
| content 总字符数 | 8000~14000 字符 |
|
||||
| 总耗时 | 4~6 分钟(max_depth=25) |
|
||||
|
||||
#### 创建任务
|
||||
**优点:**
|
||||
- reasoning_content 实时逐 chunk 返回,可立即展示 AI 思考过程
|
||||
- 用户体验优秀:搜索、阅读、分析过程实时可见
|
||||
- 无需轮询,数据推送式
|
||||
|
||||
**缺点:**
|
||||
- 长连接可能因网络中断而失败
|
||||
- 不返回 statistics(iterations、search_count 等)
|
||||
|
||||
### 2.2 异步模式(V1.x 曾使用,仅用于备选)
|
||||
|
||||
#### 创建任务 POST /v1/create_task
|
||||
|
||||
```typescript
|
||||
const payload = {
|
||||
model: 's2',
|
||||
messages: [{ role: 'user', content: query }],
|
||||
introduction: '你是一名专业的临床研究文献检索专家',
|
||||
max_depth: 15,
|
||||
max_depth: 25,
|
||||
domain_scope: ['https://pubmed.ncbi.nlm.nih.gov/'],
|
||||
domain_blacklist: [],
|
||||
reference_style: 'link',
|
||||
generate_summary: true,
|
||||
output_prompt: '请输出结构化报告和文献列表',
|
||||
@@ -148,7 +134,7 @@ const { data } = await res.json();
|
||||
// data.task_id → 保存到数据库
|
||||
```
|
||||
|
||||
#### 轮询任务
|
||||
#### 轮询任务 GET /v1/query_task
|
||||
|
||||
```typescript
|
||||
const params = new URLSearchParams({ task_id: taskId });
|
||||
@@ -158,15 +144,110 @@ const res = await fetch(
|
||||
);
|
||||
|
||||
const { data } = await res.json();
|
||||
// data.status: pending / processing / completed / failed
|
||||
// data.status: pending / running / completed / failed
|
||||
// data.result.content: 最终结果
|
||||
// data.result.reasoning_content: AI 思考过程(增量)
|
||||
// data.result.reasoning_content: AI 思考过程(仅在接近完成时才有数据)
|
||||
// data.progress: { current, total, message }
|
||||
// data.statistics: { iterations, search_count, read_count, token_usage }
|
||||
```
|
||||
|
||||
**优点:** 任务持久化,离开页面不中断,返回 statistics
|
||||
**缺点:** reasoning_content 不实时 — 实测中在 12 分钟内仅返回 25%/50%/90% 进度,最后才一次性返回全部思考内容,用户体验差
|
||||
|
||||
### 2.3 模式选择建议
|
||||
|
||||
| 场景 | 推荐模式 | 原因 |
|
||||
|------|---------|------|
|
||||
| 用户在线等待、需要实时反馈 | SSE 流式 | reasoning 实时展示 |
|
||||
| 后台批量任务、无人值守 | 异步 | 任务持久化、可恢复 |
|
||||
| V2.0 Deep Research | SSE 流式 | Worker 消费 SSE 流 + DB 持久化 |
|
||||
|
||||
---
|
||||
|
||||
## 3. 网站覆盖能力
|
||||
|
||||
### 3.1 V2.0 精选数据源(前端可选)
|
||||
|
||||
| 站点 | 域名 | 类型 | 默认 | 搜索效果 |
|
||||
|------|------|------|------|----------|
|
||||
| PubMed | pubmed.ncbi.nlm.nih.gov | 英文 | 默认勾选 | 效果最佳,核心数据源 |
|
||||
| ClinicalTrials.gov | clinicaltrials.gov | 英文 | 可选 | 英文查询效果好 |
|
||||
| Cochrane Library | www.cochranelibrary.com | 英文 | 可选 | 系统综述金标准 |
|
||||
| 中国知网 CNKI | www.cnki.net | 中文 | 可选 | 中文核心期刊 |
|
||||
| 中华医学期刊网 | medjournals.cn | 中文 | 可选 | 中文医学期刊,效果优秀 |
|
||||
|
||||
### 3.2 中文数据源专项测试(2026-02-23 实测)
|
||||
|
||||
测试条件:SSE 流式模式,max_depth=10,domain_scope 仅包含 CNKI + 中华医学期刊网
|
||||
|
||||
**测试 1:2型糖尿病患者SGLT2抑制剂的肾脏保护作用**
|
||||
|
||||
| 指标 | 结果 |
|
||||
|------|------|
|
||||
| 耗时 | 268.3s |
|
||||
| reasoning chunks | 4071 个(12555 字符) |
|
||||
| content chunks | 1671 个(8055 字符) |
|
||||
| 中文字符数 | 2398 |
|
||||
| 中华医学期刊网链接 | 46 个 |
|
||||
| CNKI 链接 | 有 |
|
||||
| PubMed 链接 | 0 个 |
|
||||
|
||||
**测试 2:非小细胞肺癌免疫治疗的中国临床研究进展**
|
||||
|
||||
| 指标 | 结果 |
|
||||
|------|------|
|
||||
| 耗时 | 316.4s |
|
||||
| reasoning chunks | 2675 个(9985 字符) |
|
||||
| content chunks | 3151 个(14291 字符) |
|
||||
| 中文字符数 | 3536 |
|
||||
| 中华医学期刊网链接 | 94 个 |
|
||||
| PubMed 链接 | 0 个 |
|
||||
|
||||
**测试 3:他汀类药物一级预防(混合源:PubMed + CNKI + 中华医学期刊网)**
|
||||
|
||||
| 指标 | 结果 |
|
||||
|------|------|
|
||||
| 中文字符数 | 3676 |
|
||||
| PubMed 链接 | 96 个 |
|
||||
| 中文源链接 | 0 个 |
|
||||
|
||||
### 3.3 关键发现
|
||||
|
||||
1. 纯中文 domain_scope 效果好:当仅限定 CNKI + 中华医学期刊网时,API 能有效检索中文文献,返回的链接全部指向中文站点。中华医学期刊网效果尤其突出。
|
||||
|
||||
2. 混合模式存在偏向性:同时包含 PubMed + 中文源时,AI 明显倾向于 PubMed,中文文献被忽略。如果用户需要中文文献,建议仅选择中文数据库,或在 Prompt 中强调中文检索需求。
|
||||
|
||||
3. 付费库无法穿透:Unifuncs 只能访问公开可达的网页内容,不支持传入用户名密码。Web of Science、Embase、Scopus 等需要机构登录的库无法直接搜索。
|
||||
|
||||
4. SSE 模式实时性优于异步模式:SSE 流的 reasoning_content 真正逐 chunk 实时返回,而异步模式的 query_task 轮询中 reasoning_content 直到接近完成才有数据。
|
||||
|
||||
### 3.4 全站覆盖测试(2026-02-22 实测,异步模式)
|
||||
|
||||
#### 一级:确认可搜索(返回站内直接链接)
|
||||
|
||||
| 站点 | 域名 | 类型 | 站内链接数 | 搜索/阅读 | 最佳策略 |
|
||||
|------|------|------|-----------|-----------|---------|
|
||||
| PubMed | pubmed.ncbi.nlm.nih.gov | 英文 | 28 | 9/29 | 中/英文查询均可 |
|
||||
| NCBI/PMC | www.ncbi.nlm.nih.gov | 英文 | 18 | 24/19 | 含 PMC 全文链接 |
|
||||
| ClinicalTrials.gov | clinicaltrials.gov | 英文 | 38 | 6/24 | 英文查询,max_depth≥10 |
|
||||
| Google Scholar | scholar.google.com | 英文 | 10 | 22/26 | 跨库聚合搜索 |
|
||||
| CBM/SinoMed | www.sinomed.ac.cn | 中文 | 9 | 17/12 | 中文生物医学文献 |
|
||||
| 中国知网 CNKI | www.cnki.net | 中文 | 7 | 40/6 | 中文核心期刊 |
|
||||
| GeenMedical | www.geenmedical.com | 英文 | 5 | 38/3 | 医学搜索聚合引擎 |
|
||||
| Cochrane Library | www.cochranelibrary.com | 英文 | 4 | 38/12 | 系统综述金标准 |
|
||||
| 维普 VIP | www.cqvip.com | 中文 | 1 | 33/3 | 可用但链接较少 |
|
||||
|
||||
#### 二级:可到达但链接间接
|
||||
|
||||
| 站点 | 域名 | 类型 | 其他链接数 | 说明 |
|
||||
|------|------|------|-----------|------|
|
||||
| 中华医学期刊网 | medjournals.cn | 中文 | 12 | 搜索活跃,专项测试效果优秀 |
|
||||
| 万方数据 | www.wanfangdata.com.cn | 中文 | 7 | 搜索活跃,链接可能转跳 |
|
||||
| 中国临床试验注册中心 | www.chictr.org.cn | 中文 | 7 | 有内容产出 |
|
||||
| 中国中医药数据库 | cintmed.cintcm.cn | 中文 | 22 | 内容丰富 |
|
||||
| Scopus | www.scopus.com | 英文 | 15 | 付费墙限制 |
|
||||
| Embase | www.embase.com | 英文 | 14 | 需机构登录 |
|
||||
| Web of Science | www.webofscience.com | 英文 | 6 | 付费墙限制 |
|
||||
|
||||
---
|
||||
|
||||
@@ -175,92 +256,76 @@ const { data } = await res.json();
|
||||
| 参数 | 类型 | 推荐值 | 说明 |
|
||||
|------|------|--------|------|
|
||||
| `model` | string | `"s2"` | 固定值 |
|
||||
| `max_depth` | number | 10~25 | 搜索深度。测试用 5,生产用 15~25。越大越全但越慢 |
|
||||
| `domain_scope` | string[] | 按需配置 | 限定搜索范围。留空则不限 |
|
||||
| `stream` | boolean | `true` | SSE 流式模式 |
|
||||
| `max_depth` | number | 10~25 | 搜索深度。测试用 10,生产用 25 |
|
||||
| `domain_scope` | string[] | 按需配置 | 限定搜索范围,留空则不限 |
|
||||
| `domain_blacklist` | string[] | `[]` | 排除特定站点 |
|
||||
| `introduction` | string | 见下方 | 设定 AI 角色和搜索指导 |
|
||||
| `reference_style` | string | `"link"` | 引用格式,`link` 或 `character` |
|
||||
| `output_prompt` | string | 可选 | 自定义输出格式提示词 |
|
||||
| `generate_summary` | boolean | `true` | 异步模式完成后自动生成摘要 |
|
||||
| `generate_summary` | boolean | `true` | 仅异步模式有效 |
|
||||
|
||||
### 推荐的 introduction 模板
|
||||
|
||||
```
|
||||
你是一名专业的临床研究文献检索专家。
|
||||
请根据用户的研究需求,在指定数据库中系统性地检索相关文献。
|
||||
|
||||
检索要求:
|
||||
1. 优先检索高质量研究:系统综述、Meta分析、RCT
|
||||
2. 关注 PICOS 要素(人群、干预、对照、结局、研究设计)
|
||||
3. 优先近 5 年的研究
|
||||
4. 返回每篇文献的完整元数据(标题、作者、期刊、年份、链接)
|
||||
|
||||
输出要求:
|
||||
1. 按研究类型分组
|
||||
2. 每篇文献附带直接链接
|
||||
3. 最后给出综合性研究概述
|
||||
你是一名专业的临床研究文献检索专家,擅长从多个学术数据库中检索高质量的医学文献。
|
||||
请根据用户的检索需求,系统性地搜索并返回相关文献的详细信息。
|
||||
```
|
||||
|
||||
### domain_scope 使用策略
|
||||
|
||||
| 场景 | domain_scope 配置 | 说明 |
|
||||
|------|-------------------|------|
|
||||
| 英文文献检索 | `["https://pubmed.ncbi.nlm.nih.gov/"]` | 核心数据源 |
|
||||
| 中文文献检索 | `["https://www.cnki.net/", "https://medjournals.cn/"]` | 仅中文源,避免被英文库稀释 |
|
||||
| 临床试验检索 | `["https://clinicaltrials.gov/"]` | 必须英文查询 |
|
||||
| 系统综述 | `["https://www.cochranelibrary.com/"]` | Cochrane 专用 |
|
||||
| 全面检索 | `["https://pubmed.ncbi.nlm.nih.gov/", "https://www.cochranelibrary.com/"]` | 多英文源组合 |
|
||||
|
||||
注意:不建议中英文数据源混合,测试表明混合时 AI 偏向英文源,中文文献会被忽略。如需同时检索中英文,建议分两次独立搜索。
|
||||
|
||||
---
|
||||
|
||||
## 5. 最佳策略指南
|
||||
|
||||
### 5.1 针对不同站点的策略
|
||||
|
||||
| 目标站点 | 查询语言 | max_depth | 特殊说明 |
|
||||
|---------|---------|-----------|---------|
|
||||
| PubMed / NCBI | 中文或英文均可 | 15~25 | 效果最好,核心数据源 |
|
||||
| ClinicalTrials.gov | **必须英文** | 10~15 | 中文查询极慢甚至超时 |
|
||||
| Cochrane Library | 英文优先 | 10~15 | 系统综述专用 |
|
||||
| Google Scholar | 中文或英文 | 10~15 | 跨库聚合,可能有重复 |
|
||||
| CNKI / SinoMed | 中文 | 10~15 | 中文文献首选 |
|
||||
| GeenMedical | 英文优先 | 5~10 | 聚合搜索,速度快 |
|
||||
|
||||
### 5.2 多站点组合搜索
|
||||
|
||||
```typescript
|
||||
// V2.0 推荐:用户选择多个数据源,合并到 domain_scope
|
||||
const domainScope = [
|
||||
'https://pubmed.ncbi.nlm.nih.gov/',
|
||||
'https://www.cochranelibrary.com/',
|
||||
'https://scholar.google.com/',
|
||||
];
|
||||
|
||||
// 如果包含 ClinicalTrials.gov,需求扩写时自动翻译为英文
|
||||
```
|
||||
|
||||
### 5.3 性能预期
|
||||
## 5. 性能预期
|
||||
|
||||
| max_depth | 预计耗时 | 搜索/阅读量 | 适用场景 |
|
||||
|-----------|---------|------------|---------|
|
||||
| 5 | 1~3 分钟 | 10~40 / 0~20 | 快速探索 |
|
||||
| 10 | 2~5 分钟 | 20~50 / 10~30 | 常规检索、中文源测试 |
|
||||
| 15 | 3~8 分钟 | 30~80 / 20~50 | 深度检索 |
|
||||
| 25 | 5~15 分钟 | 50~150 / 30~80 | 全面研究(V2.0 生产配置) |
|
||||
|
||||
### 5.4 成本估算
|
||||
### 成本估算
|
||||
|
||||
- 单次搜索 Token 消耗:5万~30万 tokens(取决于深度和站点数量)
|
||||
- 估算成本:约 ¥0.1~0.5/次(按 unifuncs 定价)
|
||||
|
||||
---
|
||||
|
||||
## 6. 平台集成架构
|
||||
|
||||
### V2.0 SSE + 异步 Worker 混合架构(当前)
|
||||
|
||||
```
|
||||
researchService.ts → OpenAI SDK → SSE 流式
|
||||
researchWorker.ts → pg-boss → 异步执行
|
||||
前端 DeepResearchPage
|
||||
→ API: PUT /research/tasks/:taskId/execute
|
||||
→ pg-boss Job Queue: asl_deep_research_v2
|
||||
→ deepResearchV2Worker.ts
|
||||
→ unifuncsSseClient.ts (SSE 流式)
|
||||
→ 实时解析 reasoning_content → executionLogs → 写入 DB(每2s刷写)
|
||||
→ 累积 content → 完成后解析为 synthesisReport + resultList
|
||||
→ 前端 3s 轮询 DB → AgentTerminal 实时展示日志
|
||||
```
|
||||
|
||||
### 计划升级(V2.0 - ASL Deep Research)
|
||||
### 核心文件
|
||||
|
||||
```
|
||||
requirementExpansionService.ts → DeepSeek-V3 需求扩写
|
||||
unifuncsAsyncClient.ts → create_task / query_task 异步模式
|
||||
deepResearchV2Worker.ts → pg-boss Worker → 轮询 + 日志解析
|
||||
```
|
||||
| 文件 | 说明 |
|
||||
|------|------|
|
||||
| `unifuncsSseClient.ts` | SSE 流式客户端,AsyncGenerator 逐 chunk 返回 |
|
||||
| `unifuncsAsyncClient.ts` | 异步客户端(备选),create_task / query_task |
|
||||
| `deepResearchV2Worker.ts` | pg-boss Worker,消费 SSE 流并实时写 DB |
|
||||
| `reasoningParser.ts` | 将 reasoning_content 文本解析为结构化日志 |
|
||||
| `resultParser.ts` | 将 content 解析为 synthesisReport + resultList |
|
||||
|
||||
### 其他模块可复用场景
|
||||
|
||||
@@ -275,21 +340,28 @@ deepResearchV2Worker.ts → pg-boss Worker → 轮询 + 日志解析
|
||||
|
||||
## 7. 测试脚本
|
||||
|
||||
项目中已提供两个测试脚本:
|
||||
|
||||
| 脚本 | 路径 | 用途 |
|
||||
|------|------|------|
|
||||
| 中文数据源专项测试 | `backend/src/modules/asl/__tests__/deep-research-chinese-sources.ts` | SSE 流式模式测试 CNKI + 中华医学期刊网 |
|
||||
| 全站覆盖测试 | `backend/scripts/test-unifuncs-site-coverage.ts` | 并行测试 18 个医学网站的搜索能力 |
|
||||
| ClinicalTrials 专项 | `backend/scripts/test-unifuncs-clinicaltrials.ts` | 4 种策略对比测试 ClinicalTrials.gov |
|
||||
| 快速验证 | `backend/scripts/test-unifuncs-deepsearch.ts` | 单站点 SSE 流式快速测试 |
|
||||
| E2E 集成测试 | `backend/src/modules/asl/__tests__/deep-research-v2-e2e.ts` | V2.0 全流程端到端测试 |
|
||||
| Smoke 测试 | `backend/src/modules/asl/__tests__/deep-research-v2-smoke.ts` | V2.0 API 连通性快速验证 |
|
||||
|
||||
```bash
|
||||
# 中文数据源专项测试(SSE 流式,约 10~15 分钟)
|
||||
cd backend
|
||||
npx tsx src/modules/asl/__tests__/deep-research-chinese-sources.ts
|
||||
|
||||
# 全站覆盖测试
|
||||
npx tsx scripts/test-unifuncs-site-coverage.ts
|
||||
npx tsx scripts/test-unifuncs-clinicaltrials.ts
|
||||
|
||||
# E2E 集成测试(需先启动后端服务)
|
||||
npx tsx src/modules/asl/__tests__/deep-research-v2-e2e.ts
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
**维护者:** 开发团队
|
||||
**最后更新:** 2026-02-23
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
# AI智能文献模块 - 当前状态与开发指南
|
||||
|
||||
> **文档版本:** v2.0
|
||||
> **创建日期:** 2025-11-21
|
||||
> **维护者:** AI智能文献开发团队
|
||||
> **最后更新:** 2026-02-23 🆕 **Deep Research V2.0 核心功能开发完成!SSE 实时流 + 瀑布流 UI + 中文数据源 + Word 导出**
|
||||
> **重大进展:**
|
||||
> - 🆕 2026-02-22:V2.0 开发计划确认!四步瀑布流 + 异步模式 + HITL 需求确认 + 务实结果展示
|
||||
> - 🆕 2026-02-22:Unifuncs API 网站覆盖测试完成!18 站点实测,9 个一级可用,ClinicalTrials.gov 专项验证通过
|
||||
> - 🆕 2026-02-23:V2.0 核心功能完成!SSE 流式架构 + 段落化思考日志 + 引用链接可见化
|
||||
> - 🆕 2026-02-22:V2.0 前后端联调完成!瀑布流 UI + Markdown 渲染 + Word 导出 + 中文数据源测试
|
||||
> - 🆕 2026-02-22:V2.0 开发计划确认 + Unifuncs API 网站覆盖测试完成
|
||||
> - 2026-01-18:智能文献检索(DeepSearch)MVP完成 - unifuncs API 集成
|
||||
> **文档目的:** 反映模块真实状态,帮助新开发人员快速上手
|
||||
|
||||
@@ -30,46 +31,96 @@
|
||||
AI智能文献模块是一个基于大语言模型(LLM)的文献筛选系统,用于帮助研究人员根据PICOS标准自动筛选文献。
|
||||
|
||||
### 当前状态
|
||||
- **开发阶段**:🎉 V2.0 Deep Research 核心功能开发完成
|
||||
- **已完成功能**:
|
||||
- ✅ 标题摘要初筛(Title & Abstract Screening)- 完整流程
|
||||
- ✅ 全文复筛后端(Day 2-5)- LLM服务 + API + Excel导出
|
||||
- ✅ **智能文献检索(DeepSearch)V1.x MVP** - unifuncs API 集成,SSE 实时流式
|
||||
- ✅ **智能文献检索(DeepSearch)V1.x MVP** - unifuncs API 集成
|
||||
- ✅ **Unifuncs API 网站覆盖测试** - 18 站点实测,9 个一级可用
|
||||
- ✅ **Deep Research V2.0 开发计划** - 完整技术方案、API 契约、5 天分阶段计划
|
||||
- **V2.0 开发中**:
|
||||
- 🚧 Deep Research V2.0 — 四步瀑布流(Landing→配置→HITL→终端→结果)
|
||||
- 🚧 异步模式改造(SSE → Unifuncs create_task/query_task)
|
||||
- 🚧 需求扩写 + HITL 确认 + Agent 终端 + 简洁结果展示 + Word 导出
|
||||
- **模型支持**:DeepSeek-V3(需求扩写) + unifuncs s2(深度搜索) + Qwen-Max(筛选)
|
||||
- ✅ **🎉 Deep Research V2.0 核心功能** — SSE 流式架构 + 瀑布流 UI + HITL + Word 导出
|
||||
- **V2.0 已完成**:
|
||||
- ✅ **SSE 流式架构**:从 create_task/query_task 轮询改为 OpenAI Compatible SSE 流,实时推送 AI 思考过程
|
||||
- ✅ **LLM 需求扩写**:DeepSeek-V3 将粗略输入扩写为结构化检索指令书(PICOS + MeSH)
|
||||
- ✅ **HITL 策略确认**:用户可编辑、保存、确认 AI 生成的检索指令
|
||||
- ✅ **瀑布流 UI**:Landing → 配置 → HITL → Agent 终端 → 结果,已完成步骤折叠为摘要卡片
|
||||
- ✅ **段落化思考日志**:reasoning_content 按段落聚合,连续思考合并为一段(非逐行碎片)
|
||||
- ✅ **Markdown 渲染**:react-markdown + remark-gfm 正确渲染报告中的标题、链接、列表
|
||||
- ✅ **引用链接可见化**:报告中 `[6]` 引用后显示完整 URL,方便复制分享
|
||||
- ✅ **Word 导出**:Pandoc 微服务,文献标题内嵌超链接,引用展开为可见 URL
|
||||
- ✅ **中文数据源支持**:CNKI/中华医学期刊网动态 prompt 增强 + 专项测试验证
|
||||
- ✅ **5 个精选数据源**:PubMed(默认)、ClinicalTrials.gov、Cochrane Library、CNKI、中华医学期刊网
|
||||
- **模型支持**:DeepSeek-V3(需求扩写) + unifuncs s2(深度搜索,SSE 流式) + Qwen-Max(筛选)
|
||||
- **部署状态**:✅ 本地开发环境运行正常
|
||||
|
||||
### 🎉 Deep Research V2.0(2026-02-23 核心功能完成)
|
||||
|
||||
**V2.0 核心升级:**
|
||||
- 四步瀑布流:Landing → 配置 → HITL 策略确认 → Agent 终端 → 结果展示
|
||||
- LLM 需求扩写(DeepSeek-V3):粗略输入 → 结构化自然语言检索指令书
|
||||
- Human-in-the-Loop:用户可编辑修改 AI 生成的检索需求
|
||||
- **异步模式**:SSE → Unifuncs create_task/query_task + pg-boss 队列(离开页面不中断)
|
||||
- Agent 终端:暗色主题 + 分类结构化日志(每 3-5s 弹出一条)
|
||||
- 简洁结果展示:AI 综合报告(Markdown)+ 文献清单表格 + Word 导出
|
||||
- **多站点搜索**:9 个一级可用站点,用户可在前端选择数据源
|
||||
**V2.0 架构与功能:**
|
||||
|
||||
**V2.0 确认可用数据源(2026-02-22 实测):**
|
||||
| 层级 | 组件 | 说明 | 状态 |
|
||||
|------|------|------|------|
|
||||
| 前端 | 瀑布流页面 | Landing→配置→HITL→终端→结果,折叠式摘要 | ✅ |
|
||||
| 前端 | react-markdown | 综合报告 Markdown 正确渲染 + 引用链接可见化 | ✅ |
|
||||
| 后端 | SSE 流式客户端 | OpenAI Compatible SSE,实时推送 reasoning_content | ✅ |
|
||||
| 后端 | 段落化解析器 | 连续思考行合并为段落,搜索/阅读/分析独立成条 | ✅ |
|
||||
| 后端 | 需求扩写服务 | DeepSeek-V3 PICOS+MeSH 结构化扩写 | ✅ |
|
||||
| 后端 | Word 导出 | Pandoc 微服务,内嵌超链接 + 引用展开 | ✅ |
|
||||
| 后端 | pg-boss 队列 | 异步任务,离开页面不中断 | ✅ |
|
||||
| 数据库 | 6 个新字段 | targetSources/confirmedRequirement/aiIntentSummary/executionLogs/synthesisReport/resultList | ✅ |
|
||||
|
||||
| 站点 | 站内链接数 | 说明 |
|
||||
|------|-----------|------|
|
||||
| PubMed | 28 | 核心数据源,效果最佳 |
|
||||
| ClinicalTrials.gov | 38 | 必须英文查询,max_depth≥10 |
|
||||
| NCBI/PMC | 18 | 含 PMC 全文链接 |
|
||||
| Google Scholar | 10 | 跨库聚合 |
|
||||
| CBM/SinoMed | 9 | 中文生物医学 |
|
||||
| CNKI | 7 | 中文核心期刊 |
|
||||
| GeenMedical | 5 | 医学搜索引擎 |
|
||||
| Cochrane Library | 4 | 系统综述金标准 |
|
||||
| 维普 | 1 | 中文库 |
|
||||
**V2.0 核心技术决策:**
|
||||
1. **SSE 流式替代轮询**:从 create_task/query_task 轮询改为 OpenAI Compatible SSE 流。解决了"等很久才一股脑显示思考过程"的体验问题,reasoning_content 实时推送、每 2 秒刷写 DB。
|
||||
2. **段落化思考日志**:reasoning_content 按 `\n\n` 段落拆分,同段落内连续思考行用空格合并为一条,搜索/阅读/分析动作独立。Worker 每 200+ 字符批量解析,写入前再合并连续同类条目。
|
||||
3. **引用链接可见化**:Web 端通过 react-markdown 自定义 `<a>` 组件在链接后追加灰色 URL;Word 端通过 `expandReferenceLinks()` 将 `[[N]](url)` 展开为 `[N](url) (url)`。
|
||||
4. **中文数据源策略**:纯中文源(CNKI/中华医学期刊网)单独搜索效果好;混合中英文源时 PubMed 主导,建议分批搜索。
|
||||
|
||||
**开发计划**:5 天分阶段交付,详见 `04-开发计划/07-Deep Research V2.0 开发计划.md`
|
||||
**V2.0 选定数据源(5 个精选):**
|
||||
|
||||
| 数据源 | 类型 | 默认选中 | 说明 |
|
||||
|--------|------|----------|------|
|
||||
| PubMed | 英文 | ✅ | 核心数据源,效果最佳 |
|
||||
| ClinicalTrials.gov | 英文 | - | 临床试验注册库 |
|
||||
| Cochrane Library | 英文 | - | 系统综述金标准 |
|
||||
| 中国知网 CNKI | 中文 | - | 中文核心期刊 |
|
||||
| 中华医学期刊网 | 中文 | - | 中华医学会官方期刊 |
|
||||
|
||||
**V2.0 API 端点:**
|
||||
```http
|
||||
POST /api/v1/asl/deep-research/generate-requirement # LLM 需求扩写
|
||||
POST /api/v1/asl/deep-research/tasks # 创建任务(pg-boss)
|
||||
GET /api/v1/asl/deep-research/tasks/:taskId # 查询任务状态+日志+结果
|
||||
GET /api/v1/asl/deep-research/tasks/:taskId/export-word # Word 导出
|
||||
```
|
||||
|
||||
**V2.0 关键文件:**
|
||||
```
|
||||
backend/src/modules/asl/
|
||||
├── services/unifuncsSseClient.ts # SSE 流式客户端(AsyncGenerator)
|
||||
├── services/requirementExpansionService.ts # LLM 需求扩写
|
||||
├── services/wordExportService.ts # Word 导出(Pandoc + 引用展开)
|
||||
├── workers/deepResearchV2Worker.ts # SSE Worker(段落解析 + 2s 刷写)
|
||||
├── utils/reasoningParser.ts # 段落化解析器 + mergeConsecutiveThinking
|
||||
├── utils/resultParser.ts # content 解析(报告 + JSON 文献列表)
|
||||
├── controllers/deepResearchController.ts # 4 个 API 端点
|
||||
├── config/dataSources.ts # 5 个精选数据源配置
|
||||
└── __tests__/
|
||||
├── deep-research-v2-e2e.ts # 端到端测试
|
||||
├── deep-research-v2-smoke.ts # 冒烟测试
|
||||
└── deep-research-chinese-sources.ts # 中文数据源专项测试
|
||||
|
||||
frontend-v2/src/modules/asl/
|
||||
├── pages/DeepResearchPage.tsx # 瀑布流主页面(phase 0-4)
|
||||
├── components/deep-research/
|
||||
│ ├── LandingView.tsx # 搜索入口
|
||||
│ ├── SetupPanel.tsx # 数据源+过滤器配置(可折叠)
|
||||
│ ├── StrategyConfirm.tsx # HITL 策略确认(可折叠)
|
||||
│ ├── AgentTerminal.tsx # 暗色终端日志展示
|
||||
│ └── ResultsView.tsx # Markdown 报告 + 文献表格 + Word 导出
|
||||
├── hooks/useDeepResearchTask.ts # React Query 轮询 Hook
|
||||
├── types/deepResearch.ts # TypeScript 类型
|
||||
└── api/index.ts # 4 个 V2.0 API 函数
|
||||
```
|
||||
|
||||
**开发计划**:详见 `04-开发计划/07-Deep Research V2.0 开发计划.md`
|
||||
|
||||
**通用能力指南**:`docs/02-通用能力层/04-DeepResearch引擎/01-Unifuncs DeepSearch API 使用指南.md`
|
||||
|
||||
@@ -1319,28 +1370,28 @@ Drawer打开: <50ms
|
||||
|
||||
## 🎯 下一步开发计划
|
||||
|
||||
### 当前Sprint(全文复筛MVP)
|
||||
1. 🚧 **全文复筛 Day 4**:批处理任务服务(进行中)
|
||||
2. ⏳ **全文复筛 Day 5**:前端UI开发(待开始)
|
||||
3. ⏳ **全文复筛 Day 6**:API集成与联调(待开始)
|
||||
### 当前(Deep Research V2.0 优化)
|
||||
1. ⏳ **端到端回归测试**:完整流程测试(创建→扩写→确认→执行→结果→导出)
|
||||
2. ⏳ **用户体验打磨**:加载动画、错误提示、边界情况处理
|
||||
3. ⏳ **中文检索优化**:中英文混合检索策略调优(建议分批搜索)
|
||||
4. ⏳ **导出格式完善**:Word 模板美化、更多导出格式
|
||||
|
||||
### 短期优化(标题摘要初筛)
|
||||
1. ⏳ Prompt优化(提升准确率到85%+)
|
||||
2. ⏳ 添加任务暂停/取消功能
|
||||
3. ⏳ 实现并发处理(3-5个并发)
|
||||
4. ⏳ 添加估计剩余时间显示
|
||||
### 短期优化
|
||||
1. ⏳ Prompt 优化(需求扩写质量提升)
|
||||
2. ⏳ 搜索历史管理(历史任务列表、重新搜索)
|
||||
3. ⏳ 全文复筛前端 UI 开发
|
||||
4. ⏳ 标题摘要初筛 Prompt 优化(准确率 60% → 85%+)
|
||||
|
||||
### 中期(Month 2)
|
||||
1. 🚧 全文复筛功能(开发中)
|
||||
2. ⏳ 全文数据提取功能
|
||||
3. ⏳ 用户自定义边界情况
|
||||
4. ⏳ WebSocket实时推送
|
||||
1. ⏳ 全文复筛功能完善
|
||||
2. ⏳ 证据图谱可视化
|
||||
3. ⏳ 用户自定义数据源
|
||||
4. ⏳ 生产环境部署
|
||||
|
||||
### 长期(Month 3+)
|
||||
1. ⏳ 多用户支持(真实认证)
|
||||
2. ⏳ 消息队列(Bull/RabbitMQ)
|
||||
3. ⏳ 分布式处理
|
||||
4. ⏳ 成本控制和监控
|
||||
1. ⏳ 多语言检索策略自动优化
|
||||
2. ⏳ 批量文献检索
|
||||
3. ⏳ 成本控制和监控
|
||||
|
||||
---
|
||||
|
||||
@@ -1350,15 +1401,14 @@ Drawer打开: <50ms
|
||||
|
||||
---
|
||||
|
||||
**最后更新**:2026-02-23(Deep Research V2.0 核心功能完成)
|
||||
**文档状态**:✅ 反映真实状态
|
||||
**下次更新时机**:V2.0 端到端回归测试完成 或 全文复筛前端开发启动
|
||||
|
||||
**本次更新内容**(v1.1):
|
||||
- ✅ 更新当前状态(新增全文复筛开发进度)
|
||||
- ✅ 更新关键里程碑(Day 2-3完成)
|
||||
- ✅ 新增后端代码结构(common层 + fulltext-screening层)
|
||||
- ✅ 新增开发记录链接(Day 2-3工作总结)
|
||||
- ✅ 更新下一步开发计划(当前Sprint)
|
||||
**本次更新内容**(v2.0):
|
||||
- ✅ 更新当前状态:V2.0 核心功能开发完成(SSE 流式 + 瀑布流 UI + Word 导出)
|
||||
- ✅ 新增 V2.0 完整架构表、技术决策、API 端点、关键文件列表
|
||||
- ✅ 新增 5 个精选数据源配置(替代 9 站全量展示)
|
||||
- ✅ 更新下一步开发计划(V2.0 优化 + 短期/中期/长期)
|
||||
|
||||
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
# Deep Research V2.0 开发计划
|
||||
|
||||
> **文档版本:** v1.1
|
||||
> **创建日期:** 2026-02-22
|
||||
> **维护者:** 开发团队
|
||||
> **前置文档:** PRD V4.1 / 原型图 V4.2 / 技术设计 V4.1
|
||||
> **预计工期:** 5 天
|
||||
> **核心理念:** 单页瀑布流 + 自然语言需求扩写 + 异步执行 + 务实结果展示
|
||||
> **v1.1 更新:** 融入审查建议(Worker 重试、JSON 防崩溃、条件滚动、MeSH 扩展、Prompt 管理、数据源精简)
|
||||
|
||||
---
|
||||
|
||||
@@ -30,6 +31,9 @@
|
||||
| 异步下的实时性 | **Worker 5s 轮询 + 前端 3s 轮询** | 用户每 3-5s 看到一条新日志,对分钟级 Agent 任务来说体验自然,比逐字流更适合终端 UI |
|
||||
| 结果展示复杂度 | **报告 + 表格,不做图表看板** | 研究人员要的是内容本身(综合报告 + 文献清单),图表是锦上添花非刚需,MVP 不做 |
|
||||
| Word 导出 | **复用 Pandoc** | Protocol Agent 已验证 Pandoc → Word 方案,零额外依赖 |
|
||||
| 需求扩写 Prompt | **Prompt 管理服务(运营端可配)** | 使用 `ASL_DEEP_RESEARCH_EXPANSION`,运营管理端可在线调优,代码中写兜底 Fallback |
|
||||
| 数据源范围 | **精简为 5 个(3英文+2中文)** | 基于 18 站实测结果精选,PubMed 默认勾选,ClinicalTrials 标记需英文查询 |
|
||||
| 状态管理 | **React Query + useState** | 服务端状态用 useQuery 轮询(自带缓存+去重),页面步骤用 useState,不引入 Zustand |
|
||||
|
||||
---
|
||||
|
||||
@@ -135,9 +139,82 @@ npx prisma migrate dev --name add_deep_research_v2_fields
|
||||
|
||||
---
|
||||
|
||||
## 4. API 契约
|
||||
## 4. 需求扩写 Prompt 设计
|
||||
|
||||
### 4.1 需求扩写(同步)
|
||||
### 4.1 设计原则
|
||||
|
||||
需求扩写 Prompt 通过 **Prompt 管理服务**(`ASL_DEEP_RESEARCH_EXPANSION`)进行管理,运营端可在线调优,代码内置 Fallback 兜底。
|
||||
|
||||
| 原则 | 说明 |
|
||||
|------|------|
|
||||
| PICOS 结构化 | 引导 LLM 按 Population-Intervention-Comparison-Outcome-Study Design 拆解用户模糊需求 |
|
||||
| MeSH 同义词扩展 | 自动补充专业 MeSH 术语(如 "他汀" → "Statins, Hydroxymethylglutaryl-CoA Reductase Inhibitors") |
|
||||
| 默认高质量研究设计 | 若用户未指定,默认偏向 RCT、SR/MA、Cohort Study |
|
||||
| 自然语言对话风格 | 输出"像资深医学信息官写给检索助手的一封邮件",方便 HITL 编辑 |
|
||||
| 数据源感知 | Prompt 接收用户选择的数据源列表,ClinicalTrials.gov 时自动生成英文指令段 |
|
||||
| 不硬编码约束 | 不强制 Open Access / 不排除非英文文献 — 这些由用户在配置面板自主选择 |
|
||||
|
||||
### 4.2 Prompt 模板结构(Fallback)
|
||||
|
||||
```typescript
|
||||
// backend/src/common/prompt/prompt.fallbacks.ts(新增)
|
||||
const ASL_DEEP_RESEARCH_EXPANSION = `
|
||||
你是一位经验丰富的医学信息官(Medical Information Officer),
|
||||
擅长将研究者的模糊想法转化为精准的文献检索需求指令。
|
||||
|
||||
## 任务
|
||||
根据用户输入的粗略研究想法,生成一份结构化的深度文献检索指令书。
|
||||
|
||||
## 输出规则
|
||||
1. **自然语言风格**:像写邮件一样,口语化但专业,方便研究者阅读和编辑
|
||||
2. **PICOS 拆解**:明确 Population / Intervention / Comparison / Outcome / Study Design
|
||||
3. **MeSH 扩展**:为关键术语补充 MeSH 同义词(用括号标注英文 MeSH 术语)
|
||||
4. **研究设计偏好**:若用户未指定,默认优先 RCT、Systematic Review/Meta-Analysis、Cohort Study
|
||||
5. **数据源适配**:根据用户选择的数据源列表调整语言和策略
|
||||
- 若包含 ClinicalTrials.gov → 追加一段英文检索指令
|
||||
- 若包含中文数据源(CNKI/中华医学期刊网)→ 保留中文检索关键词
|
||||
6. **不得自行添加约束**:不要擅自限定"仅开放获取"或"仅英文文献"
|
||||
|
||||
## 用户输入
|
||||
- 研究想法:{{originalQuery}}
|
||||
- 选择的数据源:{{targetSources}}
|
||||
- 时间范围:{{yearRange}}
|
||||
- 目标数量:{{targetCount}}
|
||||
|
||||
## 输出格式
|
||||
请同时输出两部分:
|
||||
### Part 1: 自然语言检索指令书
|
||||
(可编辑的完整检索需求描述)
|
||||
|
||||
### Part 2: 结构化摘要(JSON)
|
||||
\`\`\`json
|
||||
{
|
||||
"objective": "...",
|
||||
"population": "...",
|
||||
"intervention": "...",
|
||||
"comparison": "...",
|
||||
"outcome": "...",
|
||||
"studyDesign": ["RCT", "Meta-analysis", ...],
|
||||
"meshTerms": ["term1", "term2", ...],
|
||||
"condition": "..."
|
||||
}
|
||||
\`\`\`
|
||||
`;
|
||||
```
|
||||
|
||||
### 4.3 Prompt 管理集成
|
||||
|
||||
| 层级 | 说明 |
|
||||
|------|------|
|
||||
| **运营端** | Prompt 管理界面 → `ASL_DEEP_RESEARCH_EXPANSION` → 可在线编辑、版本管理、A/B 测试 |
|
||||
| **代码 Fallback** | `prompt.fallbacks.ts` 写入默认模板,数据库无记录时自动使用 |
|
||||
| **调用方式** | `promptService.getPrompt('ASL_DEEP_RESEARCH_EXPANSION')` → 填充变量 → LLMFactory 调用 |
|
||||
|
||||
---
|
||||
|
||||
## 5. API 契约
|
||||
|
||||
### 5.1 需求扩写(同步)
|
||||
|
||||
**POST /api/v1/asl/research/generate-requirement**
|
||||
|
||||
@@ -145,7 +222,7 @@ npx prisma migrate dev --name add_deep_research_v2_fields
|
||||
// 请求
|
||||
{
|
||||
originalQuery: string, // "他汀预防心血管疾病,要能下载PDF的"
|
||||
targetSources: string[], // ["pubmed.ncbi.nlm.nih.gov", "bmjopen.bmj.com"]
|
||||
targetSources: string[], // 从精选数据源列表中选择(见 5.1.1)
|
||||
filters: {
|
||||
yearRange?: string, // "2010至今" | "过去5年" | "不限"
|
||||
targetCount?: string, // "~100篇" | "全面检索"
|
||||
@@ -159,22 +236,40 @@ npx prisma migrate dev --name add_deep_research_v2_fields
|
||||
data: {
|
||||
taskId: "uuid", // 已创建DB记录(status=draft)
|
||||
generatedRequirement: "请帮我执行一次深度的医学文献检索...", // LLM扩写结果
|
||||
intentSummary: { // 结构化摘要
|
||||
intentSummary: { // PICOS + MeSH 结构化摘要
|
||||
objective: "为Meta分析构建测试语料库",
|
||||
intervention: "他汀类药物 (Statins)",
|
||||
condition: "心血管疾病 (CVD)",
|
||||
literatureStandard: "高质量临床研究,PDF全文可下载"
|
||||
population: "心血管疾病高危患者",
|
||||
intervention: "他汀类药物 (Statins, HMG-CoA Reductase Inhibitors)",
|
||||
comparison: "安慰剂/未使用他汀",
|
||||
outcome: "主要不良心血管事件 (MACE) 发生率",
|
||||
studyDesign: ["RCT", "Meta-analysis", "Cohort"],
|
||||
meshTerms: ["Hydroxymethylglutaryl-CoA Reductase Inhibitors", "Cardiovascular Diseases", "Primary Prevention"],
|
||||
condition: "心血管疾病 (CVD)"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**实现要点:**
|
||||
- 调用 `LLMFactory.getAdapter('deepseek-v3')` 进行需求扩写
|
||||
- System Prompt 要求 LLM 输出结构化自然语言指令(非布尔检索式)
|
||||
- 同时创建 DB 记录,status = `draft`
|
||||
#### 5.1.1 精选数据源配置
|
||||
|
||||
### 4.2 启动执行(进入异步队列)
|
||||
基于 Unifuncs API 18 站实测结果精选的 5 个数据源:
|
||||
|
||||
| 类别 | 数据源 | domain_scope 值 | 默认 | 备注 |
|
||||
|------|--------|-----------------|------|------|
|
||||
| 🌍 英文 | **PubMed** | `https://pubmed.ncbi.nlm.nih.gov/` | ✅ 默认勾选 | 一级可用,核心数据源 |
|
||||
| 🌍 英文 | **ClinicalTrials.gov** | `https://clinicaltrials.gov/` | ☐ 可选 | ⚠️ 前端提示"需英文查询" |
|
||||
| 🌍 英文 | **Cochrane Library** | `https://www.cochranelibrary.com/` | ☐ 可选 | 一级可用,系统综述金标准 |
|
||||
| 🇨🇳 中文 | **中国知网 CNKI** | `https://www.cnki.net/` | ☐ 可选 | 二级可达,中文文献 |
|
||||
| 🇨🇳 中文 | **中华医学期刊网** | `https://medjournals.cn/` | ☐ 可选 | 二级可达,中文文献 |
|
||||
|
||||
**实现要点:**
|
||||
- 调用 `promptService.getPrompt('ASL_DEEP_RESEARCH_EXPANSION')` 获取 Prompt(无数据库记录时走 Fallback)
|
||||
- 填充变量 `{{originalQuery}}`、`{{targetSources}}`、`{{yearRange}}`、`{{targetCount}}`
|
||||
- 调用 `LLMFactory.getAdapter('deepseek-v3')` 执行扩写
|
||||
- 解析 LLM 输出:Part 1 → `generatedRequirement`,Part 2 JSON → `intentSummary`
|
||||
- 创建 DB 记录,status = `draft`
|
||||
|
||||
### 5.2 启动执行(进入异步队列)
|
||||
|
||||
**PUT /api/v1/asl/research/tasks/:id/execute**
|
||||
|
||||
@@ -193,7 +288,7 @@ npx prisma migrate dev --name add_deep_research_v2_fields
|
||||
- `jobQueue.push('asl_deep_research_v2', { taskId })` 推入 pg-boss
|
||||
- status 更新为 `pending`
|
||||
|
||||
### 4.3 任务状态与日志轮询
|
||||
### 5.3 任务状态与日志轮询
|
||||
|
||||
**GET /api/v1/asl/research/tasks/:id**
|
||||
|
||||
@@ -221,7 +316,7 @@ npx prisma migrate dev --name add_deep_research_v2_fields
|
||||
}
|
||||
```
|
||||
|
||||
### 4.4 Word 导出
|
||||
### 5.4 Word 导出
|
||||
|
||||
**GET /api/v1/asl/research/tasks/:id/export-word**
|
||||
|
||||
@@ -230,7 +325,7 @@ npx prisma migrate dev --name add_deep_research_v2_fields
|
||||
- 调用 Pandoc 转 Word
|
||||
- 返回 `.docx` 文件流
|
||||
|
||||
### 4.5 路由汇总
|
||||
### 5.5 路由汇总
|
||||
|
||||
| 方法 | 路径 | 说明 | 新增/改造 |
|
||||
|------|------|------|----------|
|
||||
@@ -243,9 +338,9 @@ npx prisma migrate dev --name add_deep_research_v2_fields
|
||||
|
||||
---
|
||||
|
||||
## 5. 后台 Worker 逻辑
|
||||
## 6. 后台 Worker 逻辑
|
||||
|
||||
### 5.1 核心流程(伪代码)
|
||||
### 6.1 核心流程(伪代码)
|
||||
|
||||
```typescript
|
||||
// backend/src/modules/asl/workers/deepResearchV2Worker.ts
|
||||
@@ -278,14 +373,33 @@ export async function processDeepResearchV2(job: Job) {
|
||||
data: { externalTaskId: unifuncsTaskId, status: 'running' }
|
||||
});
|
||||
|
||||
// 2. 轮询 Unifuncs 直到完成
|
||||
// 2. 轮询 Unifuncs 直到完成(含指数退避重试)
|
||||
let previousReasoning = '';
|
||||
const MAX_POLLS = 180; // 最多 15 分钟(180 × 5s)
|
||||
let consecutiveErrors = 0; // 连续错误计数
|
||||
const MAX_CONSECUTIVE_ERRORS = 5;
|
||||
|
||||
for (let i = 0; i < MAX_POLLS; i++) {
|
||||
await sleep(5000);
|
||||
|
||||
let data: any;
|
||||
try {
|
||||
const queryRes = await unifuncsClient.queryTask(unifuncsTaskId);
|
||||
const data = queryRes.data;
|
||||
data = queryRes.data;
|
||||
consecutiveErrors = 0; // 成功后重置
|
||||
} catch (err) {
|
||||
consecutiveErrors++;
|
||||
logger.warn(`Unifuncs query_task 第 ${consecutiveErrors} 次失败`, { taskId, error: err.message });
|
||||
|
||||
if (consecutiveErrors >= MAX_CONSECUTIVE_ERRORS) {
|
||||
throw new Error(`Unifuncs 连续 ${MAX_CONSECUTIVE_ERRORS} 次查询失败: ${err.message}`);
|
||||
}
|
||||
|
||||
// 指数退避:2s → 4s → 8s → 16s → 32s
|
||||
const backoffMs = Math.min(2000 * Math.pow(2, consecutiveErrors - 1), 32000);
|
||||
await sleep(backoffMs);
|
||||
continue;
|
||||
}
|
||||
|
||||
// 解析增量日志
|
||||
const currentReasoning = data.result?.reasoning_content || '';
|
||||
@@ -306,7 +420,7 @@ export async function processDeepResearchV2(job: Job) {
|
||||
const content = data.result?.content || '';
|
||||
const report = extractSection(content, 'REPORT_SECTION');
|
||||
const jsonList = extractSection(content, 'JSON_LIST_SECTION');
|
||||
const parsedList = safeParseJsonList(jsonList);
|
||||
const parsedList = safeParseJsonList(jsonList); // 防崩溃 JSON 解析,见 6.4
|
||||
|
||||
await prisma.aslResearchTask.update({
|
||||
where: { id: taskId },
|
||||
@@ -336,7 +450,15 @@ export async function processDeepResearchV2(job: Job) {
|
||||
}
|
||||
```
|
||||
|
||||
### 5.2 日志解析逻辑
|
||||
**v1.1 新增 — 轮询韧性设计:**
|
||||
|
||||
| 机制 | 策略 | 说明 |
|
||||
|------|------|------|
|
||||
| 瞬态失败重试 | 指数退避 2s → 4s → 8s → 16s → 32s | 网络抖动、Unifuncs 临时不可用时自动恢复 |
|
||||
| 连续失败阈值 | `MAX_CONSECUTIVE_ERRORS = 5` | 连续 5 次查询全失败才标记任务 failed |
|
||||
| 成功后重置 | `consecutiveErrors = 0` | 中间穿插成功不累计 |
|
||||
|
||||
### 6.2 日志解析逻辑
|
||||
|
||||
```typescript
|
||||
function parseReasoningToLogs(increment: string): LogEntry[] {
|
||||
@@ -360,7 +482,7 @@ function parseReasoningToLogs(increment: string): LogEntry[] {
|
||||
}
|
||||
```
|
||||
|
||||
### 5.3 output_prompt 设计
|
||||
### 6.3 output_prompt 设计
|
||||
|
||||
```typescript
|
||||
function buildOutputPrompt(): string {
|
||||
@@ -381,11 +503,55 @@ function buildOutputPrompt(): string {
|
||||
}
|
||||
```
|
||||
|
||||
### 6.4 JSON 解析防崩溃(v1.1 新增)
|
||||
|
||||
LLM 输出的 JSON 常携带 ` ```json ` 代码围栏或尾部逗号,直接 `JSON.parse` 会崩溃。
|
||||
|
||||
```typescript
|
||||
function safeParseJsonList(raw: string | null): any[] | null {
|
||||
if (!raw) return null;
|
||||
|
||||
// Step 1: 去除 Markdown 代码围栏
|
||||
let cleaned = raw.replace(/```json\s*/gi, '').replace(/```\s*/g, '');
|
||||
|
||||
// Step 2: 去除尾部逗号(数组/对象末尾的 ,] 或 ,})
|
||||
cleaned = cleaned.replace(/,\s*([}\]])/g, '$1');
|
||||
|
||||
// Step 3: 尝试解析
|
||||
try {
|
||||
const parsed = JSON.parse(cleaned);
|
||||
return Array.isArray(parsed) ? parsed : [parsed];
|
||||
} catch (e) {
|
||||
logger.warn('JSON 解析失败,尝试正则提取', { error: e.message });
|
||||
// Step 4: 降级 — 尝试逐行提取 JSON 对象
|
||||
const objects: any[] = [];
|
||||
const regex = /\{[^{}]*\}/g;
|
||||
let match;
|
||||
while ((match = regex.exec(cleaned)) !== null) {
|
||||
try {
|
||||
objects.push(JSON.parse(match[0]));
|
||||
} catch { /* 跳过无法解析的单条 */ }
|
||||
}
|
||||
return objects.length > 0 ? objects : null;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**防崩溃策略总结:**
|
||||
|
||||
| 层级 | 处理 | 覆盖场景 |
|
||||
|------|------|---------|
|
||||
| L1 | 去除 ` ```json ` 围栏 | LLM 习惯性包裹代码块 |
|
||||
| L2 | 去除尾部逗号 | `[{...}, {...},]` → `[{...}, {...}]` |
|
||||
| L3 | 标准 JSON.parse | 正常路径 |
|
||||
| L4 | 正则逐条提取 | JSON 结构被破坏但单条仍有效 |
|
||||
| L5 | 返回 null | 彻底无法解析,前端降级展示报告 |
|
||||
|
||||
---
|
||||
|
||||
## 6. 前端组件设计
|
||||
## 7. 前端组件设计
|
||||
|
||||
### 6.1 页面结构
|
||||
### 7.1 页面结构
|
||||
|
||||
```
|
||||
frontend-v2/src/modules/asl/pages/
|
||||
@@ -400,10 +566,18 @@ frontend-v2/src/modules/asl/components/
|
||||
│ └── ResultsView.tsx # Step 4: 结果展示(报告+表格)
|
||||
```
|
||||
|
||||
### 6.2 状态管理
|
||||
### 7.2 状态管理
|
||||
|
||||
**设计决策:** React Query + useState,不引入 Zustand(v1.1 确认)
|
||||
|
||||
| 状态类型 | 工具 | 理由 |
|
||||
|---------|------|------|
|
||||
| 服务端数据(任务状态、日志、结果) | `@tanstack/react-query` | 自带缓存、去重、条件轮询(refetchInterval),完美匹配轮询场景 |
|
||||
| 页面步骤流转 | `useState` | 仅 5 个步骤的 FSM,组件树内部流转,无需全局状态 |
|
||||
| 组件间共享(如 taskId) | `props drilling` / `React.memo` | 组件层级仅 2-3 层,prop 传递足够,不需要 Context/Zustand |
|
||||
|
||||
```typescript
|
||||
// 页面级状态(useState 即可,无需 Zustand)
|
||||
// 页面级状态(useState 即可,不引入 Zustand)
|
||||
interface DeepResearchState {
|
||||
currentStep: 'landing' | 'setup' | 'strategy' | 'terminal' | 'results';
|
||||
taskId: string | null;
|
||||
@@ -412,29 +586,55 @@ interface DeepResearchState {
|
||||
intentSummary: IntentSummary | null;
|
||||
isGenerating: boolean; // 需求扩写中
|
||||
}
|
||||
|
||||
// PICOS + MeSH 结构化摘要(v1.1 新增)
|
||||
interface IntentSummary {
|
||||
objective: string;
|
||||
population: string;
|
||||
intervention: string;
|
||||
comparison: string;
|
||||
outcome: string;
|
||||
studyDesign: string[];
|
||||
meshTerms: string[];
|
||||
condition: string;
|
||||
}
|
||||
```
|
||||
|
||||
### 6.3 各组件核心逻辑
|
||||
### 7.3 各组件核心逻辑
|
||||
|
||||
**LandingView(Landing 大搜索框)**
|
||||
- 居中大输入框 + "开始研究"按钮 + 推荐预置词
|
||||
- 点击后携带输入值,平滑过渡到 SetupPanel
|
||||
- 参考原型图 V4.2 的 `#landing-view` 部分
|
||||
|
||||
**SetupPanel(Step 1: 配置)**
|
||||
**SetupPanel(Step 1: 配置)** *(v1.1 数据源更新)*
|
||||
- 继承 Landing 输入值到 textarea
|
||||
- 数据源 Checkbox(PubMed/PMC, BMJ Open, Cochrane)
|
||||
- 高级过滤(年份下拉、目标数量、OA 强制)
|
||||
- 数据源 Checkbox(基于实测精选 5 个,见 5.1.1):
|
||||
- 🌍 **英文数据源**
|
||||
- [x] PubMed(默认勾选,不可取消)
|
||||
- [ ] ClinicalTrials.gov — ⚠️ 旁标橙色提示:"该站点需要英文查询,系统将自动为此数据源生成英文检索指令"
|
||||
- [ ] Cochrane Library
|
||||
- 🇨🇳 **中文数据源**
|
||||
- [ ] 中国知网 CNKI
|
||||
- [ ] 中华医学期刊网
|
||||
- 高级过滤(年份下拉、目标数量、OA 偏好 — 注意是偏好非强制)
|
||||
- 点击"解析并生成检索需求书" → POST /generate-requirement
|
||||
- Loading 后平滑展开 Step 2
|
||||
|
||||
**StrategyConfirm(Step 2: HITL 确认)**
|
||||
- 左侧 1/3:AI 意图提炼卡片(只读,来自 `intentSummary`)
|
||||
- 右侧 2/3:可编辑 textarea(内容为 `generatedRequirement`)
|
||||
- 提示文案:"您可以像写邮件一样在这里补充任何大白话要求"
|
||||
**StrategyConfirm(Step 2: HITL 确认)** *(v1.1 PICOS + MeSH)*
|
||||
- 左侧 1/3:AI 意图提炼卡片(只读,PICOS 结构化展示)
|
||||
- 🎯 研究目标:`objective`
|
||||
- 👥 研究人群:`population`
|
||||
- 💊 干预措施:`intervention`(含 MeSH 英文术语)
|
||||
- ⚖️ 对照组:`comparison`
|
||||
- 📊 结局指标:`outcome`
|
||||
- 📋 研究设计:`studyDesign` Tag 列表
|
||||
- 🏷️ MeSH 术语:`meshTerms` 小标签展示
|
||||
- 右侧 2/3:可编辑 textarea(内容为 `generatedRequirement`,自然语言对话风格)
|
||||
- 提示文案:"这是 AI 以医学信息官的视角为您扩写的检索需求,您可以像写邮件一样直接编辑修改"
|
||||
- 点击"确认需求,启动 Deep Research" → PUT /execute
|
||||
|
||||
**AgentTerminal(Step 3: 暗黑终端)**
|
||||
**AgentTerminal(Step 3: 暗黑终端)** *(v1.1 条件滚动)*
|
||||
- 暗色背景(bg-slate-900),固定高度 550px,内部滚动
|
||||
- 顶部状态栏:红/黄/绿圆点 + "Running" 脉冲指示灯
|
||||
- 日志渲染:
|
||||
@@ -443,7 +643,10 @@ interface DeepResearchState {
|
||||
- `done` → 绿色 + ✅ 图标
|
||||
- `summary` → 黄色 + 📋 图标
|
||||
- 轮询逻辑:`useQuery` + refetchInterval: 3000(running 时启用)
|
||||
- 新日志出现时 auto-scroll 到底部
|
||||
- **条件自动滚动**(v1.1 新增):
|
||||
- 用户**未手动上滚**时 → 新日志自动滚到底部
|
||||
- 用户**已手动上滚**查看历史 → 停止自动滚动,避免打断阅读
|
||||
- 实现:`onScroll` 检测 `scrollTop + clientHeight < scrollHeight - threshold` 设 `userScrolled` flag
|
||||
- 完成后状态灯变灰 "Finished",终端可折叠
|
||||
|
||||
**ResultsView(Step 4: 结果)**
|
||||
@@ -454,8 +657,9 @@ interface DeepResearchState {
|
||||
- 列:标题(可点击跳转 PubMed)、期刊、年份、类型 Tag、PDF 状态
|
||||
- 支持简单搜索过滤
|
||||
- 分页(前端分页即可,数据量 ~100 条)
|
||||
- **降级展示**:若 `resultList` 为 null(JSON 解析失败),隐藏表格,仅展示综合报告
|
||||
|
||||
### 6.4 轮询 Hook
|
||||
### 7.4 轮询 Hook
|
||||
|
||||
```typescript
|
||||
// hooks/useDeepResearchTask.ts
|
||||
@@ -474,94 +678,105 @@ function useDeepResearchTask(taskId: string | null) {
|
||||
|
||||
---
|
||||
|
||||
## 7. 复用清单(不重复造轮子)
|
||||
## 8. 复用清单(不重复造轮子)
|
||||
|
||||
| 能力 | 来源 | 用法 |
|
||||
|------|------|------|
|
||||
| LLM 调用 | `common/llm/LLMFactory` | DeepSeek-V3 需求扩写 |
|
||||
| **Prompt 管理** | `common/prompt/promptService` | `ASL_DEEP_RESEARCH_EXPANSION` Prompt 获取(运营端可配 + 代码 Fallback) |
|
||||
| pg-boss 队列 | `common/jobs/jobQueue` | Worker 注册与任务推送 |
|
||||
| 日志服务 | `common/logging/logger` | 全程结构化日志 |
|
||||
| 认证中间件 | `common/auth/authenticate` | 所有 API 路由 |
|
||||
| Prisma 全局实例 | `config/database` | 数据库操作 |
|
||||
| Word 导出 | Pandoc(Python 微服务) | 复用 Protocol Agent 验证的方案 |
|
||||
| DeepResearch 引擎 | `common/deepresearch/` | Unifuncs API 封装(create_task / query_task) |
|
||||
| 前端 API Client | `common/api/axios` | 带认证的请求 |
|
||||
| 前端布局 | `ASLLayout.tsx` | 左侧导航 |
|
||||
|
||||
---
|
||||
|
||||
## 8. 分阶段开发计划
|
||||
## 9. 分阶段开发计划
|
||||
|
||||
### Phase 1: 数据库 + 需求扩写(Day 1)
|
||||
### Phase 1: 数据库 + Prompt 管理 + 需求扩写(Day 1)
|
||||
|
||||
**目标:** 用户输入粗略想法 → AI 扩写为结构化指令书 → 用户可编辑修改
|
||||
**目标:** 用户输入粗略想法 → AI 按 PICOS 框架扩写为结构化指令书 → 用户可编辑修改
|
||||
|
||||
| 任务 | 文件 | 说明 |
|
||||
|------|------|------|
|
||||
| Schema 迁移 | `prisma/schema.prisma` | 新增 6 个字段,`prisma migrate dev` |
|
||||
| 需求扩写 Prompt | `services/requirementExpansionService.ts` | 新建服务,调用 DeepSeek-V3 扩写 |
|
||||
| **Prompt Fallback** | `common/prompt/prompt.fallbacks.ts` | 新增 `ASL_DEEP_RESEARCH_EXPANSION` 兜底模板(PICOS + MeSH) |
|
||||
| 需求扩写服务 | `services/requirementExpansionService.ts` | 新建,调用 promptService → LLMFactory → 解析 Part 1/Part 2 输出 |
|
||||
| 扩写 API | `controllers/researchController.ts` | 新增 `POST /generate-requirement` |
|
||||
| 启动 API | `controllers/researchController.ts` | 新增 `PUT /tasks/:id/execute` |
|
||||
| 状态 API 改造 | `controllers/researchController.ts` | 改造 `GET /tasks/:id`,返回新字段 |
|
||||
| 状态 API 改造 | `controllers/researchController.ts` | 改造 `GET /tasks/:id`,返回新字段(含 PICOS intentSummary) |
|
||||
| **数据源配置** | `config/dataSources.ts` | 新建,定义 5 个精选数据源常量(domain、label、默认状态、备注) |
|
||||
| 路由注册 | `routes/index.ts` | 注册新端点 |
|
||||
|
||||
**验收标准:**
|
||||
- [ ] `POST /generate-requirement` 返回扩写后的指令书
|
||||
- [ ] `POST /generate-requirement` 返回 PICOS 结构化摘要 + 自然语言指令书
|
||||
- [ ] Prompt 从数据库加载,无记录时自动走 Fallback
|
||||
- [ ] `PUT /tasks/:id/execute` 成功推入 pg-boss 队列
|
||||
- [ ] `GET /tasks/:id` 返回含新字段的完整数据
|
||||
- [ ] 选择 ClinicalTrials.gov 时,扩写结果包含英文检索指令段
|
||||
|
||||
### Phase 2: Worker 改造 — Unifuncs 异步模式(Day 2)
|
||||
|
||||
**目标:** Worker 使用 create_task + query_task 轮询,增量日志写入 DB
|
||||
**目标:** Worker 使用 create_task + query_task 轮询(含指数退避),增量日志写入 DB
|
||||
|
||||
| 任务 | 文件 | 说明 |
|
||||
|------|------|------|
|
||||
| Unifuncs 异步客户端 | `services/unifuncsAsyncClient.ts` | 新建,封装 create_task / query_task |
|
||||
| V2 Worker | `workers/deepResearchV2Worker.ts` | 新建,轮询 + 日志解析 + 结果切割 |
|
||||
| V2 Worker | `workers/deepResearchV2Worker.ts` | 新建,轮询 + 指数退避重试 + 日志解析 + 结果切割 |
|
||||
| 日志解析器 | `utils/reasoningParser.ts` | 新建,reasoning_content → 结构化日志 |
|
||||
| 结果解析器 | `utils/resultParser.ts` | 新建,XML 标签切割报告与 JSON 列表 |
|
||||
| **结果解析器** | `utils/resultParser.ts` | 新建,XML 标签切割 + `safeParseJsonList` 防崩溃解析 |
|
||||
| Worker 注册 | `workers/researchWorker.ts` | 注册新 Worker `asl_deep_research_v2` |
|
||||
|
||||
**验收标准:**
|
||||
- [ ] Worker 成功调用 unifuncs create_task
|
||||
- [ ] 轮询期间 execution_logs 持续增量更新
|
||||
- [ ] 完成后 synthesis_report 和 result_list 正确入库
|
||||
- [ ] **韧性测试**:模拟单次 query_task 失败 → 指数退避后自动恢复
|
||||
- [ ] **JSON 防崩溃**:LLM 输出带 ` ```json ` 围栏 → safeParseJsonList 正确解析
|
||||
- [ ] 超时保护(15 分钟)和错误处理正常
|
||||
|
||||
### Phase 3: 前端 — Landing + 配置 + HITL 确认(Day 3)
|
||||
|
||||
**目标:** 完成 Step 1-2 的前端交互,瀑布流渐进展开
|
||||
**目标:** 完成 Step 1-2 的前端交互,瀑布流渐进展开,PICOS 结构化展示
|
||||
|
||||
| 任务 | 文件 | 说明 |
|
||||
|------|------|------|
|
||||
| 主页面骨架 | `pages/DeepResearchPage.tsx` | 新建,管理瀑布流状态 |
|
||||
| 主页面骨架 | `pages/DeepResearchPage.tsx` | 新建,useState 管理瀑布流步骤 |
|
||||
| Landing 组件 | `components/deep-research/LandingView.tsx` | 大搜索框 + 推荐预置词 |
|
||||
| 配置面板 | `components/deep-research/SetupPanel.tsx` | 数据源 + 高级过滤 + 生成按钮 |
|
||||
| HITL 确认 | `components/deep-research/StrategyConfirm.tsx` | 左右分栏 + 可编辑 textarea |
|
||||
| **配置面板** | `components/deep-research/SetupPanel.tsx` | **精选 5 数据源 Checkbox**(含 ClinicalTrials.gov 英文提示)+ 高级过滤 |
|
||||
| **HITL 确认** | `components/deep-research/StrategyConfirm.tsx` | 左侧 **PICOS + MeSH 卡片** + 右侧可编辑 textarea |
|
||||
| API 函数 | `api/index.ts` | 新增 generateRequirement / executeTask |
|
||||
| 路由注册 | `pages/index.tsx` | 新增 V2 路由 |
|
||||
|
||||
**验收标准:**
|
||||
- [ ] Landing 输入 → Step 1 配置面板流畅过渡
|
||||
- [ ] 数据源显示 5 个选项,PubMed 默认勾选,ClinicalTrials.gov 标注英文提示
|
||||
- [ ] 点击"生成需求书" → Loading → Step 2 展开
|
||||
- [ ] Step 2 左侧摘要卡片正确展示,右侧 textarea 可编辑
|
||||
- [ ] Step 2 左侧 PICOS 结构化摘要 + MeSH 术语标签正确展示
|
||||
- [ ] Step 2 右侧 textarea 显示自然语言对话风格的检索指令书,可编辑
|
||||
- [ ] 点击"启动 Deep Research" → 进入 Step 3
|
||||
|
||||
### Phase 4: 前端 — 终端 + 结果展示(Day 4)
|
||||
|
||||
**目标:** 完成 Step 3-4,终端实时日志 + 结果报告/表格
|
||||
**目标:** 完成 Step 3-4,终端实时日志(条件滚动)+ 结果报告/表格(含降级)
|
||||
|
||||
| 任务 | 文件 | 说明 |
|
||||
|------|------|------|
|
||||
| 暗黑终端 | `components/deep-research/AgentTerminal.tsx` | 日志渲染 + auto-scroll + 状态灯 |
|
||||
| 结果视图 | `components/deep-research/ResultsView.tsx` | 横幅 + 报告 + 文献表格 |
|
||||
| 轮询 Hook | `hooks/useDeepResearchTask.ts` | 3s 轮询,running 时启用 |
|
||||
| **暗黑终端** | `components/deep-research/AgentTerminal.tsx` | 日志渲染 + **条件 auto-scroll** + 状态灯 |
|
||||
| 结果视图 | `components/deep-research/ResultsView.tsx` | 横幅 + 报告 + 文献表格 + **降级展示** |
|
||||
| 轮询 Hook | `hooks/useDeepResearchTask.ts` | React Query 3s 轮询,running 时启用 |
|
||||
| 终端样式 | CSS / Tailwind | 暗色主题 + 日志类型着色 |
|
||||
|
||||
**验收标准:**
|
||||
- [ ] 终端日志按类型着色,新日志 auto-scroll
|
||||
- [ ] 终端日志按类型着色,未手动滚动时 auto-scroll,手动上滚时暂停
|
||||
- [ ] 完成后终端折叠,结果区展开
|
||||
- [ ] 综合报告 Markdown 渲染正确
|
||||
- [ ] 文献清单表格展示(标题可点击跳转 PubMed)
|
||||
- [ ] **降级验证**:resultList 为 null 时,隐藏表格仅展示报告
|
||||
- [ ] 全流程端到端联调通过
|
||||
|
||||
### Phase 5: Word 导出 + 收尾(Day 5)
|
||||
@@ -585,7 +800,7 @@ function useDeepResearchTask(taskId: string | null) {
|
||||
|
||||
---
|
||||
|
||||
## 9. 验收标准总览
|
||||
## 10. 验收标准总览
|
||||
|
||||
### 功能验收
|
||||
|
||||
@@ -608,17 +823,36 @@ function useDeepResearchTask(taskId: string | null) {
|
||||
|
||||
---
|
||||
|
||||
## 10. 风险与应对
|
||||
## 11. 风险与应对
|
||||
|
||||
| 风险 | 概率 | 影响 | 应对措施 |
|
||||
|------|------|------|---------|
|
||||
| Unifuncs 异步模式下 reasoning_content 不增量更新 | 低 | 终端日志为空 | 降级方案:只显示 progress.message |
|
||||
| output_prompt XML 标签分割不可靠 | 中 | 报告和列表无法分离 | 降级方案:整体作为报告展示,文献从 PubMed 链接提取 |
|
||||
| **LLM 输出 JSON 格式不规范** | 中 | 文献列表解析失败 | `safeParseJsonList` 四层防崩溃(围栏清理 → 尾逗号 → 标准解析 → 正则逐条),见 6.4 |
|
||||
| **Unifuncs query_task 瞬态失败** | 中 | 轮询中断 | 指数退避重试(2s→32s),连续 5 次失败才标记 failed,见 6.1 |
|
||||
| Unifuncs 长任务超时 | 低 | 任务失败 | MAX_POLLS=180(15分钟),超时标记 failed,用户可重试 |
|
||||
| **ClinicalTrials.gov 中文查询失败** | 高 | 临床试验检索无结果 | Prompt 自动为该数据源生成英文检索指令段,前端标注提示 |
|
||||
| Pandoc Word 导出在 SAE 不可用 | 低 | 导出失败 | 降级方案:导出为 Markdown 文件 |
|
||||
| **Prompt 管理服务不可用** | 低 | 需求扩写失败 | 代码内置 Fallback 模板,数据库无记录时自动使用 |
|
||||
|
||||
---
|
||||
|
||||
## 附录:v1.1 更新变更记录
|
||||
|
||||
| 变更项 | 章节 | 说明 |
|
||||
|--------|------|------|
|
||||
| Prompt 管理集成 | §4(新增) | 需求扩写 Prompt 通过 Prompt 管理服务配置,含 PICOS + MeSH 扩展 |
|
||||
| 精选数据源 | §5.1.1(新增) | 基于 18 站实测精选 5 个数据源(3英文+2中文) |
|
||||
| 指数退避重试 | §6.1(更新) | Worker 轮询增加瞬态失败指数退避(2s→32s) |
|
||||
| JSON 防崩溃 | §6.4(新增) | safeParseJsonList 四层解析策略 |
|
||||
| PICOS 摘要 | §7.2, §7.3(更新) | IntentSummary 扩展为 PICOS + MeSH 结构 |
|
||||
| 条件自动滚动 | §7.3(更新) | AgentTerminal 手动上滚时暂停 auto-scroll |
|
||||
| 状态管理确认 | §1.2, §7.2(更新) | 确认 React Query + useState,不引入 Zustand |
|
||||
| 降级展示 | §7.3(更新) | ResultsView 在 resultList=null 时仅展示报告 |
|
||||
|
||||
---
|
||||
|
||||
**文档维护者:** 开发团队
|
||||
**最后更新:** 2026-02-22
|
||||
**文档状态:** ✅ 方案确认,待开发启动
|
||||
**文档状态:** ✅ v1.1 方案确认(含审查建议),待开发启动
|
||||
|
||||
@@ -0,0 +1,72 @@
|
||||
# **Deep Research V2.0 开发计划审查与优化建议书**
|
||||
|
||||
**审查对象:** 07-Deep Research V2.0 开发计划.md
|
||||
|
||||
**审查基准:** V4.1 PRD、Unifuncs API 官方文档、系统高可用性架构标准
|
||||
|
||||
**审查结论:** 整体架构设计(pg-boss + 异步轮询 + 单页瀑布流)非常优秀。但在**容错机制、数据解析提取、前端状态管理、以及终端 UX 细节**上存在明显隐患,需进行针对性修正。
|
||||
|
||||
## **🔴 一、 核心高危风险与修正建议 (Critical Risks & Fixes)**
|
||||
|
||||
### **1. Unifuncs 轮询逻辑的“脆弱性” (Worker 改造部分)**
|
||||
|
||||
* **原计划缺陷:** 在 deepResearchV2Worker.ts 的伪代码中,轮询逻辑简单地使用了 await unifuncsClient.queryTask()。如果在长达 15 分钟的轮询中,发生了一次短暂的网络抖动或 Unifuncs 网关返回了 502/504,整个 try-catch 就会抛出异常,导致这个耗费了大量时间的长任务直接 failed。
|
||||
* **修正方案:**
|
||||
在 Worker 的 while 循环内部,**必须包裹 try-catch 并引入重试机制(Exponential Backoff)**。遇到网络请求失败时,不能直接退出,而是应该记录 warning 并 continue 等待下一次 5s 后的轮询,连续失败超过 5 次才判定任务崩溃。
|
||||
|
||||
### **2\. 输出解析的“幻觉陷阱” (JSON Parsing)**
|
||||
|
||||
* **原计划缺陷:** 计划中提到使用 extractSection 提取 \<JSON\_LIST\_SECTION\> 后,直接调用 safeParseJsonList。但在实际的大模型输出中,即便你规定了 XML 标签,模型极大概率会在标签内输出 Markdown 代码块,例如:
|
||||
\<JSON\_LIST\_SECTION\>
|
||||
\`\`\`json
|
||||
\[{"title": "..."}\]
|
||||
|
||||
\</JSON\_LIST\_SECTION\>
|
||||
如果直接 \`JSON.parse()\` 必定报错崩溃。
|
||||
|
||||
* **修正方案:**
|
||||
utils/resultParser.ts 中的 safeParseJsonList 方法**必须**包含预处理逻辑:
|
||||
1. 使用正则清洗掉可能存在的 json\` 和 首尾包裹符。
|
||||
2. 增加**兜底方案 (Fallback)**:如果正则和清洗均宣告失败,调用一次系统自带的 DeepSeek-V3,传入提取出的坏字符串,要求其强制吐出标准 JSON Array(利用 response\_format: { type: "json\_object" }),确保 Step 4 的表格必定能渲染。
|
||||
|
||||
## **🟡 二、 前端架构与交互体验优化 (Frontend UX/Architecture)**
|
||||
|
||||
### **1\. 终端日志自动滚动的“反人类”交互 (AgentTerminal)**
|
||||
|
||||
* **原计划缺陷:** 计划中提到“新日志出现时 auto-scroll 到底部”。如果用户在长达 5 分钟的执行过程中,向上滚动滚轮想查看之前的思考日志,此时新日志一推过来,页面又被强制拉回到底部,这种体验非常“反人类”。
|
||||
* **修正方案:**
|
||||
修改 Auto-scroll 触发逻辑:前端通过 ref 判断当前滚动条位置,**只有当用户处于滚动条最底部(或距离底部 \< 50px)时,收到新日志才自动滚动到底部**。如果用户已经向上滚动阅读历史,则只在容器内部静默追加日志,不改变视口位置(可浮现一个“↓ 有新日志”的提示小红点)。
|
||||
|
||||
### **2\. 前端状态管理的“螺旋地狱” (State Management)**
|
||||
|
||||
* **原计划缺陷:** 计划在 6.2 节中明确写道:“页面级状态(useState 即可,无需 Zustand)”。
|
||||
这是一个有 4 个复杂步骤(Landing \-\> Setup \-\> HITL \-\> Terminal \-\> Results)的单页应用。如果将 taskId、query、logs、step 全塞在父组件的 useState 里,会导致极其严重的**属性透传(Prop Drilling)和父组件频繁全量重渲染**。
|
||||
* **修正方案:**
|
||||
坚决反对在此场景下仅使用 useState。建议使用现有的 @tanstack/react-query 缓存状态,或者引入极其轻量的 Context API / Zustand。特别是终端日志(Logs)频繁追加时,不能让整个大页面跟着一起重绘,终端组件应当是局部渲染的。
|
||||
|
||||
## **🟢 三、 业务细节与数据闭环补充 (Business Details)**
|
||||
|
||||
### **1\. Word 导出功能的缺失逻辑 (Word Export)**
|
||||
|
||||
* **原计划缺陷:** 提到“报告 \+ 文献表格 → 完整 Markdown → Pandoc 转 Word”。但 result\_list 存的是 JSON Array,Pandoc 不认识 JSON。
|
||||
* **修正方案:**
|
||||
在 services/wordExportService.ts 中,需要明确增加一步\*\*“JSON to Markdown Table”\*\*的转化方法。
|
||||
即把 JSON 数组转化为:
|
||||
| 文献标题 | 期刊 | 发表年份 | 链接 |
|
||||
|---|---|---|---|
|
||||
| Efficacy of... | Lancet | 2023 | \[PubMed\](...) |
|
||||
|
||||
再将这个生成的 Markdown Table 拼接到 synthesis\_report 的末尾,最后统一送给 Pandoc 渲染。
|
||||
|
||||
### **2\. 第一步“需求扩写”的成本追踪漏洞**
|
||||
|
||||
* **原计划缺陷:**
|
||||
数据库 Schema 只增加了 Unifuncs 任务的 tokenUsage 记录。但 Step 1 中,调用本地 DeepSeek-V3 把一句话扩写成几百字的“自然语言指令书”,也是要消耗 Token 的。
|
||||
* **修正方案:**
|
||||
在 AslResearchTask 模型中,应将 tokenUsage 分离或扩展为能够记录两次消耗的结构。例如:internal\_token\_usage (记录扩写的成本) 和 external\_token\_usage (记录 Unifuncs 成本),以实现精细化的财务核算。
|
||||
|
||||
## **📝 最终评审结论**
|
||||
|
||||
请开发团队仔细阅读上述 6 条修正建议,并同步更新到研发 Task 列表中。
|
||||
|
||||
特别是在**【JSON解析防崩溃】**和**【终端条件自动滚动】**这两点上,直接决定了本次 V2.0 交付的“工业级质感”。只要把控好这几个细节,原有的技术路线完全可以跑通,且效果会非常惊艳!
|
||||
@@ -0,0 +1,114 @@
|
||||
# **Deep Research 需求扩写 Prompt 设计与检索维度指南**
|
||||
|
||||
**设计者:** 医学信息官 (MIO)
|
||||
|
||||
**应用场景:** ASL Deep Research V2.0 (MVP) \- Step 1 需求转化阶段
|
||||
|
||||
**底层引擎:** 本地 DeepSeek-V3 / Qwen-Max (负责扩写) \-\> Unifuncs (负责执行)
|
||||
|
||||
## **一、 文献检索维度的解构 (MIO 视角)**
|
||||
|
||||
要把用户的“一句话”扩写成一份严谨的检索指令书,LLM 必须脑补并结构化以下维度。这些维度将直接决定 Unifuncs 引擎去哪些网页、抓取什么关键词、过滤什么条件。
|
||||
|
||||
### **🚨 1\. 必须项 (Mandatory Fields \- 决定检索成败)**
|
||||
|
||||
如果用户没提,LLM 必须根据医学常识自动补齐其合理范围;如果补不齐,应设置默认的最优标准。
|
||||
|
||||
1. **核心疾病/目标人群 (Population):** 必须明确适应症。*(例如:用户说“他汀”,LLM必须根据常识扩写出可能针对的“高脂血症”或“心血管疾病预防”人群)*。
|
||||
2. **核心干预措施 (Intervention):** 具体的药物、疗法或器械。
|
||||
3. **目标文献类型 (Study Design \- 极度重要):** 在循证医学中,文献类型决定了证据等级。如果用户没说,LLM 默认必须限定为:**高质量的随机对照试验 (RCT)、前瞻性队列研究、系统综述与 Meta 分析**。绝不能让引擎去抓取个案报道 (Case Report) 或动物实验。
|
||||
4. **获取约束 (Access Rule):** 这是我们系统的硬杠杠。必须强制要求:**仅限开放获取 (Open Access) 且包含完整 PDF 链接的文献**。
|
||||
|
||||
### **💡 2\. 可选项 (Optional Fields \- 提升查准率)**
|
||||
|
||||
用户如果有提及,要强化;如果没提及,LLM 可以适当发散或留白。
|
||||
|
||||
1. **对照组 (Comparison):** 安慰剂 (Placebo) 还是标准疗法 (Standard of Care)。
|
||||
2. **特定结局指标 (Outcomes):** 比如 OS (总生存期)、MACE (主要心血管不良事件)、AEs (不良反应)。
|
||||
3. **同义词与 MeSH 词扩展 (Synonyms Expansion):** 这是体现“专业性”的核心!用户可能只输入了 "心衰",LLM 必须在指令书里附带提供 "Heart Failure", "HFrEF", "HFpEF" 等检索词,供 Unifuncs 扩大查全率。
|
||||
|
||||
## **二、 核心 System Prompt 设计 (可以直接写入代码)**
|
||||
|
||||
请将以下 Prompt 部署到 services/requirementExpansionService.ts 中。
|
||||
|
||||
\# Role
|
||||
你是一名受过严谨训练的“临床医学信息专家(Medical Information Officer)”与循证医学图书管理员。
|
||||
|
||||
\# Task
|
||||
用户的输入往往是口语化且不完整的临床检索诉求。你的任务是将用户简短的一句话,扩写、翻译并结构化为一份《深度检索指令书》。
|
||||
这份指令书将直接发送给另一个拥有自主上网能力的 AI 深搜引擎(Agent)去执行。因此,你的语言必须清晰、专业、逻辑严密,并利用你的医学知识为用户补全隐含的专业要求。
|
||||
|
||||
\# Instructions & Rules
|
||||
1\. \*\*理解与补全 (PICOS):\*\* 基于用户的输入,识别 P (人群)、I (干预)、C (对照)、O (结局)、S (研究类型)。
|
||||
2\. \*\*强制文献质量过滤:\*\* 如果用户没有指定文献类型,你必须强制添加:“优先获取 随机对照试验(RCT)、前瞻性队列研究、系统综述与Meta分析;排除动物实验、体外研究、个案报道及非英文文献”。
|
||||
3\. \*\*强制数据可及性:\*\* 必须在指令中强调:“只提取开放获取 (Open Access) 且附带有效 PDF 全文下载链接的文献”。
|
||||
4\. \*\*扩展专业检索词 (MeSH):\*\* 利用你的医学知识,将用户的中文或俗称,扩展为标准的英文医学主题词 (MeSH) 和常见缩写,写在【扩展检索词库】中,指导深搜引擎扩大召回率。
|
||||
|
||||
\# Output Format (严格遵循以下 Markdown 结构,不要输出额外废话)
|
||||
|
||||
请帮我执行一次深度的医学文献检索。以下是具体的检索要求:
|
||||
|
||||
【核心检索主题】
|
||||
(一句话总结用户意图,例如:评估XXX在YYY中的疗效与安全性)
|
||||
|
||||
【目标人群与干预措施】
|
||||
\- 人群 (Population):(详细描述疾病阶段、特征,如无明确要求则合理推断)
|
||||
\- 干预 (Intervention / Comparison):(具体的药物、疗法及可能的对照)
|
||||
|
||||
【关注的结局指标 (Outcomes)】
|
||||
(列出该领域常见的核心指标,如 OS, PFS, 并发症发生率等。如果用户指定了则用用户的)
|
||||
|
||||
【目标文献类型与质量要求】
|
||||
\- 必须是:随机对照试验 (RCT)、队列研究、系统综述与 Meta 分析。
|
||||
\- 强制排除:动物实验、体外细胞研究、个案报道。
|
||||
\- 强制获取要求:必须是开放获取 (Open Access) 的文献,且必须能获取 PDF 全文或完整 XML 数据。对于需要付费墙 (Paywall) 的文献请予以忽略。
|
||||
|
||||
【扩展检索词库 (供搜索引擎参考)】
|
||||
(列出 5-8 个核心的英文专业词汇、MeSH词或缩写,用逗号分隔。例如:Hydroxymethylglutaryl-CoA Reductase Inhibitors, Heart Failure, MACE...)
|
||||
|
||||
## **三、 效果演示 (Before vs After)**
|
||||
|
||||
你可以看到,经过这个 Prompt 处理后,原本干瘪的需求变得极其丰满,且直接指导了 Unifuncs 引擎该怎么做。
|
||||
|
||||
### **🧑⚕️ 医生的粗略输入 (前端传入)**
|
||||
|
||||
"替雷利珠单抗治疗非小细胞肺癌,只要带PDF的"
|
||||
|
||||
### **🤖 LLM 扩写后的指令书 (前端 Step 2 展示给用户确认)**
|
||||
|
||||
请帮我执行一次深度的医学文献检索。以下是具体的检索要求:
|
||||
|
||||
**【核心检索主题】**
|
||||
|
||||
评估替雷利珠单抗(Tislelizumab)在治疗非小细胞肺癌(NSCLC)中的疗效与安全性。
|
||||
|
||||
**【目标人群与干预措施】**
|
||||
|
||||
* **人群 (Population):** 晚期、局部晚期或转移性非小细胞肺癌患者(可包含一线或二线治疗人群)。
|
||||
* **干预 (Intervention / Comparison):** 替雷利珠单抗单药或联合化疗,对照组可为标准含铂双药化疗或安慰剂。
|
||||
|
||||
**【关注的结局指标 (Outcomes)】**
|
||||
|
||||
重点关注总生存期 (OS)、无进展生存期 (PFS)、客观缓解率 (ORR) 以及免疫相关不良事件 (irAEs) 的发生率。最好能提取到 HR 值及 95% CI。
|
||||
|
||||
**【目标文献类型与质量要求】**
|
||||
|
||||
* 必须是:随机对照试验 (RCT)、前瞻性队列研究、系统综述与 Meta 分析。
|
||||
* 强制排除:动物实验、体外细胞研究、个案报道。
|
||||
* 强制获取要求:必须是开放获取 (Open Access) 的文献,且必须能获取 PDF 全文或完整 XML 数据。对于需要付费墙 (Paywall) 的文献请予以忽略。
|
||||
|
||||
**【扩展检索词库 (供搜索引擎参考)】**
|
||||
|
||||
Tislelizumab, BGB-A317, Non-Small Cell Lung Cancer, NSCLC, Carcinoma, Non-Small-Cell Lung (MeSH), PD-1 Inhibitors, Overall Survival.
|
||||
|
||||
## **四、 为什么这种设计对 Unifuncs API 特别有效?**
|
||||
|
||||
Unifuncs 是一个基于大模型推理的 Agent。如果你只给它扔一句 "替雷利珠单抗治疗非小细胞肺癌",它会像个无头苍蝇一样在网上随便搜几个百度百科或知乎的新闻来交差。
|
||||
|
||||
但当你把上面这篇**充斥着 MeSH词、明确排除了动物实验、明确要求找 RCT、并且要求带有 PDF** 的大白话发给 Unifuncs 的 messages 时:
|
||||
|
||||
1. Unifuncs 会直接把 Tislelizumab AND NSCLC AND Randomized Controlled Trial 作为 PubMed 的搜索词。
|
||||
2. 它在阅读网页时,看到没有 PDF 下载链接的,会自动根据指令里的【强制获取要求】执行 Discard(放弃)动作。
|
||||
3. 它在总结时,会自动围绕【关注的结局指标】去撰写你的 synthesis\_report。
|
||||
|
||||
这就是大模型时代**“用魔法(本地大模型)打败魔法(搜索大模型)”**的最佳实践!
|
||||
@@ -157,9 +157,114 @@ interface DecodedToken {
|
||||
| IIT | N/A (企业微信) | N/A | ✅ 企业微信userId | ✅ |
|
||||
| Prompt管理 | ✅ getAuthHeaders | ✅ authenticate | ✅ getUserId | ✅ |
|
||||
|
||||
## 5. 常见错误和解决方案
|
||||
## 5. 测试脚本认证规范
|
||||
|
||||
### 5.1 401 Unauthorized
|
||||
> 编写 API 测试脚本时,需要先通过登录接口获取 Token,再携带到后续请求中。
|
||||
|
||||
### 5.1 登录接口
|
||||
|
||||
```
|
||||
POST /api/v1/auth/login/password
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"phone": "13800000001",
|
||||
"password": "123456"
|
||||
}
|
||||
```
|
||||
|
||||
### 5.2 登录响应结构
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
"user": { "id": "...", "name": "...", "role": "SUPER_ADMIN" },
|
||||
"tokens": {
|
||||
"accessToken": "eyJhbGciOiJIUzI1NiIs...",
|
||||
"refreshToken": "..."
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Token 提取路径**:`res.data.data.tokens.accessToken`
|
||||
|
||||
### 5.3 测试脚本通用模板
|
||||
|
||||
```typescript
|
||||
const BASE_URL = process.env.TEST_BASE_URL || 'http://localhost:3001';
|
||||
const API_PREFIX = `${BASE_URL}/api/v1`;
|
||||
|
||||
const TEST_PHONE = process.env.TEST_PHONE || '13800000001';
|
||||
const TEST_PASSWORD = process.env.TEST_PASSWORD || '123456';
|
||||
|
||||
let authToken = '';
|
||||
|
||||
async function api(path: string, options: RequestInit = {}) {
|
||||
const res = await fetch(`${API_PREFIX}${path}`, {
|
||||
...options,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
...(authToken ? { Authorization: `Bearer ${authToken}` } : {}),
|
||||
...(options.headers || {}),
|
||||
},
|
||||
});
|
||||
const contentType = res.headers.get('content-type') || '';
|
||||
const data = contentType.includes('json') ? await res.json() : await res.text();
|
||||
return { status: res.status, data };
|
||||
}
|
||||
|
||||
// 登录获取 Token
|
||||
async function login() {
|
||||
const res = await api('/auth/login/password', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify({ phone: TEST_PHONE, password: TEST_PASSWORD }),
|
||||
});
|
||||
if (res.status !== 200 || !res.data.success) {
|
||||
throw new Error(`登录失败: ${JSON.stringify(res.data)}`);
|
||||
}
|
||||
authToken = res.data.data.tokens.accessToken;
|
||||
}
|
||||
```
|
||||
|
||||
### 5.4 测试账号说明
|
||||
|
||||
| 手机号 | 密码 | 角色 | 说明 |
|
||||
|--------|------|------|------|
|
||||
| 13800000001 | 123456 | SUPER_ADMIN | 超级管理员,可访问所有模块 |
|
||||
| 13800000000 | 123456 | USER | 普通测试用户 |
|
||||
|
||||
> **注意**:`SUPER_ADMIN` 和 `PROMPT_ENGINEER` 角色在 `requireModule()` 中间件中自动豁免模块权限检查,无需在 `user_modules` 表中分配模块。普通 `USER` 角色需要先在 `platform_schema.user_modules` 中分配对应模块才能访问。
|
||||
|
||||
### 5.5 运行测试脚本
|
||||
|
||||
```bash
|
||||
# 前置条件:后端服务运行中 + PostgreSQL 运行中
|
||||
cd backend
|
||||
|
||||
# 方式一:使用默认测试账号
|
||||
npx tsx src/modules/xxx/__tests__/test-script.ts
|
||||
|
||||
# 方式二:通过环境变量指定账号
|
||||
$env:TEST_PHONE="13800000001"; $env:TEST_PASSWORD="123456"; npx tsx src/modules/xxx/__tests__/test-script.ts
|
||||
```
|
||||
|
||||
### 5.6 用户表说明
|
||||
|
||||
用户数据存储在 `platform_schema.users` 表中(**不是** `public.users`)。Prisma schema 中的 `User` model 映射到此表。关键字段:
|
||||
|
||||
| 字段 | 说明 |
|
||||
|------|------|
|
||||
| `phone` | 登录手机号(唯一) |
|
||||
| `password` | bcrypt 哈希密码 |
|
||||
| `role` | 角色:SUPER_ADMIN / HOSPITAL_ADMIN / DEPARTMENT_ADMIN / PROMPT_ENGINEER / USER 等 |
|
||||
| `tenant_id` | 租户 ID |
|
||||
| `status` | active / disabled |
|
||||
|
||||
## 6. 常见错误和解决方案
|
||||
|
||||
### 6.1 401 Unauthorized
|
||||
|
||||
**原因**: 前端没有携带 JWT Token 或 Token 过期
|
||||
|
||||
@@ -168,24 +273,38 @@ interface DecodedToken {
|
||||
2. 检查 localStorage 中是否有 `accessToken`
|
||||
3. 如果 Token 过期,尝试刷新或重新登录
|
||||
|
||||
### 5.2 User not authenticated
|
||||
### 6.2 User not authenticated
|
||||
|
||||
**原因**: 后端路由没有添加 `authenticate` 中间件
|
||||
|
||||
**解决**: 在路由定义中添加 `preHandler: [authenticate]`
|
||||
|
||||
### 5.3 TypeError: Cannot read property 'userId' of undefined
|
||||
### 6.3 TypeError: Cannot read property 'userId' of undefined
|
||||
|
||||
**原因**: 使用了错误的属性名(`request.user.id` 而非 `request.user.userId`)
|
||||
|
||||
**解决**: 使用 `(request as any).user?.userId`
|
||||
|
||||
## 6. 参考文件
|
||||
### 6.4 测试脚本 Token 为 NULL
|
||||
|
||||
**原因**: 登录返回的 Token 路径不对
|
||||
|
||||
**解决**: Token 位于 `res.data.data.tokens.accessToken`,不是 `res.data.data.accessToken` 或 `res.data.data.token`
|
||||
|
||||
### 6.5 测试脚本登录成功但 API 返回 401
|
||||
|
||||
**排查步骤**:
|
||||
1. 确认 Token 提取路径:`res.data.data.tokens.accessToken`
|
||||
2. 确认 Authorization header 格式:`Bearer <token>`(注意空格)
|
||||
3. 确认后端服务使用的 JWT_SECRET 与签发时一致
|
||||
|
||||
## 7. 参考文件
|
||||
|
||||
- 前端 axios 实例: `frontend-v2/src/common/api/axios.ts`
|
||||
- 前端 Token 管理: `frontend-v2/src/framework/auth/api.ts`
|
||||
- 后端认证中间件: `backend/src/common/auth/auth.middleware.ts`
|
||||
- 后端 JWT 服务: `backend/src/common/auth/jwt.service.ts`
|
||||
- 测试脚本示例: `backend/src/modules/asl/__tests__/deep-research-v2-smoke.ts`
|
||||
|
||||
|
||||
|
||||
|
||||
1315
frontend-v2/package-lock.json
generated
1315
frontend-v2/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -38,7 +38,9 @@
|
||||
"prismjs": "^1.30.0",
|
||||
"react": "^19.2.0",
|
||||
"react-dom": "^19.2.0",
|
||||
"react-markdown": "^10.1.0",
|
||||
"react-router-dom": "^7.9.5",
|
||||
"remark-gfm": "^4.0.1",
|
||||
"xlsx": "^0.18.5",
|
||||
"zustand": "^5.0.8"
|
||||
},
|
||||
|
||||
@@ -991,9 +991,7 @@
|
||||
.message-bubble .markdown-content ol {
|
||||
margin: 8px 0 12px 0;
|
||||
padding-left: 24px;
|
||||
}
|
||||
|
||||
.message-bubble .markdown-content ul {
|
||||
}.message-bubble .markdown-content ul {
|
||||
list-style-type: disc;
|
||||
}.message-bubble .markdown-content ol {
|
||||
list-style-type: decimal;
|
||||
|
||||
@@ -478,6 +478,56 @@ export async function getResearchTaskStatus(
|
||||
return request(`/research/tasks/${taskId}/status`);
|
||||
}
|
||||
|
||||
// ==================== Deep Research V2.0 API ====================
|
||||
|
||||
import type {
|
||||
DataSourceConfig,
|
||||
GenerateRequirementRequest,
|
||||
GenerateRequirementResponse,
|
||||
DeepResearchTask,
|
||||
} from '../types/deepResearch';
|
||||
|
||||
/**
|
||||
* 获取数据源配置列表
|
||||
*/
|
||||
export async function getDeepResearchDataSources(): Promise<ApiResponse<DataSourceConfig[]>> {
|
||||
return request('/research/data-sources');
|
||||
}
|
||||
|
||||
/**
|
||||
* 需求扩写(PICOS + MeSH)
|
||||
*/
|
||||
export async function generateRequirement(
|
||||
data: GenerateRequirementRequest
|
||||
): Promise<ApiResponse<GenerateRequirementResponse>> {
|
||||
return request('/research/generate-requirement', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(data),
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* 启动异步执行
|
||||
*/
|
||||
export async function executeDeepResearchTask(
|
||||
taskId: string,
|
||||
confirmedRequirement: string
|
||||
): Promise<ApiResponse<void>> {
|
||||
return request(`/research/tasks/${taskId}/execute`, {
|
||||
method: 'PUT',
|
||||
body: JSON.stringify({ confirmedRequirement }),
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取 V2.0 任务详情(状态 + 日志 + 结果)
|
||||
*/
|
||||
export async function getDeepResearchTask(
|
||||
taskId: string
|
||||
): Promise<ApiResponse<DeepResearchTask>> {
|
||||
return request(`/research/tasks/${taskId}`);
|
||||
}
|
||||
|
||||
// ==================== 统一导出API对象 ====================
|
||||
|
||||
/**
|
||||
@@ -525,7 +575,13 @@ export const aslApi = {
|
||||
// 健康检查
|
||||
healthCheck,
|
||||
|
||||
// 智能文献检索 (DeepSearch)
|
||||
// 智能文献检索 (DeepSearch V1.x)
|
||||
createResearchTask,
|
||||
getResearchTaskStatus,
|
||||
|
||||
// Deep Research V2.0
|
||||
getDeepResearchDataSources,
|
||||
generateRequirement,
|
||||
executeDeepResearchTask,
|
||||
getDeepResearchTask,
|
||||
};
|
||||
|
||||
@@ -39,7 +39,7 @@ const ASLLayout = () => {
|
||||
title: '敬请期待'
|
||||
},
|
||||
{
|
||||
key: '/literature/research/search',
|
||||
key: '/literature/research/deep',
|
||||
icon: <SearchOutlined />,
|
||||
label: '2. 智能文献检索',
|
||||
},
|
||||
|
||||
@@ -0,0 +1,165 @@
|
||||
/**
|
||||
* Deep Research V2.0 — Agent Terminal
|
||||
*
|
||||
* 暗色终端风格展示 AI 思考与执行过程:
|
||||
* - 每条日志展示完整思考内容
|
||||
* - 自动过滤无意义内容(任务 ID 等)
|
||||
* - 条件 auto-scroll
|
||||
*/
|
||||
|
||||
import { useRef, useEffect, useCallback, useState } from 'react';
|
||||
import { Spin, Typography, Alert } from 'antd';
|
||||
import { LoadingOutlined } from '@ant-design/icons';
|
||||
import type { DeepResearchTask, ExecutionLogEntry } from '../../types/deepResearch';
|
||||
|
||||
const { Text } = Typography;
|
||||
|
||||
interface AgentTerminalProps {
|
||||
task: DeepResearchTask | null;
|
||||
isRunning: boolean;
|
||||
isFailed: boolean;
|
||||
}
|
||||
|
||||
const LOG_COLORS: Record<string, string> = {
|
||||
thinking: '#8b949e',
|
||||
searching: '#58a6ff',
|
||||
reading: '#d2a8ff',
|
||||
analyzing: '#7ee787',
|
||||
summary: '#ffa657',
|
||||
info: '#79c0ff',
|
||||
};
|
||||
|
||||
const LOG_PREFIXES: Record<string, string> = {
|
||||
thinking: '🤔 思考',
|
||||
searching: '🔍 搜索',
|
||||
reading: '📖 阅读',
|
||||
analyzing: '🧪 分析',
|
||||
summary: '📊 总结',
|
||||
info: 'ℹ️ 信息',
|
||||
};
|
||||
|
||||
const AgentTerminal: React.FC<AgentTerminalProps> = ({ task, isRunning, isFailed }) => {
|
||||
const containerRef = useRef<HTMLDivElement>(null);
|
||||
const [userScrolled, setUserScrolled] = useState(false);
|
||||
const prevLogCountRef = useRef(0);
|
||||
|
||||
const logs = (task?.executionLogs || []).filter(
|
||||
(log: ExecutionLogEntry) => !log.text.includes('Unifuncs 任务 ID')
|
||||
);
|
||||
|
||||
const handleScroll = useCallback(() => {
|
||||
const el = containerRef.current;
|
||||
if (!el) return;
|
||||
const distanceFromBottom = el.scrollHeight - el.scrollTop - el.clientHeight;
|
||||
setUserScrolled(distanceFromBottom > 50);
|
||||
}, []);
|
||||
|
||||
useEffect(() => {
|
||||
if (!userScrolled && containerRef.current && logs.length > prevLogCountRef.current) {
|
||||
containerRef.current.scrollTop = containerRef.current.scrollHeight;
|
||||
}
|
||||
prevLogCountRef.current = logs.length;
|
||||
}, [logs.length, userScrolled]);
|
||||
|
||||
return (
|
||||
<div>
|
||||
<div className="flex items-center gap-3 mb-4">
|
||||
{isRunning && <Spin indicator={<LoadingOutlined spin />} size="small" />}
|
||||
<Text strong className="text-lg">
|
||||
{isRunning ? 'Deep Research 执行中...' : isFailed ? '执行失败' : '执行完成'}
|
||||
</Text>
|
||||
</div>
|
||||
|
||||
{isFailed && task?.errorMessage && (
|
||||
<Alert
|
||||
type="error"
|
||||
message="执行失败"
|
||||
description={task.errorMessage}
|
||||
className="mb-4"
|
||||
showIcon
|
||||
/>
|
||||
)}
|
||||
|
||||
<div
|
||||
ref={containerRef}
|
||||
onScroll={handleScroll}
|
||||
className="rounded-xl overflow-hidden"
|
||||
style={{
|
||||
background: '#0d1117',
|
||||
color: '#c9d1d9',
|
||||
fontFamily: "'JetBrains Mono', 'Fira Code', 'Consolas', monospace",
|
||||
fontSize: 13,
|
||||
lineHeight: 1.8,
|
||||
padding: '16px 20px',
|
||||
maxHeight: 500,
|
||||
minHeight: 200,
|
||||
overflowY: 'auto',
|
||||
}}
|
||||
>
|
||||
{/* Header */}
|
||||
<div className="flex items-center gap-2 mb-4 pb-3" style={{ borderBottom: '1px solid #21262d' }}>
|
||||
<span style={{ width: 12, height: 12, borderRadius: '50%', background: '#ff5f56', display: 'inline-block' }} />
|
||||
<span style={{ width: 12, height: 12, borderRadius: '50%', background: '#ffbd2e', display: 'inline-block' }} />
|
||||
<span style={{ width: 12, height: 12, borderRadius: '50%', background: '#27c93f', display: 'inline-block' }} />
|
||||
<span className="ml-3" style={{ color: '#8b949e' }}>Deep Research Agent</span>
|
||||
</div>
|
||||
|
||||
{/* Log entries */}
|
||||
{logs.map((log: ExecutionLogEntry, i: number) => (
|
||||
<div key={i} className="mb-3 pb-2" style={{ borderBottom: '1px solid #161b22' }}>
|
||||
<div className="flex items-center gap-2 mb-1">
|
||||
<span style={{ color: LOG_COLORS[log.type] || '#c9d1d9', fontWeight: 600, fontSize: 12 }}>
|
||||
{LOG_PREFIXES[log.type] || log.title}
|
||||
</span>
|
||||
<span style={{ color: '#484f58', fontSize: 11 }}>
|
||||
{new Date(log.ts).toLocaleTimeString()}
|
||||
</span>
|
||||
</div>
|
||||
<div style={{ color: '#c9d1d9', paddingLeft: 4, whiteSpace: 'pre-wrap', wordBreak: 'break-word' }}>
|
||||
{log.text}
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
|
||||
{/* Blinking cursor */}
|
||||
{isRunning && (
|
||||
<span className="inline-block mt-1" style={{
|
||||
width: 8,
|
||||
height: 16,
|
||||
background: '#58a6ff',
|
||||
animation: 'blink 1s step-end infinite',
|
||||
}} />
|
||||
)}
|
||||
|
||||
{logs.length === 0 && (
|
||||
<div style={{ color: '#484f58' }}>等待任务启动...</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{userScrolled && isRunning && (
|
||||
<div className="text-center mt-2">
|
||||
<Text
|
||||
type="secondary"
|
||||
className="text-xs cursor-pointer hover:text-blue-400 transition-colors"
|
||||
onClick={() => {
|
||||
setUserScrolled(false);
|
||||
if (containerRef.current) {
|
||||
containerRef.current.scrollTop = containerRef.current.scrollHeight;
|
||||
}
|
||||
}}
|
||||
>
|
||||
↓ 回到底部查看最新日志
|
||||
</Text>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<style>{`
|
||||
@keyframes blink {
|
||||
50% { opacity: 0; }
|
||||
}
|
||||
`}</style>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default AgentTerminal;
|
||||
@@ -0,0 +1,92 @@
|
||||
/**
|
||||
* Deep Research V2.0 — Landing 视图
|
||||
*
|
||||
* 大搜索框 + 推荐预置词,居中展示。
|
||||
*/
|
||||
|
||||
import { useState } from 'react';
|
||||
import { Input, Button, Tag } from 'antd';
|
||||
import { SearchOutlined, ExperimentOutlined } from '@ant-design/icons';
|
||||
|
||||
const { TextArea } = Input;
|
||||
|
||||
interface LandingViewProps {
|
||||
onSubmit: (query: string) => void;
|
||||
}
|
||||
|
||||
const PRESET_QUERIES = [
|
||||
'他汀类药物在心血管疾病一级预防中的最新RCT证据',
|
||||
'PD-1/PD-L1抑制剂联合化疗治疗非小细胞肺癌的Meta分析',
|
||||
'糖尿病肾病患者SGLT2抑制剂的肾脏保护作用',
|
||||
'阿尔茨海默病早期诊断生物标志物研究进展',
|
||||
];
|
||||
|
||||
const LandingView: React.FC<LandingViewProps> = ({ onSubmit }) => {
|
||||
const [query, setQuery] = useState('');
|
||||
|
||||
const handleSubmit = () => {
|
||||
const trimmed = query.trim();
|
||||
if (!trimmed) return;
|
||||
onSubmit(trimmed);
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="flex flex-col items-center justify-center" style={{ minHeight: 'calc(100vh - 64px)' }}>
|
||||
<div className="text-center mb-8">
|
||||
<ExperimentOutlined className="text-5xl text-blue-500 mb-4" />
|
||||
<h1 className="text-3xl font-bold text-gray-800 mb-2">Deep Research</h1>
|
||||
<p className="text-gray-500 text-base">AI 驱动的深度文献检索,输入您的研究想法</p>
|
||||
</div>
|
||||
|
||||
<div
|
||||
className="w-full bg-white rounded-2xl shadow-md border border-gray-200 p-5 flex flex-col gap-3"
|
||||
style={{ maxWidth: 700 }}
|
||||
>
|
||||
<TextArea
|
||||
value={query}
|
||||
onChange={(e) => setQuery(e.target.value)}
|
||||
placeholder="输入您的研究想法,例如:他汀类药物预防心血管疾病的系统评价..."
|
||||
autoSize={{ minRows: 3, maxRows: 6 }}
|
||||
className="!border-none !shadow-none !text-base !resize-none"
|
||||
onPressEnter={(e) => {
|
||||
if (!e.shiftKey) {
|
||||
e.preventDefault();
|
||||
handleSubmit();
|
||||
}
|
||||
}}
|
||||
/>
|
||||
<div className="flex justify-end">
|
||||
<Button
|
||||
type="primary"
|
||||
icon={<SearchOutlined />}
|
||||
size="large"
|
||||
onClick={handleSubmit}
|
||||
disabled={!query.trim()}
|
||||
className="!rounded-lg !px-6"
|
||||
>
|
||||
开始
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="mt-8" style={{ maxWidth: 700, width: '100%' }}>
|
||||
<p className="text-gray-400 text-sm mb-3">试试这些:</p>
|
||||
<div className="flex flex-wrap gap-2">
|
||||
{PRESET_QUERIES.map((q, i) => (
|
||||
<Tag
|
||||
key={i}
|
||||
className="cursor-pointer !text-sm !py-1 !px-3 !rounded-full hover:!bg-blue-50 hover:!border-blue-300 transition-colors"
|
||||
onClick={() => {
|
||||
setQuery(q);
|
||||
}}
|
||||
>
|
||||
{q}
|
||||
</Tag>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default LandingView;
|
||||
@@ -0,0 +1,225 @@
|
||||
/**
|
||||
* Deep Research V2.0 — 结果展示
|
||||
*
|
||||
* 完成横幅 + AI 综合报告(Markdown 正式渲染)+ 文献清单表格
|
||||
* 降级展示:如果 resultList 为 null,仅展示报告
|
||||
*/
|
||||
|
||||
import { useState } from 'react';
|
||||
import { Card, Table, Button, Typography, Tag, Divider, Empty, message } from 'antd';
|
||||
import {
|
||||
CheckCircleFilled,
|
||||
FileTextOutlined,
|
||||
TableOutlined,
|
||||
PlusOutlined,
|
||||
DownloadOutlined,
|
||||
} from '@ant-design/icons';
|
||||
import ReactMarkdown from 'react-markdown';
|
||||
import remarkGfm from 'remark-gfm';
|
||||
import { getAccessToken } from '../../../../framework/auth/api';
|
||||
import type { DeepResearchTask, LiteratureItem } from '../../types/deepResearch';
|
||||
|
||||
const { Text } = Typography;
|
||||
|
||||
interface ResultsViewProps {
|
||||
task: DeepResearchTask;
|
||||
onNewSearch: () => void;
|
||||
}
|
||||
|
||||
const ResultsView: React.FC<ResultsViewProps> = ({ task, onNewSearch }) => {
|
||||
const { synthesisReport, resultList, resultCount, query, completedAt, taskId } = task;
|
||||
const [exporting, setExporting] = useState(false);
|
||||
|
||||
const handleExportWord = async () => {
|
||||
setExporting(true);
|
||||
try {
|
||||
const token = getAccessToken();
|
||||
const res = await fetch(`/api/v1/asl/research/tasks/${taskId}/export-word`, {
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
if (!res.ok) throw new Error(`HTTP ${res.status}`);
|
||||
const blob = await res.blob();
|
||||
const disposition = res.headers.get('Content-Disposition') || '';
|
||||
const filenameMatch = disposition.match(/filename\*?=(?:UTF-8'')?([^;]+)/);
|
||||
const filename = filenameMatch ? decodeURIComponent(filenameMatch[1]) : 'DeepResearch.docx';
|
||||
const url = URL.createObjectURL(blob);
|
||||
const a = document.createElement('a');
|
||||
a.href = url;
|
||||
a.download = filename;
|
||||
a.click();
|
||||
URL.revokeObjectURL(url);
|
||||
message.success('导出成功');
|
||||
} catch (err: any) {
|
||||
message.error(err.message || '导出失败');
|
||||
} finally {
|
||||
setExporting(false);
|
||||
}
|
||||
};
|
||||
|
||||
const columns = [
|
||||
{
|
||||
title: '#',
|
||||
key: 'index',
|
||||
width: 50,
|
||||
render: (_: any, __: any, idx: number) => idx + 1,
|
||||
},
|
||||
{
|
||||
title: '标题',
|
||||
dataIndex: 'title',
|
||||
key: 'title',
|
||||
ellipsis: true,
|
||||
render: (text: string, record: LiteratureItem) => (
|
||||
record.url ? (
|
||||
<a href={record.url} target="_blank" rel="noopener noreferrer" className="text-blue-600 hover:underline">
|
||||
{text || record.url}
|
||||
</a>
|
||||
) : (
|
||||
<span>{text || '—'}</span>
|
||||
)
|
||||
),
|
||||
},
|
||||
{
|
||||
title: '作者',
|
||||
dataIndex: 'authors',
|
||||
key: 'authors',
|
||||
width: 160,
|
||||
ellipsis: true,
|
||||
render: (t: string) => t || '—',
|
||||
},
|
||||
{
|
||||
title: '期刊',
|
||||
dataIndex: 'journal',
|
||||
key: 'journal',
|
||||
width: 140,
|
||||
ellipsis: true,
|
||||
render: (t: string) => t || '—',
|
||||
},
|
||||
{
|
||||
title: '年份',
|
||||
dataIndex: 'year',
|
||||
key: 'year',
|
||||
width: 70,
|
||||
render: (t: number | string) => t || '—',
|
||||
},
|
||||
{
|
||||
title: '研究类型',
|
||||
dataIndex: 'studyType',
|
||||
key: 'studyType',
|
||||
width: 100,
|
||||
render: (t: string) => t ? <Tag color="blue">{t}</Tag> : '—',
|
||||
},
|
||||
{
|
||||
title: 'PMID',
|
||||
dataIndex: 'pmid',
|
||||
key: 'pmid',
|
||||
width: 100,
|
||||
render: (t: string) => t ? (
|
||||
<a
|
||||
href={`https://pubmed.ncbi.nlm.nih.gov/${t}/`}
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
className="text-blue-500"
|
||||
>
|
||||
{t}
|
||||
</a>
|
||||
) : '—',
|
||||
},
|
||||
];
|
||||
|
||||
return (
|
||||
<div>
|
||||
{/* 完成横幅 */}
|
||||
<Card className="mb-6 !bg-green-50 !border-green-200">
|
||||
<div className="flex items-center justify-between">
|
||||
<div className="flex items-center gap-3">
|
||||
<CheckCircleFilled className="text-2xl text-green-500" />
|
||||
<div>
|
||||
<Text strong className="text-lg block">Deep Research 完成</Text>
|
||||
<Text type="secondary" className="text-sm">
|
||||
「{query}」 — 找到 {resultCount || 0} 篇文献
|
||||
{completedAt && ` · ${new Date(completedAt).toLocaleString()}`}
|
||||
</Text>
|
||||
</div>
|
||||
</div>
|
||||
<div className="flex gap-2">
|
||||
<Button icon={<DownloadOutlined />} loading={exporting} onClick={handleExportWord}>
|
||||
导出 Word
|
||||
</Button>
|
||||
<Button icon={<PlusOutlined />} onClick={onNewSearch}>
|
||||
新建检索
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</Card>
|
||||
|
||||
{/* AI 综合报告 — Markdown 渲染 */}
|
||||
{synthesisReport && (
|
||||
<Card
|
||||
className="mb-6"
|
||||
title={<><FileTextOutlined className="mr-2" />AI 综合分析报告</>}
|
||||
>
|
||||
<div className="prose prose-sm max-w-none prose-headings:text-base prose-headings:font-semibold prose-a:text-blue-600 prose-a:no-underline hover:prose-a:underline">
|
||||
<ReactMarkdown
|
||||
remarkPlugins={[remarkGfm]}
|
||||
components={{
|
||||
a: ({ href, children }) => (
|
||||
<>
|
||||
<a href={href} target="_blank" rel="noopener noreferrer">
|
||||
{children}
|
||||
</a>
|
||||
{href && (
|
||||
<a
|
||||
href={href}
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
className="!text-gray-400 !text-xs !ml-1 hover:!text-blue-500 !no-underline"
|
||||
title="点击打开链接"
|
||||
>
|
||||
{href}
|
||||
</a>
|
||||
)}
|
||||
</>
|
||||
),
|
||||
}}
|
||||
>
|
||||
{synthesisReport}
|
||||
</ReactMarkdown>
|
||||
</div>
|
||||
</Card>
|
||||
)}
|
||||
|
||||
<Divider />
|
||||
|
||||
{/* 文献清单表格 */}
|
||||
{resultList && resultList.length > 0 ? (
|
||||
<Card
|
||||
title={<><TableOutlined className="mr-2" />文献清单({resultList.length} 篇)</>}
|
||||
>
|
||||
<Table
|
||||
dataSource={resultList}
|
||||
columns={columns}
|
||||
rowKey={(_, idx) => String(idx)}
|
||||
size="small"
|
||||
pagination={{ pageSize: 20, showSizeChanger: true }}
|
||||
scroll={{ x: 900 }}
|
||||
/>
|
||||
</Card>
|
||||
) : (
|
||||
synthesisReport ? (
|
||||
<Card>
|
||||
<Empty
|
||||
description="AI 未能结构化提取文献列表,请查阅上方综合报告中的文献引用"
|
||||
image={Empty.PRESENTED_IMAGE_SIMPLE}
|
||||
/>
|
||||
</Card>
|
||||
) : (
|
||||
<Card>
|
||||
<Empty description="无结果数据" />
|
||||
</Card>
|
||||
)
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default ResultsView;
|
||||
@@ -0,0 +1,223 @@
|
||||
/**
|
||||
* Deep Research V2.0 — 配置面板
|
||||
*
|
||||
* 数据源 Checkbox + 高级筛选 + 生成需求书按钮
|
||||
* 支持 collapsed 模式显示为摘要卡片
|
||||
*/
|
||||
|
||||
import { useState, useEffect } from 'react';
|
||||
import { Card, Checkbox, Button, Input, Select, Spin, Divider, Typography, Tag } from 'antd';
|
||||
import {
|
||||
ArrowLeftOutlined,
|
||||
ThunderboltOutlined,
|
||||
GlobalOutlined,
|
||||
EditOutlined,
|
||||
CheckCircleOutlined,
|
||||
} from '@ant-design/icons';
|
||||
import { aslApi } from '../../api';
|
||||
import type { DataSourceConfig } from '../../types/deepResearch';
|
||||
|
||||
const { Text } = Typography;
|
||||
|
||||
interface SetupPanelProps {
|
||||
initialQuery: string;
|
||||
onSubmit: (
|
||||
query: string,
|
||||
selectedSources: string[],
|
||||
filters: { yearRange?: string; targetCount?: string }
|
||||
) => void;
|
||||
onBack: () => void;
|
||||
loading: boolean;
|
||||
collapsed?: boolean;
|
||||
onExpand?: () => void;
|
||||
}
|
||||
|
||||
const LOADING_TEXTS = [
|
||||
'AI 正在理解您的研究意图...',
|
||||
'正在进行 PICOS 结构化拆解...',
|
||||
'正在匹配 MeSH 医学术语...',
|
||||
'正在生成检索指令书...',
|
||||
];
|
||||
|
||||
const SetupPanel: React.FC<SetupPanelProps> = ({
|
||||
initialQuery, onSubmit, onBack, loading, collapsed, onExpand,
|
||||
}) => {
|
||||
const [query, setQuery] = useState(initialQuery);
|
||||
const [dataSources, setDataSources] = useState<DataSourceConfig[]>([]);
|
||||
const [selectedIds, setSelectedIds] = useState<string[]>([]);
|
||||
const [yearRange, setYearRange] = useState<string>('近5年');
|
||||
const [targetCount, setTargetCount] = useState<string>('全面检索');
|
||||
const [loadingSources, setLoadingSources] = useState(true);
|
||||
const [loadingTextIdx, setLoadingTextIdx] = useState(0);
|
||||
|
||||
useEffect(() => {
|
||||
aslApi.getDeepResearchDataSources().then(res => {
|
||||
const sources = res.data || [];
|
||||
setDataSources(sources);
|
||||
setSelectedIds(sources.filter((s: DataSourceConfig) => s.defaultChecked).map((s: DataSourceConfig) => s.id));
|
||||
}).catch(() => {
|
||||
setDataSources([]);
|
||||
}).finally(() => {
|
||||
setLoadingSources(false);
|
||||
});
|
||||
}, []);
|
||||
|
||||
useEffect(() => {
|
||||
if (!loading) { setLoadingTextIdx(0); return; }
|
||||
const timer = setInterval(() => {
|
||||
setLoadingTextIdx(prev => (prev + 1) % LOADING_TEXTS.length);
|
||||
}, 3000);
|
||||
return () => clearInterval(timer);
|
||||
}, [loading]);
|
||||
|
||||
const handleToggle = (id: string) => {
|
||||
setSelectedIds(prev =>
|
||||
prev.includes(id) ? prev.filter(x => x !== id) : [...prev, id]
|
||||
);
|
||||
};
|
||||
|
||||
const handleSubmit = () => {
|
||||
const domains = dataSources
|
||||
.filter(s => selectedIds.includes(s.id))
|
||||
.map(s => s.domainScope);
|
||||
onSubmit(query, domains, { yearRange, targetCount });
|
||||
};
|
||||
|
||||
const selectedNames = dataSources.filter(s => selectedIds.includes(s.id)).map(s => s.label);
|
||||
|
||||
if (collapsed) {
|
||||
return (
|
||||
<Card size="small" className="!bg-white">
|
||||
<div className="flex items-center justify-between">
|
||||
<div className="flex items-center gap-3 flex-1 min-w-0">
|
||||
<CheckCircleOutlined className="text-green-500 text-lg flex-shrink-0" />
|
||||
<div className="min-w-0">
|
||||
<Text strong className="block truncate">{query}</Text>
|
||||
<div className="flex items-center gap-2 mt-1 flex-wrap">
|
||||
{selectedNames.map(name => (
|
||||
<Tag key={name} className="!text-xs !m-0">{name}</Tag>
|
||||
))}
|
||||
<Text type="secondary" className="text-xs">{yearRange} · {targetCount}</Text>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{onExpand && (
|
||||
<Button type="link" icon={<EditOutlined />} onClick={onExpand} className="flex-shrink-0">
|
||||
修改
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
</Card>
|
||||
);
|
||||
}
|
||||
|
||||
const englishSources = dataSources.filter(s => s.category === 'english');
|
||||
const chineseSources = dataSources.filter(s => s.category === 'chinese');
|
||||
|
||||
return (
|
||||
<div>
|
||||
<div className="flex items-center gap-3 mb-4">
|
||||
<Button type="text" icon={<ArrowLeftOutlined />} onClick={onBack} size="small">
|
||||
返回
|
||||
</Button>
|
||||
<Text strong className="text-lg">配置检索参数</Text>
|
||||
</div>
|
||||
|
||||
<Card className="mb-4" size="small">
|
||||
<Text type="secondary" className="block mb-2 text-xs">研究想法</Text>
|
||||
<Input.TextArea
|
||||
value={query}
|
||||
onChange={(e) => setQuery(e.target.value)}
|
||||
autoSize={{ minRows: 2, maxRows: 4 }}
|
||||
className="!text-base"
|
||||
/>
|
||||
</Card>
|
||||
|
||||
<Card className="mb-4" size="small" title={<><GlobalOutlined className="mr-2" />选择数据源</>}>
|
||||
{loadingSources ? (
|
||||
<Spin size="small" />
|
||||
) : (
|
||||
<>
|
||||
<Text type="secondary" className="block mb-3 text-xs">英文数据库</Text>
|
||||
<div className="flex flex-col gap-2 mb-4">
|
||||
{englishSources.map(ds => (
|
||||
<Checkbox
|
||||
key={ds.id}
|
||||
checked={selectedIds.includes(ds.id)}
|
||||
onChange={() => handleToggle(ds.id)}
|
||||
>
|
||||
<span className="font-medium">{ds.label}</span>
|
||||
<span className="text-gray-400 text-xs ml-2">{ds.domainScope}</span>
|
||||
</Checkbox>
|
||||
))}
|
||||
</div>
|
||||
|
||||
<Divider className="!my-3" />
|
||||
|
||||
<Text type="secondary" className="block mb-3 text-xs">中文数据库</Text>
|
||||
<div className="flex flex-col gap-2">
|
||||
{chineseSources.map(ds => (
|
||||
<Checkbox
|
||||
key={ds.id}
|
||||
checked={selectedIds.includes(ds.id)}
|
||||
onChange={() => handleToggle(ds.id)}
|
||||
>
|
||||
<span className="font-medium">{ds.label}</span>
|
||||
<span className="text-gray-400 text-xs ml-2">{ds.domainScope}</span>
|
||||
</Checkbox>
|
||||
))}
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
</Card>
|
||||
|
||||
<Card className="mb-6" size="small" title="高级筛选">
|
||||
<div className="flex gap-4">
|
||||
<div className="flex-1">
|
||||
<Text type="secondary" className="block mb-1 text-xs">时间范围</Text>
|
||||
<Select
|
||||
value={yearRange}
|
||||
onChange={setYearRange}
|
||||
className="w-full"
|
||||
options={[
|
||||
{ value: '不限', label: '不限' },
|
||||
{ value: '近1年', label: '近1年' },
|
||||
{ value: '近3年', label: '近3年' },
|
||||
{ value: '近5年', label: '近5年' },
|
||||
{ value: '近10年', label: '近10年' },
|
||||
]}
|
||||
/>
|
||||
</div>
|
||||
<div className="flex-1">
|
||||
<Text type="secondary" className="block mb-1 text-xs">目标数量</Text>
|
||||
<Select
|
||||
value={targetCount}
|
||||
onChange={setTargetCount}
|
||||
className="w-full"
|
||||
options={[
|
||||
{ value: '全面检索', label: '全面检索' },
|
||||
{ value: '约20篇', label: '约20篇' },
|
||||
{ value: '约50篇', label: '约50篇' },
|
||||
{ value: '约100篇', label: '约100篇' },
|
||||
]}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</Card>
|
||||
|
||||
<Button
|
||||
type="primary"
|
||||
icon={<ThunderboltOutlined />}
|
||||
size="large"
|
||||
block
|
||||
onClick={handleSubmit}
|
||||
loading={loading}
|
||||
disabled={!query.trim() || selectedIds.length === 0}
|
||||
>
|
||||
{loading ? LOADING_TEXTS[loadingTextIdx] : '生成检索需求书'}
|
||||
</Button>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default SetupPanel;
|
||||
@@ -0,0 +1,122 @@
|
||||
/**
|
||||
* Deep Research V2.0 — 检索指令确认
|
||||
*
|
||||
* 单列布局:PICOS 内联摘要 + 可编辑检索指令书 + 保存/启动按钮
|
||||
* 支持 collapsed 模式显示为已确认卡片
|
||||
*/
|
||||
|
||||
import { useState } from 'react';
|
||||
import { Card, Button, Input, Tag, Typography, Divider, message } from 'antd';
|
||||
import {
|
||||
RocketOutlined,
|
||||
SaveOutlined,
|
||||
CheckCircleOutlined,
|
||||
AimOutlined,
|
||||
TagsOutlined,
|
||||
} from '@ant-design/icons';
|
||||
import type { IntentSummary } from '../../types/deepResearch';
|
||||
|
||||
const { Text } = Typography;
|
||||
const { TextArea } = Input;
|
||||
|
||||
interface StrategyConfirmProps {
|
||||
generatedRequirement: string;
|
||||
intentSummary: IntentSummary | null;
|
||||
onConfirm: (confirmedRequirement: string) => void;
|
||||
collapsed?: boolean;
|
||||
}
|
||||
|
||||
const StrategyConfirm: React.FC<StrategyConfirmProps> = ({
|
||||
generatedRequirement,
|
||||
intentSummary,
|
||||
onConfirm,
|
||||
collapsed,
|
||||
}) => {
|
||||
const [editedRequirement, setEditedRequirement] = useState(generatedRequirement);
|
||||
const [saved, setSaved] = useState(false);
|
||||
|
||||
if (collapsed) {
|
||||
return (
|
||||
<Card size="small" className="!bg-white">
|
||||
<div className="flex items-center gap-3">
|
||||
<CheckCircleOutlined className="text-green-500 text-lg flex-shrink-0" />
|
||||
<Text strong>检索指令已确认,正在执行深度检索</Text>
|
||||
</div>
|
||||
</Card>
|
||||
);
|
||||
}
|
||||
|
||||
const handleSave = () => {
|
||||
setSaved(true);
|
||||
message.success('检索指令已保存');
|
||||
setTimeout(() => setSaved(false), 2000);
|
||||
};
|
||||
|
||||
return (
|
||||
<div>
|
||||
<Text strong className="text-lg block mb-4">确认检索指令</Text>
|
||||
|
||||
{intentSummary && (
|
||||
<Card size="small" className="mb-4 !bg-blue-50/50">
|
||||
<div className="flex items-center gap-2 mb-2">
|
||||
<AimOutlined className="text-blue-500" />
|
||||
<Text className="text-sm">{intentSummary.objective}</Text>
|
||||
</div>
|
||||
<Divider className="!my-2" />
|
||||
<div className="flex flex-wrap gap-x-6 gap-y-1 text-xs mb-2">
|
||||
<span><Text type="secondary">P 人群</Text> <Text className="ml-1">{intentSummary.population || '—'}</Text></span>
|
||||
<span><Text type="secondary">I 干预</Text> <Text className="ml-1">{intentSummary.intervention || '—'}</Text></span>
|
||||
<span><Text type="secondary">C 对照</Text> <Text className="ml-1">{intentSummary.comparison || '—'}</Text></span>
|
||||
<span><Text type="secondary">O 结局</Text> <Text className="ml-1">{intentSummary.outcome || '—'}</Text></span>
|
||||
</div>
|
||||
{intentSummary.meshTerms && intentSummary.meshTerms.length > 0 && (
|
||||
<div className="flex items-center gap-1 flex-wrap mt-1">
|
||||
<TagsOutlined className="text-green-500 text-xs" />
|
||||
{intentSummary.meshTerms.map((term, i) => (
|
||||
<Tag key={i} color="green" className="!text-xs !m-0">{term}</Tag>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
{intentSummary.studyDesign && intentSummary.studyDesign.length > 0 && (
|
||||
<div className="flex items-center gap-1 flex-wrap mt-1">
|
||||
{intentSummary.studyDesign.map((s, i) => (
|
||||
<Tag key={i} color="blue" className="!text-xs !m-0">{s}</Tag>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
</Card>
|
||||
)}
|
||||
|
||||
<Card size="small" className="mb-4">
|
||||
<TextArea
|
||||
value={editedRequirement}
|
||||
onChange={(e) => { setEditedRequirement(e.target.value); setSaved(false); }}
|
||||
autoSize={{ minRows: 8, maxRows: 20 }}
|
||||
className="!text-sm !leading-relaxed"
|
||||
/>
|
||||
</Card>
|
||||
|
||||
<div className="flex gap-3">
|
||||
<Button
|
||||
icon={<SaveOutlined />}
|
||||
onClick={handleSave}
|
||||
disabled={saved}
|
||||
>
|
||||
{saved ? '已保存' : '保存修改'}
|
||||
</Button>
|
||||
<Button
|
||||
type="primary"
|
||||
icon={<RocketOutlined />}
|
||||
size="large"
|
||||
className="flex-1"
|
||||
onClick={() => onConfirm(editedRequirement)}
|
||||
disabled={!editedRequirement.trim()}
|
||||
>
|
||||
启动 Deep Research
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default StrategyConfirm;
|
||||
48
frontend-v2/src/modules/asl/hooks/useDeepResearchTask.ts
Normal file
48
frontend-v2/src/modules/asl/hooks/useDeepResearchTask.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
/**
|
||||
* Deep Research V2.0 任务轮询 Hook
|
||||
*
|
||||
* running 时 3s 轮询,completed/failed 停止。
|
||||
*/
|
||||
|
||||
import { useQuery } from '@tanstack/react-query';
|
||||
import { aslApi } from '../api';
|
||||
|
||||
interface UseDeepResearchTaskOptions {
|
||||
taskId: string | null;
|
||||
enabled?: boolean;
|
||||
pollingInterval?: number;
|
||||
}
|
||||
|
||||
export function useDeepResearchTask({
|
||||
taskId,
|
||||
enabled = true,
|
||||
pollingInterval = 3000,
|
||||
}: UseDeepResearchTaskOptions) {
|
||||
const { data, isLoading, error, refetch } = useQuery({
|
||||
queryKey: ['deep-research-task', taskId],
|
||||
queryFn: () => aslApi.getDeepResearchTask(taskId!),
|
||||
enabled: enabled && !!taskId,
|
||||
refetchInterval: (query) => {
|
||||
const task = query.state.data?.data;
|
||||
if (!task) return pollingInterval;
|
||||
if (task.status === 'completed' || task.status === 'failed') {
|
||||
return false;
|
||||
}
|
||||
return pollingInterval;
|
||||
},
|
||||
staleTime: 0,
|
||||
});
|
||||
|
||||
const task = data?.data || null;
|
||||
|
||||
return {
|
||||
task,
|
||||
isLoading,
|
||||
error,
|
||||
refetch,
|
||||
isRunning: task?.status === 'running' || task?.status === 'pending',
|
||||
isCompleted: task?.status === 'completed',
|
||||
isFailed: task?.status === 'failed',
|
||||
isDraft: task?.status === 'draft',
|
||||
};
|
||||
}
|
||||
@@ -22,6 +22,9 @@ const FulltextResults = lazy(() => import('./pages/FulltextResults'));
|
||||
// 智能文献检索页面
|
||||
const ResearchSearch = lazy(() => import('./pages/ResearchSearch'));
|
||||
|
||||
// Deep Research V2.0
|
||||
const DeepResearchPage = lazy(() => import('./pages/DeepResearchPage'));
|
||||
|
||||
const ASLModule = () => {
|
||||
return (
|
||||
<Suspense
|
||||
@@ -35,9 +38,12 @@ const ASLModule = () => {
|
||||
<Route path="" element={<ASLLayout />}>
|
||||
<Route index element={<Navigate to="screening/title/settings" replace />} />
|
||||
|
||||
{/* 智能文献检索 */}
|
||||
{/* 智能文献检索 V1.x(保留兼容) */}
|
||||
<Route path="research/search" element={<ResearchSearch />} />
|
||||
|
||||
{/* Deep Research V2.0 */}
|
||||
<Route path="research/deep" element={<DeepResearchPage />} />
|
||||
|
||||
{/* 标题摘要初筛 */}
|
||||
<Route path="screening/title">
|
||||
<Route index element={<Navigate to="settings" replace />} />
|
||||
|
||||
161
frontend-v2/src/modules/asl/pages/DeepResearchPage.tsx
Normal file
161
frontend-v2/src/modules/asl/pages/DeepResearchPage.tsx
Normal file
@@ -0,0 +1,161 @@
|
||||
/**
|
||||
* Deep Research V2.0 主页面 — 瀑布流布局
|
||||
*
|
||||
* Phase 0: Landing(全屏居中搜索)
|
||||
* Phase 1+: 配置 → 策略 → 执行 → 结果,依次累积展示
|
||||
*/
|
||||
|
||||
import { useState, useCallback, useRef, useEffect } from 'react';
|
||||
import { message } from 'antd';
|
||||
import { aslApi } from '../api';
|
||||
import { useDeepResearchTask } from '../hooks/useDeepResearchTask';
|
||||
import LandingView from '../components/deep-research/LandingView';
|
||||
import SetupPanel from '../components/deep-research/SetupPanel';
|
||||
import StrategyConfirm from '../components/deep-research/StrategyConfirm';
|
||||
import AgentTerminal from '../components/deep-research/AgentTerminal';
|
||||
import ResultsView from '../components/deep-research/ResultsView';
|
||||
import type { IntentSummary, GenerateRequirementResponse } from '../types/deepResearch';
|
||||
|
||||
type Phase = 0 | 1 | 2 | 3 | 4;
|
||||
|
||||
const DeepResearchPage = () => {
|
||||
const [phase, setPhase] = useState<Phase>(0);
|
||||
const [query, setQuery] = useState('');
|
||||
const [taskId, setTaskId] = useState<string | null>(null);
|
||||
const [generatedRequirement, setGeneratedRequirement] = useState('');
|
||||
const [intentSummary, setIntentSummary] = useState<IntentSummary | null>(null);
|
||||
const [generating, setGenerating] = useState(false);
|
||||
|
||||
const strategyRef = useRef<HTMLDivElement>(null);
|
||||
const terminalRef = useRef<HTMLDivElement>(null);
|
||||
const resultsRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
const { task, isRunning, isCompleted, isFailed } = useDeepResearchTask({
|
||||
taskId,
|
||||
enabled: phase >= 3,
|
||||
});
|
||||
|
||||
useEffect(() => {
|
||||
if (isCompleted && phase === 3) {
|
||||
setPhase(4);
|
||||
setTimeout(() => resultsRef.current?.scrollIntoView({ behavior: 'smooth', block: 'start' }), 150);
|
||||
}
|
||||
}, [isCompleted, phase]);
|
||||
|
||||
const scrollTo = (ref: React.RefObject<HTMLDivElement | null>) => {
|
||||
setTimeout(() => ref.current?.scrollIntoView({ behavior: 'smooth', block: 'start' }), 150);
|
||||
};
|
||||
|
||||
const handleLandingSubmit = useCallback((q: string) => {
|
||||
setQuery(q);
|
||||
setPhase(1);
|
||||
setGeneratedRequirement('');
|
||||
setIntentSummary(null);
|
||||
setTaskId(null);
|
||||
}, []);
|
||||
|
||||
const handleSetupSubmit = useCallback(async (
|
||||
originalQuery: string,
|
||||
selectedSources: string[],
|
||||
filters: { yearRange?: string; targetCount?: string }
|
||||
) => {
|
||||
setGenerating(true);
|
||||
try {
|
||||
const res = await aslApi.generateRequirement({
|
||||
originalQuery,
|
||||
targetSources: selectedSources,
|
||||
filters,
|
||||
});
|
||||
const data = res.data as GenerateRequirementResponse;
|
||||
setTaskId(data.taskId);
|
||||
setGeneratedRequirement(data.generatedRequirement);
|
||||
setIntentSummary(data.intentSummary);
|
||||
setPhase(2);
|
||||
scrollTo(strategyRef);
|
||||
} catch (err: any) {
|
||||
message.error(err.message || '需求扩写失败');
|
||||
} finally {
|
||||
setGenerating(false);
|
||||
}
|
||||
}, []);
|
||||
|
||||
const handleStrategyConfirm = useCallback(async (confirmedReq: string) => {
|
||||
if (!taskId) return;
|
||||
try {
|
||||
await aslApi.executeDeepResearchTask(taskId, confirmedReq);
|
||||
setPhase(3);
|
||||
scrollTo(terminalRef);
|
||||
message.success('Deep Research 已启动');
|
||||
} catch (err: any) {
|
||||
message.error(err.message || '启动失败');
|
||||
}
|
||||
}, [taskId]);
|
||||
|
||||
const handleNewSearch = useCallback(() => {
|
||||
setPhase(0);
|
||||
setQuery('');
|
||||
setTaskId(null);
|
||||
setGeneratedRequirement('');
|
||||
setIntentSummary(null);
|
||||
window.scrollTo({ top: 0, behavior: 'smooth' });
|
||||
}, []);
|
||||
|
||||
if (phase === 0) {
|
||||
return (
|
||||
<div className="h-full overflow-auto bg-gray-50">
|
||||
<LandingView onSubmit={handleLandingSubmit} />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="h-full overflow-auto bg-gray-50 pb-16">
|
||||
<div className="max-w-5xl mx-auto px-6 pt-6">
|
||||
{/* Section 1: Setup */}
|
||||
<SetupPanel
|
||||
initialQuery={query}
|
||||
onSubmit={handleSetupSubmit}
|
||||
onBack={() => setPhase(0)}
|
||||
loading={generating}
|
||||
collapsed={phase >= 2}
|
||||
onExpand={phase === 2 ? () => setPhase(1) : undefined}
|
||||
/>
|
||||
|
||||
{/* Section 2: Strategy */}
|
||||
{phase >= 2 && (
|
||||
<div ref={strategyRef} className="mt-6">
|
||||
<StrategyConfirm
|
||||
generatedRequirement={generatedRequirement}
|
||||
intentSummary={intentSummary}
|
||||
onConfirm={handleStrategyConfirm}
|
||||
collapsed={phase >= 3}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Section 3: Executing */}
|
||||
{phase >= 3 && (
|
||||
<div ref={terminalRef} className="mt-6">
|
||||
<AgentTerminal
|
||||
task={task}
|
||||
isRunning={isRunning}
|
||||
isFailed={isFailed}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Section 4: Results */}
|
||||
{phase >= 4 && task && (
|
||||
<div ref={resultsRef} className="mt-6">
|
||||
<ResultsView
|
||||
task={task}
|
||||
onNewSearch={handleNewSearch}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default DeepResearchPage;
|
||||
76
frontend-v2/src/modules/asl/types/deepResearch.ts
Normal file
76
frontend-v2/src/modules/asl/types/deepResearch.ts
Normal file
@@ -0,0 +1,76 @@
|
||||
/**
 * Deep Research V2.0 type definitions.
 */

/** A selectable literature data source offered in the setup panel. */
export interface DataSourceConfig {
  id: string;
  /** Display label (Chinese). */
  label: string;
  /** Display label (English). */
  labelEn: string;
  /** Domain scope string passed to the search backend as a source filter. */
  domainScope: string;
  /** Grouping used to split the checkbox lists in the setup panel. */
  category: 'english' | 'chinese';
  /** Whether this source is pre-selected when the panel loads. */
  defaultChecked: boolean;
  note?: string;
}

/** Structured PICOS + MeSH summary produced by the requirement-expansion step. */
export interface IntentSummary {
  objective: string;
  population: string;
  intervention: string;
  comparison: string;
  outcome: string;
  studyDesign: string[];
  meshTerms: string[];
  condition: string;
}

/** One streamed log line rendered in the agent terminal. */
export interface ExecutionLogEntry {
  /** Category of the log line; drives color/prefix in the terminal UI. */
  type: 'thinking' | 'searching' | 'reading' | 'analyzing' | 'summary' | 'info';
  title: string;
  text: string;
  /** Timestamp string; rendered via `new Date(ts)` in the terminal. */
  ts: string;
}

/** A single literature record extracted from the research results. */
export interface LiteratureItem {
  title: string;
  authors?: string;
  journal?: string;
  year?: number | string;
  doi?: string;
  pmid?: string;
  url?: string;
  abstract?: string;
  studyType?: string;
}

/** Full task record returned by the task-status endpoint. */
export interface DeepResearchTask {
  taskId: string;
  /** Lifecycle state; `draft` precedes execution, `completed`/`failed` are terminal. */
  status: 'draft' | 'pending' | 'running' | 'completed' | 'failed';
  query: string;
  targetSources: string[] | null;
  confirmedRequirement: string | null;
  aiIntentSummary: IntentSummary | null;
  executionLogs: ExecutionLogEntry[];
  /** Markdown synthesis report; null until the run completes. */
  synthesisReport: string | null;
  /** Structured literature list; null when extraction failed (report-only fallback). */
  resultList: LiteratureItem[] | null;
  resultCount: number | null;
  errorMessage: string | null;
  createdAt: string;
  completedAt: string | null;
}

/** Request body for the generate-requirement endpoint. */
export interface GenerateRequirementRequest {
  originalQuery: string;
  targetSources?: string[];
  filters?: {
    yearRange?: string;
    targetCount?: string;
  };
}

/** Response of the generate-requirement endpoint. */
export interface GenerateRequirementResponse {
  taskId: string;
  generatedRequirement: string;
  intentSummary: IntentSummary;
}

/** Named UI steps corresponding to page phases 0–4. */
export type DeepResearchStep = 'landing' | 'setup' | 'strategy' | 'executing' | 'results';
|
||||
Reference in New Issue
Block a user