refactor(asl): ASL frontend architecture refactoring with left navigation
- feat: Create ASLLayout component with 7-module left navigation - feat: Implement Title Screening Settings page with optimized PICOS layout - feat: Add placeholder pages for Workbench and Results - fix: Fix nested routing structure for React Router v6 - fix: Resolve Spin component warning in MainLayout - fix: Add QueryClientProvider to App.tsx - style: Optimize PICOS form layout (P+I left, C+O+S right) - style: Align Inclusion/Exclusion criteria side-by-side - docs: Add architecture refactoring and routing fix reports Ref: Week 2 Frontend Development Scope: ASL module MVP - Title Abstract Screening
This commit is contained in:
258
backend/src/modules/asl/controllers/literatureController.ts
Normal file
258
backend/src/modules/asl/controllers/literatureController.ts
Normal file
@@ -0,0 +1,258 @@
|
||||
/**
|
||||
* ASL 文献控制器
|
||||
*/
|
||||
|
||||
import { FastifyRequest, FastifyReply } from 'fastify';
|
||||
import { ImportLiteratureDto, LiteratureDto } from '../types/index.js';
|
||||
import { prisma } from '../../../config/database.js';
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
import * as XLSX from 'xlsx';
|
||||
|
||||
/**
|
||||
* 导入文献(从Excel或JSON)
|
||||
*/
|
||||
export async function importLiteratures(
|
||||
request: FastifyRequest<{ Body: ImportLiteratureDto }>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const userId = (request as any).userId || 'asl-test-user-001';
|
||||
const { projectId, literatures } = request.body;
|
||||
|
||||
// 验证项目归属
|
||||
const project = await prisma.aslScreeningProject.findFirst({
|
||||
where: { id: projectId, userId },
|
||||
});
|
||||
|
||||
if (!project) {
|
||||
return reply.status(404).send({
|
||||
error: 'Project not found',
|
||||
});
|
||||
}
|
||||
|
||||
// 批量创建文献
|
||||
const created = await prisma.aslLiterature.createMany({
|
||||
data: literatures.map((lit) => ({
|
||||
projectId,
|
||||
pmid: lit.pmid,
|
||||
title: lit.title,
|
||||
abstract: lit.abstract,
|
||||
authors: lit.authors,
|
||||
journal: lit.journal,
|
||||
publicationYear: lit.publicationYear,
|
||||
doi: lit.doi,
|
||||
})),
|
||||
skipDuplicates: true, // 跳过重复的PMID
|
||||
});
|
||||
|
||||
logger.info('Literatures imported', {
|
||||
projectId,
|
||||
count: created.count,
|
||||
});
|
||||
|
||||
return reply.status(201).send({
|
||||
success: true,
|
||||
data: {
|
||||
importedCount: created.count,
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to import literatures', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to import literatures',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 从Excel文件导入文献
|
||||
*/
|
||||
export async function importLiteraturesFromExcel(
|
||||
request: FastifyRequest,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const userId = (request as any).userId || 'asl-test-user-001';
|
||||
|
||||
// 获取上传的文件
|
||||
const data = await request.file();
|
||||
if (!data) {
|
||||
return reply.status(400).send({
|
||||
error: 'No file uploaded',
|
||||
});
|
||||
}
|
||||
|
||||
const projectId = (request.body as any).projectId;
|
||||
if (!projectId) {
|
||||
return reply.status(400).send({
|
||||
error: 'projectId is required',
|
||||
});
|
||||
}
|
||||
|
||||
// 验证项目归属
|
||||
const project = await prisma.aslScreeningProject.findFirst({
|
||||
where: { id: projectId, userId },
|
||||
});
|
||||
|
||||
if (!project) {
|
||||
return reply.status(404).send({
|
||||
error: 'Project not found',
|
||||
});
|
||||
}
|
||||
|
||||
// 解析Excel(内存中)
|
||||
const buffer = await data.toBuffer();
|
||||
const workbook = XLSX.read(buffer, { type: 'buffer' });
|
||||
const sheetName = workbook.SheetNames[0];
|
||||
const sheet = workbook.Sheets[sheetName];
|
||||
const jsonData = XLSX.utils.sheet_to_json<any>(sheet);
|
||||
|
||||
// 映射字段(支持中英文列名)
|
||||
const literatures: LiteratureDto[] = jsonData.map((row) => ({
|
||||
pmid: row.PMID || row.pmid || row['PMID编号'],
|
||||
title: row.Title || row.title || row['标题'],
|
||||
abstract: row.Abstract || row.abstract || row['摘要'],
|
||||
authors: row.Authors || row.authors || row['作者'],
|
||||
journal: row.Journal || row.journal || row['期刊'],
|
||||
publicationYear: row.Year || row.year || row['年份'],
|
||||
doi: row.DOI || row.doi,
|
||||
}));
|
||||
|
||||
// 批量创建
|
||||
const created = await prisma.aslLiterature.createMany({
|
||||
data: literatures.map((lit) => ({
|
||||
projectId,
|
||||
...lit,
|
||||
})),
|
||||
skipDuplicates: true,
|
||||
});
|
||||
|
||||
logger.info('Literatures imported from Excel', {
|
||||
projectId,
|
||||
count: created.count,
|
||||
});
|
||||
|
||||
return reply.status(201).send({
|
||||
success: true,
|
||||
data: {
|
||||
importedCount: created.count,
|
||||
totalRows: jsonData.length,
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to import literatures from Excel', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to import literatures from Excel',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取项目的所有文献
|
||||
*/
|
||||
export async function getLiteratures(
|
||||
request: FastifyRequest<{
|
||||
Params: { projectId: string };
|
||||
Querystring: { page?: number; limit?: number };
|
||||
}>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const userId = (request as any).userId || 'asl-test-user-001';
|
||||
const { projectId } = request.params;
|
||||
const { page = 1, limit = 50 } = request.query;
|
||||
|
||||
// 验证项目归属
|
||||
const project = await prisma.aslScreeningProject.findFirst({
|
||||
where: { id: projectId, userId },
|
||||
});
|
||||
|
||||
if (!project) {
|
||||
return reply.status(404).send({
|
||||
error: 'Project not found',
|
||||
});
|
||||
}
|
||||
|
||||
const [literatures, total] = await Promise.all([
|
||||
prisma.aslLiterature.findMany({
|
||||
where: { projectId },
|
||||
skip: (page - 1) * limit,
|
||||
take: limit,
|
||||
orderBy: { createdAt: 'desc' },
|
||||
include: {
|
||||
screeningResults: {
|
||||
select: {
|
||||
conflictStatus: true,
|
||||
finalDecision: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
prisma.aslLiterature.count({
|
||||
where: { projectId },
|
||||
}),
|
||||
]);
|
||||
|
||||
return reply.send({
|
||||
success: true,
|
||||
data: {
|
||||
literatures,
|
||||
pagination: {
|
||||
page,
|
||||
limit,
|
||||
total,
|
||||
totalPages: Math.ceil(total / limit),
|
||||
},
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to get literatures', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to get literatures',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 删除文献
|
||||
*/
|
||||
export async function deleteLiterature(
|
||||
request: FastifyRequest<{ Params: { literatureId: string } }>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const userId = (request as any).userId || 'asl-test-user-001';
|
||||
const { literatureId } = request.params;
|
||||
|
||||
// 验证文献归属
|
||||
const literature = await prisma.aslLiterature.findFirst({
|
||||
where: {
|
||||
id: literatureId,
|
||||
project: { userId },
|
||||
},
|
||||
});
|
||||
|
||||
if (!literature) {
|
||||
return reply.status(404).send({
|
||||
error: 'Literature not found',
|
||||
});
|
||||
}
|
||||
|
||||
await prisma.aslLiterature.delete({
|
||||
where: { id: literatureId },
|
||||
});
|
||||
|
||||
logger.info('Literature deleted', { literatureId });
|
||||
|
||||
return reply.send({
|
||||
success: true,
|
||||
message: 'Literature deleted successfully',
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to delete literature', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to delete literature',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
224
backend/src/modules/asl/controllers/projectController.ts
Normal file
224
backend/src/modules/asl/controllers/projectController.ts
Normal file
@@ -0,0 +1,224 @@
|
||||
/**
|
||||
* ASL 筛选项目控制器
|
||||
*/
|
||||
|
||||
import { FastifyRequest, FastifyReply } from 'fastify';
|
||||
import { CreateScreeningProjectDto } from '../types/index.js';
|
||||
import { prisma } from '../../../config/database.js';
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
|
||||
/**
|
||||
* 创建筛选项目
|
||||
*/
|
||||
export async function createProject(
|
||||
request: FastifyRequest<{ Body: CreateScreeningProjectDto & { userId?: string } }>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
// 临时测试模式:优先从JWT获取,否则从请求体获取
|
||||
const userId = (request as any).userId || (request.body as any).userId || 'asl-test-user-001';
|
||||
const { projectName, picoCriteria, inclusionCriteria, exclusionCriteria, screeningConfig } = request.body;
|
||||
|
||||
// 验证必填字段
|
||||
if (!projectName || !picoCriteria || !inclusionCriteria || !exclusionCriteria) {
|
||||
return reply.status(400).send({
|
||||
error: 'Missing required fields',
|
||||
});
|
||||
}
|
||||
|
||||
// 创建项目
|
||||
const project = await prisma.aslScreeningProject.create({
|
||||
data: {
|
||||
userId,
|
||||
projectName,
|
||||
picoCriteria,
|
||||
inclusionCriteria,
|
||||
exclusionCriteria,
|
||||
screeningConfig: screeningConfig || {
|
||||
models: ['deepseek-chat', 'qwen-max'],
|
||||
temperature: 0,
|
||||
},
|
||||
status: 'draft',
|
||||
},
|
||||
});
|
||||
|
||||
logger.info('ASL screening project created', {
|
||||
projectId: project.id,
|
||||
userId,
|
||||
projectName,
|
||||
});
|
||||
|
||||
return reply.status(201).send({
|
||||
success: true,
|
||||
data: project,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to create ASL project', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to create project',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取用户的所有筛选项目
|
||||
*/
|
||||
export async function getProjects(request: FastifyRequest, reply: FastifyReply) {
|
||||
try {
|
||||
const userId = (request as any).userId || 'asl-test-user-001';
|
||||
|
||||
const projects = await prisma.aslScreeningProject.findMany({
|
||||
where: { userId },
|
||||
orderBy: { createdAt: 'desc' },
|
||||
include: {
|
||||
_count: {
|
||||
select: {
|
||||
literatures: true,
|
||||
screeningResults: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
return reply.send({
|
||||
success: true,
|
||||
data: projects,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to get ASL projects', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to get projects',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取单个项目详情
|
||||
*/
|
||||
export async function getProjectById(
|
||||
request: FastifyRequest<{ Params: { projectId: string } }>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const userId = (request as any).userId || 'asl-test-user-001';
|
||||
const { projectId } = request.params;
|
||||
|
||||
const project = await prisma.aslScreeningProject.findFirst({
|
||||
where: {
|
||||
id: projectId,
|
||||
userId,
|
||||
},
|
||||
include: {
|
||||
_count: {
|
||||
select: {
|
||||
literatures: true,
|
||||
screeningResults: true,
|
||||
screeningTasks: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
if (!project) {
|
||||
return reply.status(404).send({
|
||||
error: 'Project not found',
|
||||
});
|
||||
}
|
||||
|
||||
return reply.send({
|
||||
success: true,
|
||||
data: project,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to get ASL project', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to get project',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 更新项目
|
||||
*/
|
||||
export async function updateProject(
|
||||
request: FastifyRequest<{
|
||||
Params: { projectId: string };
|
||||
Body: Partial<CreateScreeningProjectDto>;
|
||||
}>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const userId = (request as any).userId || 'asl-test-user-001';
|
||||
const { projectId } = request.params;
|
||||
const updateData = request.body;
|
||||
|
||||
// 验证项目归属
|
||||
const existingProject = await prisma.aslScreeningProject.findFirst({
|
||||
where: { id: projectId, userId },
|
||||
});
|
||||
|
||||
if (!existingProject) {
|
||||
return reply.status(404).send({
|
||||
error: 'Project not found',
|
||||
});
|
||||
}
|
||||
|
||||
const project = await prisma.aslScreeningProject.update({
|
||||
where: { id: projectId },
|
||||
data: updateData,
|
||||
});
|
||||
|
||||
logger.info('ASL project updated', { projectId, userId });
|
||||
|
||||
return reply.send({
|
||||
success: true,
|
||||
data: project,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to update ASL project', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to update project',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 删除项目
|
||||
*/
|
||||
export async function deleteProject(
|
||||
request: FastifyRequest<{ Params: { projectId: string } }>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const userId = (request as any).userId || 'asl-test-user-001';
|
||||
const { projectId } = request.params;
|
||||
|
||||
// 验证项目归属
|
||||
const existingProject = await prisma.aslScreeningProject.findFirst({
|
||||
where: { id: projectId, userId },
|
||||
});
|
||||
|
||||
if (!existingProject) {
|
||||
return reply.status(404).send({
|
||||
error: 'Project not found',
|
||||
});
|
||||
}
|
||||
|
||||
await prisma.aslScreeningProject.delete({
|
||||
where: { id: projectId },
|
||||
});
|
||||
|
||||
logger.info('ASL project deleted', { projectId, userId });
|
||||
|
||||
return reply.send({
|
||||
success: true,
|
||||
message: 'Project deleted successfully',
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to delete ASL project', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to delete project',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
57
backend/src/modules/asl/routes/index.ts
Normal file
57
backend/src/modules/asl/routes/index.ts
Normal file
@@ -0,0 +1,57 @@
|
||||
/**
|
||||
* ASL模块路由注册
|
||||
*/
|
||||
|
||||
import { FastifyInstance } from 'fastify';
|
||||
import * as projectController from '../controllers/projectController.js';
|
||||
import * as literatureController from '../controllers/literatureController.js';
|
||||
|
||||
/**
 * Register all ASL-module HTTP routes on the given Fastify instance.
 * Handlers live in the project/literature controllers; screening-task
 * routes are stubbed out below until their controller exists.
 */
export async function aslRoutes(fastify: FastifyInstance) {
  // ==================== Screening project routes ====================

  // Create a screening project
  fastify.post('/projects', projectController.createProject);

  // List all projects owned by the current user
  fastify.get('/projects', projectController.getProjects);

  // Get one project's details
  fastify.get('/projects/:projectId', projectController.getProjectById);

  // Update a project
  fastify.put('/projects/:projectId', projectController.updateProject);

  // Delete a project
  fastify.delete('/projects/:projectId', projectController.deleteProject);

  // ==================== Literature management routes ====================

  // Import literatures (JSON payload)
  fastify.post('/literatures/import', literatureController.importLiteratures);

  // Import literatures (Excel upload)
  fastify.post('/literatures/import-excel', literatureController.importLiteraturesFromExcel);

  // List a project's literatures
  fastify.get('/projects/:projectId/literatures', literatureController.getLiteratures);

  // Delete a literature
  fastify.delete('/literatures/:literatureId', literatureController.deleteLiterature);

  // ==================== Screening task routes (to be implemented) ====================

  // TODO: start a screening task
  // fastify.post('/projects/:projectId/screening/start', screeningController.startScreening);

  // TODO: get screening progress
  // fastify.get('/tasks/:taskId/progress', screeningController.getProgress);

  // TODO: get screening results
  // fastify.get('/projects/:projectId/results', screeningController.getResults);

  // TODO: review conflicting literatures
  // fastify.post('/results/review', screeningController.reviewConflicts);
}
|
||||
|
||||
|
||||
|
||||
261
backend/src/modules/asl/schemas/screening.schema.ts
Normal file
261
backend/src/modules/asl/schemas/screening.schema.ts
Normal file
@@ -0,0 +1,261 @@
|
||||
/**
|
||||
* ASL LLM筛选输出的JSON Schema
|
||||
* 用于验证AI模型输出格式
|
||||
*/
|
||||
|
||||
import { JSONSchemaType } from 'ajv';
|
||||
import { LLMScreeningOutput } from '../types/index.js';
|
||||
|
||||
/**
 * JSON Schema (ajv) validating the LLM screening output.
 *
 * Note: judgment/evidence deliberately cover only P, I, C and S — the O
 * (outcome) dimension is not scored per-field in this MVP (see the
 * `required` arrays below). `additionalProperties: false` rejects any
 * extra keys the model invents.
 */
export const screeningOutputSchema: JSONSchemaType<LLMScreeningOutput> = {
  type: 'object',
  properties: {
    judgment: {
      type: 'object',
      properties: {
        P: { type: 'string', enum: ['match', 'partial', 'mismatch'] },
        I: { type: 'string', enum: ['match', 'partial', 'mismatch'] },
        C: { type: 'string', enum: ['match', 'partial', 'mismatch'] },
        S: { type: 'string', enum: ['match', 'partial', 'mismatch'] },
      },
      required: ['P', 'I', 'C', 'S'],
    },
    evidence: {
      type: 'object',
      properties: {
        P: { type: 'string' },
        I: { type: 'string' },
        C: { type: 'string' },
        S: { type: 'string' },
      },
      required: ['P', 'I', 'C', 'S'],
    },
    conclusion: {
      type: 'string',
      enum: ['include', 'exclude', 'uncertain'],
    },
    confidence: {
      type: 'number',
      minimum: 0,
      maximum: 1,
    },
    reason: {
      type: 'string',
    },
  },
  required: ['judgment', 'evidence', 'conclusion', 'confidence', 'reason'],
  additionalProperties: false,
};
|
||||
|
||||
/**
 * Screening style.
 * - lenient: prefer over-inclusion to missing studies (initial screening)
 * - standard: balance precision and recall (default)
 * - strict: prefer exclusion to false inclusion (refined screening)
 */
export type ScreeningStyle = 'lenient' | 'standard' | 'strict';
|
||||
|
||||
/**
 * Build the LLM screening prompt (v1.1.0 - supports three styles).
 *
 * The prompt text itself is intentionally Chinese — it is the runtime
 * instruction sent to the models, not documentation.
 *
 * @param title - literature title
 * @param abstract - literature abstract
 * @param picoCriteria - project PICO(S) criteria (population/intervention/
 *   comparison/outcome/studyDesign are interpolated below)
 * @param inclusionCriteria - free-text inclusion criteria
 * @param exclusionCriteria - free-text exclusion criteria
 * @param style - screening style:
 *   - lenient: prefer inclusion; never miss a possibly relevant study (initial screening)
 *   - standard: balance precision and recall (default)
 *   - strict: prefer exclusion; only keep clearly matching studies (refined screening)
 * @param authors / journal / publicationYear - optional metadata; each is
 *   appended to the prompt only when provided
 * @returns the complete prompt string, ending with a strict JSON output spec
 */
export function generateScreeningPrompt(
  title: string,
  abstract: string,
  picoCriteria: any,
  inclusionCriteria: string,
  exclusionCriteria: string,
  style: ScreeningStyle = 'standard',
  authors?: string,
  journal?: string,
  publicationYear?: number
): string {

  // Pick the prompt tone for the requested screening style; each entry
  // fills the same set of slots in the shared template below.
  const styleConfig = {
    lenient: {
      role: '你是一位经验丰富的系统综述专家,负责对医学文献进行**初步筛选(标题摘要筛选)**。',
      context: `⚠️ **重要提示**: 这是筛选流程的**第一步**,筛选后还需要下载全文进行复筛。因此:
- **宁可多纳入,也不要错过可能有价值的文献**
- **当信息不足时,倾向于"纳入"或"不确定",而非直接排除**
- **只排除明显不符合的文献**`,
      picoGuideline: `**⭐ 宽松模式原则**:
- 只要有部分匹配,就标记为 \`partial\`,不要轻易标记为 \`mismatch\`
- 信息不足时,倾向于 \`partial\` 而非 \`mismatch\``,
      decisionRules: `**⭐ 宽松模式决策规则**:
1. **优先纳入**: 当判断不确定时,选择 \`include\` 或 \`uncertain\`,而非 \`exclude\`
2. **只排除明显不符**: 只有当文献明确不符合核心PICO标准时才排除
3. **容忍边界情况**: 对于边界情况(如地域差异、时间窗口、对照类型),倾向于纳入
4. **看潜在价值**: 即使不完全匹配,但有参考价值的也纳入

**具体容忍规则**:
- **人群地域**: 即使不是目标地域,但研究结果有参考价值 → \`include\`
- **时间窗口**: 即使不完全在时间范围内,但研究方法可参考 → \`include\`
- **对照类型**: 即使对照不是安慰剂,但有对比意义 → \`include\`
- **研究设计**: 即使不是理想的RCT,但有科学价值 → \`include\``,
      confidenceRule: '**⭐ 宽松模式**: 置信度要求降低,0.5以上即可纳入',
      reasonExample: '虽然对照组不是安慰剂而是另一种药物,但研究方法严谨,结果有参考价值,且研究人群与目标人群有一定相似性。建议纳入全文复筛阶段进一步评估。',
      finalReminder: '**记住**: 这是**初筛**阶段,**宁可多纳入,也不要错过**。只要有任何可能的价值,就应该纳入全文复筛!'
    },
    standard: {
      role: '你是一位经验丰富的系统综述专家,负责根据PICO标准和纳排标准对医学文献进行初步筛选。',
      context: '',
      picoGuideline: '',
      decisionRules: '',
      confidenceRule: '',
      reasonExample: '具体说明你的筛选决策理由,需包含:(1)为什么纳入或排除 (2)哪些PICO标准符合或不符合 (3)是否有特殊考虑',
      finalReminder: '现在开始筛选,请严格按照JSON格式输出结果。'
    },
    strict: {
      role: '你是一位严谨的系统综述专家,负责根据PICO标准和纳排标准对医学文献进行**严格筛选**。',
      context: `⚠️ **重要提示**: 这是**严格筛选模式**,要求:
- **严格匹配PICO标准,任何维度不匹配都应排除**
- **对边界情况持保守态度**
- **优先排除而非纳入**
- **只纳入高度确定符合标准的文献**`,
      picoGuideline: `**⭐ 严格模式原则**:
- 只有**明确且完全匹配**才能标记为 \`match\`
- 任何不确定或不够明确的,标记为 \`partial\` 或 \`mismatch\`
- 对标准的理解要严格,不做宽松解释`,
      decisionRules: `**⭐ 严格模式决策规则**:
1. **一票否决**: 任何一个PICO维度为 \`mismatch\`,直接排除
2. **多个partial即排除**: 超过2个维度为 \`partial\`,也应排除
3. **触发任一排除标准**: 立即排除
4. **不确定时倾向排除**: 当信息不足无法判断时,倾向于排除
5. **要求高置信度**: 只有置信度≥0.8才纳入

**具体严格规则**:
- **人群地域**: 必须严格匹配目标地域,其他地域一律排除
- **时间窗口**: 必须严格在时间范围内,边界情况也排除
- **对照类型**: 必须是指定的对照类型(如安慰剂),其他对照排除
- **研究设计**: 必须是指定的研究设计,次优设计也排除`,
      confidenceRule: '**⭐ 严格模式**: 只有置信度≥0.8才能纳入',
      reasonExample: '虽然研究人群和干预措施匹配,但对照组为另一种药物而非安慰剂,不符合严格的对照要求。在严格筛选模式下,必须排除。',
      finalReminder: '**记住**: 这是**严格筛选**模式,**宁可错杀,不可放过**。只纳入**完全确定符合**所有标准的高质量文献!'
    }
  };

  const config = styleConfig[style];

  // Shared template; the style-specific slots are interpolated in place.
  return `${config.role}

${config.context}

## 研究方案信息

**PICO标准:**
- **P (研究人群)**: ${picoCriteria.population}
- **I (干预措施)**: ${picoCriteria.intervention}
- **C (对照)**: ${picoCriteria.comparison}
- **O (结局指标)**: ${picoCriteria.outcome}
- **S (研究设计)**: ${picoCriteria.studyDesign}

**纳入标准:**
${inclusionCriteria}

**排除标准:**
${exclusionCriteria}

---

## 待筛选文献

**标题:** ${title}

**摘要:** ${abstract}

${authors ? `**作者:** ${authors}` : ''}
${journal ? `**期刊:** ${journal}` : ''}
${publicationYear ? `**年份:** ${publicationYear}` : ''}

---

## 筛选任务

请按照以下步骤进行筛选:

### 步骤1: PICO逐项评估

对文献的每个PICO维度进行评估,判断是否匹配:
- **match** (匹配):文献明确符合该标准
- **partial** (部分匹配):文献部分符合,或表述不够明确
- **mismatch** (不匹配):文献明确不符合该标准

${config.picoGuideline}

### 步骤2: 提取证据

从标题和摘要中提取支持你判断的**原文片段**,每个维度给出具体证据。

### 步骤3: 综合决策

基于PICO评估、纳排标准,给出最终筛选决策:
- **include** (纳入):文献符合所有或大部分PICO标准,且满足纳入标准
- **exclude** (排除):文献明确不符合PICO标准,或触发排除标准
- **uncertain** (不确定):信息不足,无法做出明确判断

${config.decisionRules}

### 步骤4: 置信度评分

给出你对此判断的把握程度(0-1之间):
- **0.9-1.0**: 非常确定,有充分证据支持
- **0.7-0.9**: 比较确定,证据较为充分
- **0.5-0.7**: 中等把握,证据有限
- **0.0-0.5**: 不确定,信息严重不足

${config.confidenceRule}

---

## 输出格式要求

请**严格按照**以下JSON格式输出,不要添加任何额外文字:

⚠️ **重要**: 必须使用ASCII引号("),不要使用中文引号("")

\`\`\`json
{
  "judgment": {
    "P": "match",
    "I": "match",
    "C": "partial",
    "S": "match"
  },
  "evidence": {
    "P": "从摘要中引用支持P判断的原文",
    "I": "从摘要中引用支持I判断的原文",
    "C": "从摘要中引用支持C判断的原文",
    "S": "从摘要中引用支持S判断的原文"
  },
  "conclusion": "include",
  "confidence": 0.85,
  "reason": "${config.reasonExample}"
}
\`\`\`

## 关键约束

1. **judgment** 的每个字段只能是:\`"match"\`, \`"partial"\`, \`"mismatch"\`
2. **evidence** 必须引用原文,不要编造内容
3. **conclusion** 只能是:\`"include"\`, \`"exclude"\`, \`"uncertain"\`
4. **confidence** 必须是0-1之间的数字
5. **reason** 长度在50-300字之间,说理充分
6. 输出必须是合法的JSON格式

## 医学文献筛选原则

- 优先考虑研究设计的严谨性(RCT > 队列研究 > 病例对照)
- 标题和摘要信息不足时,倾向于 \`"uncertain"\` 而非直接排除
- 对于综述、系统评价、Meta分析,通常排除(除非方案特别说明)
- 动物实验、体外实验通常排除(除非方案特别说明)
- 会议摘要、病例报告通常排除
- 注意区分干预措施的具体类型(如药物剂量、手术方式)
- 结局指标要与方案一致(主要结局 vs 次要结局)

---

${config.finalReminder}
`;
}
|
||||
|
||||
237
backend/src/modules/asl/services/llmScreeningService.ts
Normal file
237
backend/src/modules/asl/services/llmScreeningService.ts
Normal file
@@ -0,0 +1,237 @@
|
||||
/**
|
||||
* ASL LLM筛选服务
|
||||
* 使用双模型策略进行文献筛选
|
||||
*/
|
||||
|
||||
import { LLMFactory } from '../../../common/llm/adapters/LLMFactory.js';
|
||||
import { ModelType } from '../../../common/llm/adapters/types.js';
|
||||
import { parseJSON } from '../../../common/utils/jsonParser.js';
|
||||
import Ajv from 'ajv';
|
||||
import { screeningOutputSchema, generateScreeningPrompt, type ScreeningStyle } from '../schemas/screening.schema.js';
|
||||
import { LLMScreeningOutput, DualModelScreeningResult, PicoCriteria } from '../types/index.js';
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
|
||||
// Ajv validator compiled once at module load and reused for every call.
const ajv = new Ajv();
const validate = ajv.compile(screeningOutputSchema);

// Model-name map: public/model-alias id -> internal ModelType understood
// by LLMFactory. Several aliases collapse onto the same adapter.
const MODEL_TYPE_MAP: Record<string, ModelType> = {
  'deepseek-chat': 'deepseek-v3',
  'deepseek-v3': 'deepseek-v3',
  'qwen-max': 'qwen3-72b', // ⭐ qwen-max = Qwen's newest, strongest model
  'qwen-plus': 'qwen3-72b', // qwen-plus = Qwen2.5-72B (secondary choice)
  'qwen3-72b': 'qwen3-72b',
  'qwen-long': 'qwen-long',
  'gpt-4o': 'gpt-5', // ⭐ gpt-4o maps to gpt-5
  'gpt-5-pro': 'gpt-5',
  'gpt-5': 'gpt-5',
  'claude-sonnet-4.5': 'claude-4.5', // ⭐ claude-sonnet-4.5 mapping
  'claude-sonnet-4-5-20250929': 'claude-4.5',
  'claude-4.5': 'claude-4.5',
};
|
||||
|
||||
export class LLMScreeningService {
  /**
   * Screen one literature with a single model.
   *
   * Maps the public model name to an internal ModelType, builds the
   * style-specific prompt, calls the model through the adapter factory,
   * then parses and schema-validates the JSON answer.
   *
   * @throws Error on unknown model names, unparsable output, or output
   *   that fails the screening-output JSON schema. Errors are logged and
   *   re-thrown for the caller to handle.
   */
  async screenWithModel(
    modelName: string,
    title: string,
    abstract: string,
    picoCriteria: PicoCriteria,
    inclusionCriteria: string,
    exclusionCriteria: string,
    style: ScreeningStyle = 'standard',
    authors?: string,
    journal?: string,
    publicationYear?: number
  ): Promise<LLMScreeningOutput> {
    try {
      // Map the public model name to an internal ModelType.
      const modelType = MODEL_TYPE_MAP[modelName];
      if (!modelType) {
        throw new Error(`Unsupported model name: ${modelName}. Supported models: ${Object.keys(MODEL_TYPE_MAP).join(', ')}`);
      }

      const prompt = generateScreeningPrompt(
        title,
        abstract,
        picoCriteria,
        inclusionCriteria,
        exclusionCriteria,
        style,
        authors,
        journal,
        publicationYear
      );

      const llmAdapter = LLMFactory.getAdapter(modelType);
      const response = await llmAdapter.chat([
        { role: 'user', content: prompt },
      ]);

      // Parse the model's reply as JSON.
      const parseResult = parseJSON(response.content);
      if (!parseResult.success || !parseResult.data) {
        logger.error('Failed to parse LLM output as JSON', {
          error: parseResult.error,
          rawOutput: parseResult.rawOutput,
        });
        throw new Error(`Failed to parse LLM output as JSON: ${parseResult.error}`);
      }

      // JSON Schema validation against screeningOutputSchema.
      const valid = validate(parseResult.data);
      if (!valid) {
        logger.error('LLM output validation failed', {
          errors: validate.errors,
          output: parseResult.data,
          rawOutput: parseResult.rawOutput,
        });
        throw new Error('LLM output does not match expected schema');
      }

      return parseResult.data as LLMScreeningOutput;
    } catch (error) {
      logger.error(`LLM screening failed with model ${modelName}`, {
        error,
        title,
      });
      throw error;
    }
  }

  /**
   * Dual-model parallel screening (core feature).
   *
   * Runs both models concurrently with the same style, then compares the
   * conclusions. Only a `conclusion` disagreement counts as a conflict;
   * per-dimension PICO differences are recorded in conflictFields but do
   * not by themselves flag a conflict.
   *
   * NOTE(review): the result fields are named deepseek/qwen but actually
   * hold models[0]/models[1] whatever they are — the *Model fields carry
   * the real model names.
   */
  async dualModelScreening(
    literatureId: string,
    title: string,
    abstract: string,
    picoCriteria: PicoCriteria,
    inclusionCriteria: string,
    exclusionCriteria: string,
    models: [string, string] = ['deepseek-chat', 'qwen-max'],
    style: ScreeningStyle = 'standard',
    authors?: string,
    journal?: string,
    publicationYear?: number
  ): Promise<DualModelScreeningResult> {
    const [model1, model2] = models;

    try {
      // Call both models in parallel with the same screening style.
      const [result1, result2] = await Promise.all([
        this.screenWithModel(model1, title, abstract, picoCriteria, inclusionCriteria, exclusionCriteria, style, authors, journal, publicationYear),
        this.screenWithModel(model2, title, abstract, picoCriteria, inclusionCriteria, exclusionCriteria, style, authors, journal, publicationYear),
      ]);

      // Conflict detection: only a conclusion mismatch counts as a
      // conflict; PICO-dimension differences do not.
      const conclusionMatch = result1.conclusion === result2.conclusion;
      const hasConflict = !conclusionMatch;

      // Record PICO-dimension differences (for logging/inspection only;
      // they do not affect the conflict decision).
      const { conflictFields } = this.detectConflict(result1, result2);

      // Final decision.
      let finalDecision: 'include' | 'exclude' | 'pending' = 'pending';
      if (conclusionMatch) {
        // Conclusions agree: adopt them ('uncertain' stays pending).
        finalDecision = result1.conclusion === 'uncertain' ? 'pending' : result1.conclusion;
      } else {
        // Conclusions disagree: pending (needs human review).
        finalDecision = 'pending';
      }

      return {
        literatureId,
        deepseek: result1,
        deepseekModel: model1,
        qwen: result2,
        qwenModel: model2,
        hasConflict,
        conflictFields: hasConflict ? conflictFields : undefined,
        finalDecision,
      };
    } catch (error) {
      logger.error('Dual model screening failed', {
        error,
        literatureId,
        title,
      });
      throw error;
    }
  }

  /**
   * Detect disagreements between two model outputs.
   * Collects every differing PICO dimension (P/I/C/S) plus a differing
   * final conclusion into conflictFields.
   */
  private detectConflict(
    result1: LLMScreeningOutput,
    result2: LLMScreeningOutput
  ): { hasConflict: boolean; conflictFields: string[] } {
    const conflictFields: string[] = [];

    // Compare the four scored PICO(S) dimensions.
    const dimensions = ['P', 'I', 'C', 'S'] as const;
    for (const dim of dimensions) {
      if (result1.judgment[dim] !== result2.judgment[dim]) {
        conflictFields.push(dim);
      }
    }

    // Compare the final conclusions.
    if (result1.conclusion !== result2.conclusion) {
      conflictFields.push('conclusion');
    }

    return {
      hasConflict: conflictFields.length > 0,
      conflictFields,
    };
  }

  /**
   * Screen a batch of literatures with bounded concurrency.
   *
   * Processes `concurrency` items at a time; within a batch, a single
   * failing item rejects the whole call (Promise.all semantics).
   * `models` undefined falls back to dualModelScreening's default pair.
   */
  async batchScreening(
    literatures: Array<{
      id: string;
      title: string;
      abstract: string;
    }>,
    picoCriteria: PicoCriteria,
    inclusionCriteria: string,
    exclusionCriteria: string,
    models?: [string, string],
    style: ScreeningStyle = 'standard',
    concurrency: number = 3
  ): Promise<DualModelScreeningResult[]> {
    const results: DualModelScreeningResult[] = [];

    // Process in chunks to bound the number of in-flight LLM calls.
    for (let i = 0; i < literatures.length; i += concurrency) {
      const batch = literatures.slice(i, i + concurrency);
      const batchResults = await Promise.all(
        batch.map((lit) =>
          this.dualModelScreening(
            lit.id,
            lit.title,
            lit.abstract,
            picoCriteria,
            inclusionCriteria,
            exclusionCriteria,
            models,
            style
          )
        )
      );
      results.push(...batchResults);
    }

    return results;
  }
}
|
||||
|
||||
// Module-level singleton instance of the screening service.
export const llmScreeningService = new LLMScreeningService();
|
||||
|
||||
122
backend/src/modules/asl/types/index.ts
Normal file
122
backend/src/modules/asl/types/index.ts
Normal file
@@ -0,0 +1,122 @@
|
||||
/**
|
||||
* ASL模块类型定义
|
||||
* 标题摘要初筛 MVP阶段
|
||||
*/
|
||||
|
||||
// ==================== Screening projects ====================

export interface PicoCriteria {
  population: string; // P: study population
  intervention: string; // I: intervention
  comparison: string; // C: comparator/control
  outcome: string; // O: outcome measures
  studyDesign: string; // S: study design type
}

export interface ScreeningConfig {
  models: string[]; // models to use, e.g. ["deepseek-chat", "qwen-max"]
  temperature: number; // sampling temperature; 0 recommended
  maxRetries?: number; // maximum retry count
}

// Payload for creating a screening project.
export interface CreateScreeningProjectDto {
  projectName: string;
  picoCriteria: PicoCriteria;
  inclusionCriteria: string;
  exclusionCriteria: string;
  screeningConfig?: ScreeningConfig;
}

// ==================== Literatures ====================

// A single literature record as imported (Excel row or JSON item).
export interface LiteratureDto {
  pmid?: string;
  title: string;
  abstract: string;
  authors?: string;
  journal?: string;
  publicationYear?: number;
  doi?: string;
}

// Payload for bulk-importing literatures into a project.
export interface ImportLiteratureDto {
  projectId: string;
  literatures: LiteratureDto[];
}
|
||||
|
||||
// ==================== LLM screening ====================

// Per-dimension PICO(S) match verdicts returned by the model.
// Note: the O (outcome) dimension is not scored per-field in this MVP.
export interface PicoJudgment {
  P: 'match' | 'partial' | 'mismatch';
  I: 'match' | 'partial' | 'mismatch';
  C: 'match' | 'partial' | 'mismatch';
  S: 'match' | 'partial' | 'mismatch';
}

// Verbatim quotes from the title/abstract supporting each verdict.
export interface PicoEvidence {
  P: string;
  I: string;
  C: string;
  S: string;
}

// The structured JSON a single model must return (validated by ajv).
export interface LLMScreeningOutput {
  judgment: PicoJudgment;
  evidence: PicoEvidence;
  conclusion: 'include' | 'exclude' | 'uncertain';
  confidence: number; // 0-1
  reason: string;
}

// Combined outcome of screening one literature with two models.
export interface DualModelScreeningResult {
  literatureId: string;

  // DeepSeek result (first configured model)
  deepseek: LLMScreeningOutput;
  deepseekModel: string;

  // Qwen result (second configured model)
  qwen: LLMScreeningOutput;
  qwenModel: string;

  // Conflict detection
  hasConflict: boolean;
  conflictFields?: string[]; // e.g. ['P', 'I', 'conclusion']

  // Final decision (set automatically when models agree; pending on conflict)
  finalDecision?: 'include' | 'exclude' | 'pending';
}
|
||||
|
||||
// ==================== Screening tasks ====================

export interface StartScreeningTaskDto {
  projectId: string;
  taskType: 'title_abstract' | 'full_text';
}

// Progress snapshot of a running/finished screening task.
export interface ScreeningTaskProgress {
  taskId: string;
  status: 'pending' | 'running' | 'completed' | 'failed';
  totalItems: number;
  processedItems: number;
  successItems: number;
  failedItems: number;
  conflictItems: number;
  estimatedEndAt?: Date;
}

// ==================== Review workbench ====================

// Human resolution of a single conflicting screening result.
export interface ConflictReviewDto {
  resultId: string;
  finalDecision: 'include' | 'exclude';
  exclusionReason?: string;
}

// Batch submission of conflict reviews for one project.
export interface BatchReviewDto {
  projectId: string;
  reviews: ConflictReviewDto[];
}
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user