refactor(asl): ASL frontend architecture refactoring with left navigation
- feat: Create ASLLayout component with 7-module left navigation - feat: Implement Title Screening Settings page with optimized PICOS layout - feat: Add placeholder pages for Workbench and Results - fix: Fix nested routing structure for React Router v6 - fix: Resolve Spin component warning in MainLayout - fix: Add QueryClientProvider to App.tsx - style: Optimize PICOS form layout (P+I left, C+O+S right) - style: Align Inclusion/Exclusion criteria side-by-side - docs: Add architecture refactoring and routing fix reports Ref: Week 2 Frontend Development Scope: ASL module MVP - Title Abstract Screening
This commit is contained in:
@@ -406,3 +406,5 @@ npm run dev
|
||||
**下一步:安装winston依赖,开始ASL模块开发!** 🚀
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
2
backend/src/common/cache/CacheAdapter.ts
vendored
2
backend/src/common/cache/CacheAdapter.ts
vendored
@@ -75,3 +75,5 @@ export interface CacheAdapter {
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
2
backend/src/common/cache/CacheFactory.ts
vendored
2
backend/src/common/cache/CacheFactory.ts
vendored
@@ -98,3 +98,5 @@ export class CacheFactory {
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
2
backend/src/common/cache/index.ts
vendored
2
backend/src/common/cache/index.ts
vendored
@@ -50,3 +50,5 @@ import { CacheFactory } from './CacheFactory.js'
|
||||
export const cache = CacheFactory.getInstance()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -25,3 +25,5 @@ export { registerHealthRoutes } from './healthCheck.js'
|
||||
export type { HealthCheckResponse } from './healthCheck.js'
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -81,3 +81,5 @@ export class JobFactory {
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -88,3 +88,5 @@ export interface JobQueue {
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
43
backend/src/common/llm/adapters/ClaudeAdapter.ts
Normal file
43
backend/src/common/llm/adapters/ClaudeAdapter.ts
Normal file
@@ -0,0 +1,43 @@
|
||||
import { CloseAIAdapter } from './CloseAIAdapter.js';
|
||||
|
||||
/**
 * Claude-4.5-Sonnet adapter (convenience wrapper).
 *
 * Reaches Anthropic's Claude-4.5-Sonnet model through the CloseAI proxy.
 *
 * Model characteristics (per internal evaluation):
 * - Accuracy: 93%
 * - Speed: medium
 * - Cost: ¥0.021 / 1K tokens
 * - Suited for: third-party arbitration, structured output, high-quality text generation
 *
 * Typical use cases:
 * - Dual-model comparison screening (DeepSeek vs GPT-5)
 * - Three-model consensus arbitration (DeepSeek + GPT-5 + Claude)
 * - Acting as an independent referee to resolve conflicting decisions
 *
 * Example:
 * ```typescript
 * import { ClaudeAdapter } from '@/common/llm/adapters';
 *
 * const claude = new ClaudeAdapter();
 * const response = await claude.chat([
 *   { role: 'user', content: '作为第三方仲裁,请判断文献是否应该纳入...' }
 * ]);
 * ```
 *
 * Reference: docs/02-通用能力层/01-LLM大模型网关/03-CloseAI集成指南.md
 */
export class ClaudeAdapter extends CloseAIAdapter {
  /**
   * @param modelName - model identifier, defaults to 'claude-sonnet-4-5-20250929'
   */
  constructor(modelName: string = 'claude-sonnet-4-5-20250929') {
    // Delegates all transport logic to CloseAIAdapter with the 'claude' provider,
    // which selects the Anthropic-compatible base URL and wire format.
    super('claude', modelName);
    console.log(`[ClaudeAdapter] 初始化完成,模型: ${modelName}`);
  }
}
|
||||
|
||||
|
||||
|
||||
344
backend/src/common/llm/adapters/CloseAIAdapter.ts
Normal file
344
backend/src/common/llm/adapters/CloseAIAdapter.ts
Normal file
@@ -0,0 +1,344 @@
|
||||
import axios from 'axios';
|
||||
import { ILLMAdapter, Message, LLMOptions, LLMResponse, StreamChunk } from './types.js';
|
||||
import { config } from '../../../config/env.js';
|
||||
|
||||
/**
|
||||
* CloseAI通用适配器
|
||||
*
|
||||
* 支持通过CloseAI代理访问:
|
||||
* - OpenAI GPT-5-Pro
|
||||
* - Anthropic Claude-4.5-Sonnet
|
||||
*
|
||||
* 设计原则:
|
||||
* - CloseAI提供OpenAI兼容的统一接口
|
||||
* - 通过不同的Base URL区分供应商
|
||||
* - 代码逻辑完全复用(OpenAI标准格式)
|
||||
*
|
||||
* 参考文档:docs/02-通用能力层/01-LLM大模型网关/03-CloseAI集成指南.md
|
||||
*/
|
||||
export class CloseAIAdapter implements ILLMAdapter {
|
||||
modelName: string;
|
||||
private apiKey: string;
|
||||
private baseURL: string;
|
||||
private provider: 'openai' | 'claude';
|
||||
|
||||
/**
|
||||
* 构造函数
|
||||
* @param provider - 供应商类型:'openai' 或 'claude'
|
||||
* @param modelName - 模型名称(如 'gpt-5-pro' 或 'claude-sonnet-4-5-20250929')
|
||||
*/
|
||||
constructor(provider: 'openai' | 'claude', modelName: string) {
|
||||
this.provider = provider;
|
||||
this.modelName = modelName;
|
||||
this.apiKey = config.closeaiApiKey || '';
|
||||
|
||||
// 根据供应商选择对应的Base URL
|
||||
this.baseURL = provider === 'openai'
|
||||
? config.closeaiOpenaiBaseUrl // https://api.openai-proxy.org/v1
|
||||
: config.closeaiClaudeBaseUrl; // https://api.openai-proxy.org/anthropic
|
||||
|
||||
// 验证API Key配置
|
||||
if (!this.apiKey) {
|
||||
throw new Error(
|
||||
'CloseAI API key is not configured. Please set CLOSEAI_API_KEY in .env file.'
|
||||
);
|
||||
}
|
||||
|
||||
console.log(`[CloseAIAdapter] 初始化完成`, {
|
||||
provider: this.provider,
|
||||
model: this.modelName,
|
||||
baseURL: this.baseURL,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* 非流式调用
|
||||
* - OpenAI系列:使用chat.completions格式
|
||||
* - Claude系列:使用messages格式(Anthropic SDK)
|
||||
*/
|
||||
async chat(messages: Message[], options?: LLMOptions): Promise<LLMResponse> {
|
||||
try {
|
||||
// Claude使用不同的API格式
|
||||
if (this.provider === 'claude') {
|
||||
return await this.chatClaude(messages, options);
|
||||
}
|
||||
|
||||
// OpenAI系列:标准格式(不包含temperature等可能不支持的参数)
|
||||
const requestBody: any = {
|
||||
model: this.modelName,
|
||||
messages: messages,
|
||||
max_tokens: options?.maxTokens ?? 2000,
|
||||
};
|
||||
|
||||
// 可选参数:只在提供时才添加
|
||||
if (options?.temperature !== undefined) {
|
||||
requestBody.temperature = options.temperature;
|
||||
}
|
||||
if (options?.topP !== undefined) {
|
||||
requestBody.top_p = options.topP;
|
||||
}
|
||||
|
||||
console.log(`[CloseAIAdapter] 发起非流式调用`, {
|
||||
provider: this.provider,
|
||||
model: this.modelName,
|
||||
messagesCount: messages.length,
|
||||
params: Object.keys(requestBody),
|
||||
});
|
||||
|
||||
const response = await axios.post(
|
||||
`${this.baseURL}/chat/completions`,
|
||||
requestBody,
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Authorization: `Bearer ${this.apiKey}`,
|
||||
},
|
||||
timeout: 180000, // 180秒超时(3分钟)- GPT-5和Claude可能需要更长时间
|
||||
}
|
||||
);
|
||||
|
||||
const choice = response.data.choices[0];
|
||||
|
||||
const result: LLMResponse = {
|
||||
content: choice.message.content,
|
||||
model: response.data.model,
|
||||
usage: {
|
||||
promptTokens: response.data.usage.prompt_tokens,
|
||||
completionTokens: response.data.usage.completion_tokens,
|
||||
totalTokens: response.data.usage.total_tokens,
|
||||
},
|
||||
finishReason: choice.finish_reason,
|
||||
};
|
||||
|
||||
console.log(`[CloseAIAdapter] 调用成功`, {
|
||||
provider: this.provider,
|
||||
model: result.model,
|
||||
tokens: result.usage?.totalTokens,
|
||||
contentLength: result.content.length,
|
||||
});
|
||||
|
||||
return result;
|
||||
} catch (error: unknown) {
|
||||
console.error(`[CloseAIAdapter] ${this.provider.toUpperCase()} API Error:`, error);
|
||||
|
||||
if (axios.isAxiosError(error)) {
|
||||
const errorMessage = error.response?.data?.error?.message || error.message;
|
||||
const statusCode = error.response?.status;
|
||||
|
||||
// 提供更友好的错误信息
|
||||
if (statusCode === 401) {
|
||||
throw new Error(
|
||||
`CloseAI认证失败: API Key无效或已过期。请检查 CLOSEAI_API_KEY 配置。`
|
||||
);
|
||||
} else if (statusCode === 429) {
|
||||
throw new Error(
|
||||
`CloseAI速率限制: 请求过于频繁,请稍后重试。`
|
||||
);
|
||||
} else if (statusCode === 500 || statusCode === 502 || statusCode === 503) {
|
||||
throw new Error(
|
||||
`CloseAI服务异常: 代理服务暂时不可用,请稍后重试。`
|
||||
);
|
||||
}
|
||||
|
||||
throw new Error(
|
||||
`CloseAI (${this.provider.toUpperCase()}) API调用失败: ${errorMessage}`
|
||||
);
|
||||
}
|
||||
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Claude专用调用方法
|
||||
* 使用Anthropic Messages API格式
|
||||
*/
|
||||
private async chatClaude(messages: Message[], options?: LLMOptions): Promise<LLMResponse> {
|
||||
try {
|
||||
const requestBody = {
|
||||
model: this.modelName,
|
||||
messages: messages,
|
||||
max_tokens: options?.maxTokens ?? 2000,
|
||||
};
|
||||
|
||||
console.log(`[CloseAIAdapter] 发起Claude调用`, {
|
||||
model: this.modelName,
|
||||
messagesCount: messages.length,
|
||||
});
|
||||
|
||||
const response = await axios.post(
|
||||
`${this.baseURL}/v1/messages`, // Anthropic使用 /v1/messages
|
||||
requestBody,
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': this.apiKey, // Anthropic使用 x-api-key 而不是 Authorization
|
||||
'anthropic-version': '2023-06-01', // Anthropic需要版本号
|
||||
},
|
||||
timeout: 180000,
|
||||
}
|
||||
);
|
||||
|
||||
// Anthropic的响应格式不同
|
||||
const content = response.data.content[0].text;
|
||||
|
||||
const result: LLMResponse = {
|
||||
content: content,
|
||||
model: response.data.model,
|
||||
usage: {
|
||||
promptTokens: response.data.usage.input_tokens,
|
||||
completionTokens: response.data.usage.output_tokens,
|
||||
totalTokens: response.data.usage.input_tokens + response.data.usage.output_tokens,
|
||||
},
|
||||
finishReason: response.data.stop_reason,
|
||||
};
|
||||
|
||||
console.log(`[CloseAIAdapter] Claude调用成功`, {
|
||||
model: result.model,
|
||||
tokens: result.usage?.totalTokens,
|
||||
contentLength: result.content.length,
|
||||
});
|
||||
|
||||
return result;
|
||||
} catch (error: unknown) {
|
||||
console.error(`[CloseAIAdapter] Claude API Error:`, error);
|
||||
|
||||
if (axios.isAxiosError(error)) {
|
||||
const errorMessage = error.response?.data?.error?.message || error.message;
|
||||
throw new Error(
|
||||
`CloseAI (Claude) API调用失败: ${errorMessage}`
|
||||
);
|
||||
}
|
||||
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 流式调用
|
||||
* - OpenAI系列:使用SSE格式
|
||||
* - Claude系列:暂不支持(可后续实现)
|
||||
*/
|
||||
async *chatStream(
|
||||
messages: Message[],
|
||||
options?: LLMOptions,
|
||||
onChunk?: (chunk: StreamChunk) => void
|
||||
): AsyncGenerator<StreamChunk, void, unknown> {
|
||||
// Claude流式调用暂不支持
|
||||
if (this.provider === 'claude') {
|
||||
throw new Error('Claude流式调用暂未实现,请使用非流式调用');
|
||||
}
|
||||
|
||||
try {
|
||||
// OpenAI系列:标准SSE格式
|
||||
const requestBody: any = {
|
||||
model: this.modelName,
|
||||
messages: messages,
|
||||
max_tokens: options?.maxTokens ?? 2000,
|
||||
stream: true,
|
||||
};
|
||||
|
||||
// 可选参数:只在提供时才添加
|
||||
if (options?.temperature !== undefined) {
|
||||
requestBody.temperature = options.temperature;
|
||||
}
|
||||
if (options?.topP !== undefined) {
|
||||
requestBody.top_p = options.topP;
|
||||
}
|
||||
|
||||
console.log(`[CloseAIAdapter] 发起流式调用`, {
|
||||
provider: this.provider,
|
||||
model: this.modelName,
|
||||
messagesCount: messages.length,
|
||||
});
|
||||
|
||||
const response = await axios.post(
|
||||
`${this.baseURL}/chat/completions`,
|
||||
requestBody,
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Authorization: `Bearer ${this.apiKey}`,
|
||||
},
|
||||
responseType: 'stream',
|
||||
timeout: 180000, // 180秒超时
|
||||
}
|
||||
);
|
||||
|
||||
const stream = response.data;
|
||||
let buffer = '';
|
||||
let chunkCount = 0;
|
||||
|
||||
for await (const chunk of stream) {
|
||||
buffer += chunk.toString();
|
||||
const lines = buffer.split('\n');
|
||||
buffer = lines.pop() || '';
|
||||
|
||||
for (const line of lines) {
|
||||
const trimmedLine = line.trim();
|
||||
|
||||
// 跳过空行和结束标记
|
||||
if (!trimmedLine || trimmedLine === 'data: [DONE]') {
|
||||
continue;
|
||||
}
|
||||
|
||||
// 解析SSE数据
|
||||
if (trimmedLine.startsWith('data: ')) {
|
||||
try {
|
||||
const jsonStr = trimmedLine.slice(6);
|
||||
const data = JSON.parse(jsonStr);
|
||||
|
||||
const choice = data.choices[0];
|
||||
const content = choice.delta?.content || '';
|
||||
|
||||
const streamChunk: StreamChunk = {
|
||||
content: content,
|
||||
done: choice.finish_reason === 'stop',
|
||||
model: data.model,
|
||||
};
|
||||
|
||||
// 如果流结束,附加usage信息
|
||||
if (choice.finish_reason === 'stop' && data.usage) {
|
||||
streamChunk.usage = {
|
||||
promptTokens: data.usage.prompt_tokens,
|
||||
completionTokens: data.usage.completion_tokens,
|
||||
totalTokens: data.usage.total_tokens,
|
||||
};
|
||||
}
|
||||
|
||||
chunkCount++;
|
||||
|
||||
// 回调函数(可选)
|
||||
if (onChunk) {
|
||||
onChunk(streamChunk);
|
||||
}
|
||||
|
||||
yield streamChunk;
|
||||
} catch (parseError) {
|
||||
console.error('[CloseAIAdapter] Failed to parse SSE data:', parseError);
|
||||
// 继续处理下一个chunk,不中断流
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`[CloseAIAdapter] 流式调用完成`, {
|
||||
provider: this.provider,
|
||||
model: this.modelName,
|
||||
chunksReceived: chunkCount,
|
||||
});
|
||||
} catch (error) {
|
||||
console.error(`[CloseAIAdapter] ${this.provider.toUpperCase()} Stream Error:`, error);
|
||||
|
||||
if (axios.isAxiosError(error)) {
|
||||
const errorMessage = error.response?.data?.error?.message || error.message;
|
||||
throw new Error(
|
||||
`CloseAI (${this.provider.toUpperCase()}) 流式调用失败: ${errorMessage}`
|
||||
);
|
||||
}
|
||||
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
41
backend/src/common/llm/adapters/GPT5Adapter.ts
Normal file
41
backend/src/common/llm/adapters/GPT5Adapter.ts
Normal file
@@ -0,0 +1,41 @@
|
||||
import { CloseAIAdapter } from './CloseAIAdapter.js';
|
||||
|
||||
/**
 * GPT adapter (convenience wrapper).
 *
 * Reaches OpenAI GPT models through the CloseAI proxy.
 *
 * NOTE(review): the class is named GPT5Adapter but defaults to 'gpt-4o'
 * (per the performance notes below) — confirm the naming is intentional.
 *
 * Model characteristics:
 * - Accuracy: high (GPT-4 tier)
 * - Speed: fast (1–2s responses)
 * - Cost: moderate
 * - Suited for: high-quality literature screening, complex reasoning, structured output
 *
 * Measured performance (via CloseAI):
 * - gpt-4o: 1.5s (recommended)
 * - gpt-4o-mini: 0.7s (budget option)
 * - gpt-5-pro: 50s (too slow on CloseAI, not recommended)
 *
 * Example:
 * ```typescript
 * import { GPT5Adapter } from '@/common/llm/adapters';
 *
 * const gpt = new GPT5Adapter(); // defaults to gpt-4o
 * const response = await gpt.chat([
 *   { role: 'user', content: '根据PICO标准筛选文献...' }
 * ]);
 * ```
 *
 * Reference: docs/02-通用能力层/01-LLM大模型网关/03-CloseAI集成指南.md
 */
export class GPT5Adapter extends CloseAIAdapter {
  /**
   * @param modelName - model identifier, defaults to 'gpt-4o' (chosen after performance testing)
   */
  constructor(modelName: string = 'gpt-4o') {
    // Delegates transport to CloseAIAdapter with the 'openai' provider.
    super('openai', modelName);
    console.log(`[GPT5Adapter] 初始化完成,模型: ${modelName}`);
  }
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import { ILLMAdapter, ModelType } from './types.js';
|
||||
import { DeepSeekAdapter } from './DeepSeekAdapter.js';
|
||||
import { QwenAdapter } from './QwenAdapter.js';
|
||||
import { GPT5Adapter } from './GPT5Adapter.js';
|
||||
import { ClaudeAdapter } from './ClaudeAdapter.js';
|
||||
|
||||
/**
|
||||
* LLM工厂类
|
||||
@@ -29,13 +31,21 @@ export class LLMFactory {
|
||||
break;
|
||||
|
||||
case 'qwen3-72b':
|
||||
adapter = new QwenAdapter('qwen-plus'); // Qwen3-72B对应的模型名
|
||||
adapter = new QwenAdapter('qwen-max'); // ⭐ 使用 qwen-max(Qwen最新最强模型)
|
||||
break;
|
||||
|
||||
case 'qwen-long':
|
||||
adapter = new QwenAdapter('qwen-long'); // 1M上下文超长文本模型
|
||||
break;
|
||||
|
||||
case 'gpt-5':
|
||||
adapter = new GPT5Adapter(); // ⭐ 通过CloseAI代理,默认使用 gpt-5-pro
|
||||
break;
|
||||
|
||||
case 'claude-4.5':
|
||||
adapter = new ClaudeAdapter('claude-sonnet-4-5-20250929'); // ⭐ 通过CloseAI代理
|
||||
break;
|
||||
|
||||
case 'gemini-pro':
|
||||
// TODO: 实现Gemini适配器
|
||||
throw new Error('Gemini adapter is not implemented yet');
|
||||
@@ -67,7 +77,7 @@ export class LLMFactory {
|
||||
* @returns 是否支持
|
||||
*/
|
||||
static isSupported(modelType: string): boolean {
|
||||
return ['deepseek-v3', 'qwen3-72b', 'qwen-long', 'gemini-pro'].includes(modelType);
|
||||
return ['deepseek-v3', 'qwen3-72b', 'qwen-long', 'gpt-5', 'claude-4.5', 'gemini-pro'].includes(modelType);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -75,7 +85,7 @@ export class LLMFactory {
|
||||
* @returns 支持的模型列表
|
||||
*/
|
||||
static getSupportedModels(): ModelType[] {
|
||||
return ['deepseek-v3', 'qwen3-72b', 'qwen-long', 'gemini-pro'];
|
||||
return ['deepseek-v3', 'qwen3-72b', 'qwen-long', 'gpt-5', 'claude-4.5', 'gemini-pro'];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -51,7 +51,13 @@ export interface ILLMAdapter {
|
||||
}
|
||||
|
||||
// 支持的模型类型
|
||||
export type ModelType = 'deepseek-v3' | 'qwen3-72b' | 'qwen-long' | 'gemini-pro';
|
||||
export type ModelType =
|
||||
| 'deepseek-v3' // DeepSeek-V3(直连)
|
||||
| 'qwen3-72b' // Qwen3-72B(阿里云)
|
||||
| 'qwen-long' // Qwen-Long 1M上下文(阿里云)
|
||||
| 'gpt-5' // GPT-5-Pro(CloseAI代理)⭐ 新增
|
||||
| 'claude-4.5' // Claude-4.5-Sonnet(CloseAI代理)⭐ 新增
|
||||
| 'gemini-pro'; // Gemini-Pro(预留)
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -36,3 +36,5 @@ export {
|
||||
export { default } from './logger.js'
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -39,3 +39,5 @@
|
||||
export { Metrics, requestTimingHook, responseTimingHook } from './metrics.js'
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -65,3 +65,5 @@ export interface StorageAdapter {
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -20,10 +20,37 @@ export interface ParseResult<T = any> {
|
||||
* 3. 带后缀:{ "key": "value" }\n\n以上是提取结果
|
||||
* 4. 代码块:```json\n{ "key": "value" }\n```
|
||||
*/
|
||||
/**
|
||||
* 清理JSON字符串,修复常见格式问题
|
||||
* @param text - 原始文本
|
||||
* @returns 清理后的文本
|
||||
*/
|
||||
function cleanJSONString(text: string): string {
|
||||
let cleaned = text;
|
||||
|
||||
// 1. 替换中文引号为ASCII引号(国际模型常见问题)
|
||||
cleaned = cleaned.replace(/"/g, '"'); // 中文左引号
|
||||
cleaned = cleaned.replace(/"/g, '"'); // 中文右引号
|
||||
cleaned = cleaned.replace(/'/g, "'"); // 中文左单引号
|
||||
cleaned = cleaned.replace(/'/g, "'"); // 中文右单引号
|
||||
|
||||
// 2. 替换全角逗号、冒号为半角
|
||||
cleaned = cleaned.replace(/,/g, ',');
|
||||
cleaned = cleaned.replace(/:/g, ':');
|
||||
|
||||
// 3. 移除零宽字符和不可见字符
|
||||
cleaned = cleaned.replace(/[\u200B-\u200D\uFEFF]/g, '');
|
||||
|
||||
return cleaned;
|
||||
}
|
||||
|
||||
export function extractJSON(text: string): string | null {
|
||||
// 预处理:清理常见格式问题
|
||||
const cleanedText = cleanJSONString(text);
|
||||
|
||||
// 尝试1:直接查找 {...} 或 [...]
|
||||
const jsonPattern = /(\{[\s\S]*\}|\[[\s\S]*\])/;
|
||||
const match = text.match(jsonPattern);
|
||||
const match = cleanedText.match(jsonPattern);
|
||||
|
||||
if (match) {
|
||||
return match[1];
|
||||
@@ -31,7 +58,7 @@ export function extractJSON(text: string): string | null {
|
||||
|
||||
// 尝试2:查找代码块中的JSON
|
||||
const codeBlockPattern = /```(?:json)?\s*\n?([\s\S]*?)\n?```/;
|
||||
const codeMatch = text.match(codeBlockPattern);
|
||||
const codeMatch = cleanedText.match(codeBlockPattern);
|
||||
|
||||
if (codeMatch) {
|
||||
return codeMatch[1].trim();
|
||||
|
||||
@@ -10,6 +10,7 @@ import knowledgeBaseRoutes from './legacy/routes/knowledgeBases.js';
|
||||
import { chatRoutes } from './legacy/routes/chatRoutes.js';
|
||||
import { batchRoutes } from './legacy/routes/batchRoutes.js';
|
||||
import reviewRoutes from './legacy/routes/reviewRoutes.js';
|
||||
import { aslRoutes } from './modules/asl/routes/index.js';
|
||||
import { registerHealthRoutes } from './common/health/index.js';
|
||||
import { logger } from './common/logging/index.js';
|
||||
import { registerTestRoutes } from './test-platform-api.js';
|
||||
@@ -98,6 +99,12 @@ await fastify.register(batchRoutes, { prefix: '/api/v1' });
|
||||
// 注册稿件审查路由
|
||||
await fastify.register(reviewRoutes, { prefix: '/api/v1' });
|
||||
|
||||
// ============================================
|
||||
// 【业务模块】ASL - AI智能文献筛选
|
||||
// ============================================
|
||||
await fastify.register(aslRoutes, { prefix: '/api/v1/asl' });
|
||||
logger.info('✅ ASL智能文献筛选路由已注册: /api/v1/asl');
|
||||
|
||||
// 启动服务器
|
||||
const start = async () => {
|
||||
try {
|
||||
|
||||
258
backend/src/modules/asl/controllers/literatureController.ts
Normal file
258
backend/src/modules/asl/controllers/literatureController.ts
Normal file
@@ -0,0 +1,258 @@
|
||||
/**
|
||||
* ASL 文献控制器
|
||||
*/
|
||||
|
||||
import { FastifyRequest, FastifyReply } from 'fastify';
|
||||
import { ImportLiteratureDto, LiteratureDto } from '../types/index.js';
|
||||
import { prisma } from '../../../config/database.js';
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
import * as XLSX from 'xlsx';
|
||||
|
||||
/**
 * Bulk-import literatures from a JSON payload.
 *
 * Verifies that the target project belongs to the requesting user, then
 * bulk-inserts the records, skipping duplicates.
 *
 * @returns 201 with the imported row count; 404 when the project is not
 *          owned by the user; 500 on unexpected failure
 */
export async function importLiteratures(
  request: FastifyRequest<{ Body: ImportLiteratureDto }>,
  reply: FastifyReply
) {
  try {
    // Temporary test mode: fall back to a fixed test user when no
    // authenticated userId is attached to the request.
    const userId = (request as any).userId || 'asl-test-user-001';
    const { projectId, literatures } = request.body;

    // Ownership check: the project must belong to this user.
    const project = await prisma.aslScreeningProject.findFirst({
      where: { id: projectId, userId },
    });

    if (!project) {
      return reply.status(404).send({
        error: 'Project not found',
      });
    }

    // Bulk insert the literature records.
    const created = await prisma.aslLiterature.createMany({
      data: literatures.map((lit) => ({
        projectId,
        pmid: lit.pmid,
        title: lit.title,
        abstract: lit.abstract,
        authors: lit.authors,
        journal: lit.journal,
        publicationYear: lit.publicationYear,
        doi: lit.doi,
      })),
      skipDuplicates: true, // skip records that collide on a unique key (duplicate PMIDs)
    });

    logger.info('Literatures imported', {
      projectId,
      count: created.count,
    });

    return reply.status(201).send({
      success: true,
      data: {
        importedCount: created.count,
      },
    });
  } catch (error) {
    logger.error('Failed to import literatures', { error });
    return reply.status(500).send({
      error: 'Failed to import literatures',
    });
  }
}
|
||||
|
||||
/**
 * Import literatures from an uploaded Excel file.
 *
 * Reads the first worksheet of the uploaded workbook in memory, maps columns
 * (supporting both English and Chinese header names), verifies project
 * ownership, and bulk-inserts the rows.
 *
 * @returns 201 with imported/total row counts; 400 when the file or projectId
 *          is missing; 404 when the project is not owned by the user;
 *          500 on unexpected failure
 */
export async function importLiteraturesFromExcel(
  request: FastifyRequest,
  reply: FastifyReply
) {
  try {
    // Temporary test mode: fixed fallback user when no JWT user is attached.
    const userId = (request as any).userId || 'asl-test-user-001';

    // Fetch the uploaded multipart file.
    const data = await request.file();
    if (!data) {
      return reply.status(400).send({
        error: 'No file uploaded',
      });
    }

    // NOTE(review): with @fastify/multipart, request.body is typically not
    // populated when consuming the upload via request.file(); form fields
    // usually arrive on data.fields — confirm against the multipart config.
    const projectId = (request.body as any).projectId;
    if (!projectId) {
      return reply.status(400).send({
        error: 'projectId is required',
      });
    }

    // Ownership check: the project must belong to this user.
    const project = await prisma.aslScreeningProject.findFirst({
      where: { id: projectId, userId },
    });

    if (!project) {
      return reply.status(404).send({
        error: 'Project not found',
      });
    }

    // Parse the workbook entirely in memory (no temp files).
    const buffer = await data.toBuffer();
    const workbook = XLSX.read(buffer, { type: 'buffer' });
    const sheetName = workbook.SheetNames[0];
    const sheet = workbook.Sheets[sheetName];
    const jsonData = XLSX.utils.sheet_to_json<any>(sheet);

    // Map spreadsheet columns to the DTO, accepting English and Chinese headers.
    const literatures: LiteratureDto[] = jsonData.map((row) => ({
      pmid: row.PMID || row.pmid || row['PMID编号'],
      title: row.Title || row.title || row['标题'],
      abstract: row.Abstract || row.abstract || row['摘要'],
      authors: row.Authors || row.authors || row['作者'],
      journal: row.Journal || row.journal || row['期刊'],
      publicationYear: row.Year || row.year || row['年份'],
      doi: row.DOI || row.doi,
    }));

    // Bulk insert, skipping rows that collide on a unique key.
    const created = await prisma.aslLiterature.createMany({
      data: literatures.map((lit) => ({
        projectId,
        ...lit,
      })),
      skipDuplicates: true,
    });

    logger.info('Literatures imported from Excel', {
      projectId,
      count: created.count,
    });

    return reply.status(201).send({
      success: true,
      data: {
        importedCount: created.count,
        totalRows: jsonData.length,
      },
    });
  } catch (error) {
    logger.error('Failed to import literatures from Excel', { error });
    return reply.status(500).send({
      error: 'Failed to import literatures from Excel',
    });
  }
}
|
||||
|
||||
/**
|
||||
* 获取项目的所有文献
|
||||
*/
|
||||
export async function getLiteratures(
|
||||
request: FastifyRequest<{
|
||||
Params: { projectId: string };
|
||||
Querystring: { page?: number; limit?: number };
|
||||
}>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const userId = (request as any).userId || 'asl-test-user-001';
|
||||
const { projectId } = request.params;
|
||||
const { page = 1, limit = 50 } = request.query;
|
||||
|
||||
// 验证项目归属
|
||||
const project = await prisma.aslScreeningProject.findFirst({
|
||||
where: { id: projectId, userId },
|
||||
});
|
||||
|
||||
if (!project) {
|
||||
return reply.status(404).send({
|
||||
error: 'Project not found',
|
||||
});
|
||||
}
|
||||
|
||||
const [literatures, total] = await Promise.all([
|
||||
prisma.aslLiterature.findMany({
|
||||
where: { projectId },
|
||||
skip: (page - 1) * limit,
|
||||
take: limit,
|
||||
orderBy: { createdAt: 'desc' },
|
||||
include: {
|
||||
screeningResults: {
|
||||
select: {
|
||||
conflictStatus: true,
|
||||
finalDecision: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
prisma.aslLiterature.count({
|
||||
where: { projectId },
|
||||
}),
|
||||
]);
|
||||
|
||||
return reply.send({
|
||||
success: true,
|
||||
data: {
|
||||
literatures,
|
||||
pagination: {
|
||||
page,
|
||||
limit,
|
||||
total,
|
||||
totalPages: Math.ceil(total / limit),
|
||||
},
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to get literatures', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to get literatures',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 删除文献
|
||||
*/
|
||||
export async function deleteLiterature(
|
||||
request: FastifyRequest<{ Params: { literatureId: string } }>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const userId = (request as any).userId || 'asl-test-user-001';
|
||||
const { literatureId } = request.params;
|
||||
|
||||
// 验证文献归属
|
||||
const literature = await prisma.aslLiterature.findFirst({
|
||||
where: {
|
||||
id: literatureId,
|
||||
project: { userId },
|
||||
},
|
||||
});
|
||||
|
||||
if (!literature) {
|
||||
return reply.status(404).send({
|
||||
error: 'Literature not found',
|
||||
});
|
||||
}
|
||||
|
||||
await prisma.aslLiterature.delete({
|
||||
where: { id: literatureId },
|
||||
});
|
||||
|
||||
logger.info('Literature deleted', { literatureId });
|
||||
|
||||
return reply.send({
|
||||
success: true,
|
||||
message: 'Literature deleted successfully',
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to delete literature', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to delete literature',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
224
backend/src/modules/asl/controllers/projectController.ts
Normal file
224
backend/src/modules/asl/controllers/projectController.ts
Normal file
@@ -0,0 +1,224 @@
|
||||
/**
|
||||
* ASL 筛选项目控制器
|
||||
*/
|
||||
|
||||
import { FastifyRequest, FastifyReply } from 'fastify';
|
||||
import { CreateScreeningProjectDto } from '../types/index.js';
|
||||
import { prisma } from '../../../config/database.js';
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
|
||||
/**
 * Create a screening project.
 *
 * Validates the required PICOS fields, then persists the project in 'draft'
 * status with a default dual-model screening configuration when none is given.
 *
 * @returns 201 with the created project; 400 when required fields are
 *          missing; 500 on unexpected failure
 */
export async function createProject(
  request: FastifyRequest<{ Body: CreateScreeningProjectDto & { userId?: string } }>,
  reply: FastifyReply
) {
  try {
    // Temporary test mode: prefer the JWT-derived userId, then the request
    // body, then a fixed test user.
    const userId = (request as any).userId || (request.body as any).userId || 'asl-test-user-001';
    const { projectName, picoCriteria, inclusionCriteria, exclusionCriteria, screeningConfig } = request.body;

    // Validate required fields.
    if (!projectName || !picoCriteria || !inclusionCriteria || !exclusionCriteria) {
      return reply.status(400).send({
        error: 'Missing required fields',
      });
    }

    // Create the project; default config runs both screening models at
    // temperature 0 for deterministic output.
    const project = await prisma.aslScreeningProject.create({
      data: {
        userId,
        projectName,
        picoCriteria,
        inclusionCriteria,
        exclusionCriteria,
        screeningConfig: screeningConfig || {
          models: ['deepseek-chat', 'qwen-max'],
          temperature: 0,
        },
        status: 'draft',
      },
    });

    logger.info('ASL screening project created', {
      projectId: project.id,
      userId,
      projectName,
    });

    return reply.status(201).send({
      success: true,
      data: project,
    });
  } catch (error) {
    logger.error('Failed to create ASL project', { error });
    return reply.status(500).send({
      error: 'Failed to create project',
    });
  }
}
|
||||
|
||||
/**
|
||||
* 获取用户的所有筛选项目
|
||||
*/
|
||||
export async function getProjects(request: FastifyRequest, reply: FastifyReply) {
|
||||
try {
|
||||
const userId = (request as any).userId || 'asl-test-user-001';
|
||||
|
||||
const projects = await prisma.aslScreeningProject.findMany({
|
||||
where: { userId },
|
||||
orderBy: { createdAt: 'desc' },
|
||||
include: {
|
||||
_count: {
|
||||
select: {
|
||||
literatures: true,
|
||||
screeningResults: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
return reply.send({
|
||||
success: true,
|
||||
data: projects,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to get ASL projects', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to get projects',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Fetch one screening project, scoped to the requesting user, including
 * counts of its literatures, screening results and screening tasks.
 *
 * @returns 200 with the project; 404 when not found or not owned by the
 *          user; 500 on unexpected failure
 */
export async function getProjectById(
  request: FastifyRequest<{ Params: { projectId: string } }>,
  reply: FastifyReply
) {
  try {
    // Temporary test mode: fixed fallback user when no JWT user is attached.
    const userId = (request as any).userId || 'asl-test-user-001';
    const { projectId } = request.params;

    // Scoping the query by userId makes "not owned" indistinguishable from
    // "does not exist" — both yield a 404.
    const project = await prisma.aslScreeningProject.findFirst({
      where: {
        id: projectId,
        userId,
      },
      include: {
        _count: {
          select: {
            literatures: true,
            screeningResults: true,
            screeningTasks: true,
          },
        },
      },
    });

    if (!project) {
      return reply.status(404).send({
        error: 'Project not found',
      });
    }

    return reply.send({
      success: true,
      data: project,
    });
  } catch (error) {
    logger.error('Failed to get ASL project', { error });
    return reply.status(500).send({
      error: 'Failed to get project',
    });
  }
}
|
||||
|
||||
/**
|
||||
* 更新项目
|
||||
*/
|
||||
export async function updateProject(
|
||||
request: FastifyRequest<{
|
||||
Params: { projectId: string };
|
||||
Body: Partial<CreateScreeningProjectDto>;
|
||||
}>,
|
||||
reply: FastifyReply
|
||||
) {
|
||||
try {
|
||||
const userId = (request as any).userId || 'asl-test-user-001';
|
||||
const { projectId } = request.params;
|
||||
const updateData = request.body;
|
||||
|
||||
// 验证项目归属
|
||||
const existingProject = await prisma.aslScreeningProject.findFirst({
|
||||
where: { id: projectId, userId },
|
||||
});
|
||||
|
||||
if (!existingProject) {
|
||||
return reply.status(404).send({
|
||||
error: 'Project not found',
|
||||
});
|
||||
}
|
||||
|
||||
const project = await prisma.aslScreeningProject.update({
|
||||
where: { id: projectId },
|
||||
data: updateData,
|
||||
});
|
||||
|
||||
logger.info('ASL project updated', { projectId, userId });
|
||||
|
||||
return reply.send({
|
||||
success: true,
|
||||
data: project,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('Failed to update ASL project', { error });
|
||||
return reply.status(500).send({
|
||||
error: 'Failed to update project',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Delete a screening project.
 *
 * Ownership is verified first; deletion cascade behavior for related
 * literatures/results is governed by the Prisma schema.
 *
 * @returns 200 on success; 404 when the project is not owned by the user;
 *          500 on unexpected failure
 */
export async function deleteProject(
  request: FastifyRequest<{ Params: { projectId: string } }>,
  reply: FastifyReply
) {
  try {
    // Temporary test mode: fixed fallback user when no JWT user is attached.
    const userId = (request as any).userId || 'asl-test-user-001';
    const { projectId } = request.params;

    // Ownership check: the project must belong to this user.
    const existingProject = await prisma.aslScreeningProject.findFirst({
      where: { id: projectId, userId },
    });

    if (!existingProject) {
      return reply.status(404).send({
        error: 'Project not found',
      });
    }

    await prisma.aslScreeningProject.delete({
      where: { id: projectId },
    });

    logger.info('ASL project deleted', { projectId, userId });

    return reply.send({
      success: true,
      message: 'Project deleted successfully',
    });
  } catch (error) {
    logger.error('Failed to delete ASL project', { error });
    return reply.status(500).send({
      error: 'Failed to delete project',
    });
  }
}
|
||||
|
||||
57
backend/src/modules/asl/routes/index.ts
Normal file
57
backend/src/modules/asl/routes/index.ts
Normal file
@@ -0,0 +1,57 @@
|
||||
/**
|
||||
* ASL模块路由注册
|
||||
*/
|
||||
|
||||
import { FastifyInstance } from 'fastify';
|
||||
import * as projectController from '../controllers/projectController.js';
|
||||
import * as literatureController from '../controllers/literatureController.js';
|
||||
|
||||
export async function aslRoutes(fastify: FastifyInstance) {
|
||||
// ==================== 筛选项目路由 ====================
|
||||
|
||||
// 创建筛选项目
|
||||
fastify.post('/projects', projectController.createProject);
|
||||
|
||||
// 获取用户的所有项目
|
||||
fastify.get('/projects', projectController.getProjects);
|
||||
|
||||
// 获取单个项目详情
|
||||
fastify.get('/projects/:projectId', projectController.getProjectById);
|
||||
|
||||
// 更新项目
|
||||
fastify.put('/projects/:projectId', projectController.updateProject);
|
||||
|
||||
// 删除项目
|
||||
fastify.delete('/projects/:projectId', projectController.deleteProject);
|
||||
|
||||
// ==================== 文献管理路由 ====================
|
||||
|
||||
// 导入文献(JSON)
|
||||
fastify.post('/literatures/import', literatureController.importLiteratures);
|
||||
|
||||
// 导入文献(Excel上传)
|
||||
fastify.post('/literatures/import-excel', literatureController.importLiteraturesFromExcel);
|
||||
|
||||
// 获取项目的文献列表
|
||||
fastify.get('/projects/:projectId/literatures', literatureController.getLiteratures);
|
||||
|
||||
// 删除文献
|
||||
fastify.delete('/literatures/:literatureId', literatureController.deleteLiterature);
|
||||
|
||||
// ==================== 筛选任务路由(后续实现) ====================
|
||||
|
||||
// TODO: 启动筛选任务
|
||||
// fastify.post('/projects/:projectId/screening/start', screeningController.startScreening);
|
||||
|
||||
// TODO: 获取筛选进度
|
||||
// fastify.get('/tasks/:taskId/progress', screeningController.getProgress);
|
||||
|
||||
// TODO: 获取筛选结果
|
||||
// fastify.get('/projects/:projectId/results', screeningController.getResults);
|
||||
|
||||
// TODO: 审核冲突文献
|
||||
// fastify.post('/results/review', screeningController.reviewConflicts);
|
||||
}
|
||||
|
||||
|
||||
|
||||
261
backend/src/modules/asl/schemas/screening.schema.ts
Normal file
261
backend/src/modules/asl/schemas/screening.schema.ts
Normal file
@@ -0,0 +1,261 @@
|
||||
/**
|
||||
* ASL LLM筛选输出的JSON Schema
|
||||
* 用于验证AI模型输出格式
|
||||
*/
|
||||
|
||||
import { JSONSchemaType } from 'ajv';
|
||||
import { LLMScreeningOutput } from '../types/index.js';
|
||||
|
||||
export const screeningOutputSchema: JSONSchemaType<LLMScreeningOutput> = {
|
||||
type: 'object',
|
||||
properties: {
|
||||
judgment: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
P: { type: 'string', enum: ['match', 'partial', 'mismatch'] },
|
||||
I: { type: 'string', enum: ['match', 'partial', 'mismatch'] },
|
||||
C: { type: 'string', enum: ['match', 'partial', 'mismatch'] },
|
||||
S: { type: 'string', enum: ['match', 'partial', 'mismatch'] },
|
||||
},
|
||||
required: ['P', 'I', 'C', 'S'],
|
||||
},
|
||||
evidence: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
P: { type: 'string' },
|
||||
I: { type: 'string' },
|
||||
C: { type: 'string' },
|
||||
S: { type: 'string' },
|
||||
},
|
||||
required: ['P', 'I', 'C', 'S'],
|
||||
},
|
||||
conclusion: {
|
||||
type: 'string',
|
||||
enum: ['include', 'exclude', 'uncertain'],
|
||||
},
|
||||
confidence: {
|
||||
type: 'number',
|
||||
minimum: 0,
|
||||
maximum: 1,
|
||||
},
|
||||
reason: {
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
required: ['judgment', 'evidence', 'conclusion', 'confidence', 'reason'],
|
||||
additionalProperties: false,
|
||||
};
|
||||
|
||||
/**
|
||||
* 筛选风格类型
|
||||
*/
|
||||
export type ScreeningStyle = 'lenient' | 'standard' | 'strict';
|
||||
|
||||
/**
|
||||
* 生成LLM筛选的Prompt (v1.1.0 - 支持三种风格)
|
||||
*
|
||||
* @param style - 筛选风格:
|
||||
* - lenient: 宽松模式,宁可多纳入也不错过(适合初筛)
|
||||
* - standard: 标准模式,平衡准确率和召回率(默认)
|
||||
* - strict: 严格模式,宁可错杀也不放过(适合精筛)
|
||||
*/
|
||||
export function generateScreeningPrompt(
|
||||
title: string,
|
||||
abstract: string,
|
||||
picoCriteria: any,
|
||||
inclusionCriteria: string,
|
||||
exclusionCriteria: string,
|
||||
style: ScreeningStyle = 'standard',
|
||||
authors?: string,
|
||||
journal?: string,
|
||||
publicationYear?: number
|
||||
): string {
|
||||
|
||||
// 根据风格选择不同的Prompt基调
|
||||
const styleConfig = {
|
||||
lenient: {
|
||||
role: '你是一位经验丰富的系统综述专家,负责对医学文献进行**初步筛选(标题摘要筛选)**。',
|
||||
context: `⚠️ **重要提示**: 这是筛选流程的**第一步**,筛选后还需要下载全文进行复筛。因此:
|
||||
- **宁可多纳入,也不要错过可能有价值的文献**
|
||||
- **当信息不足时,倾向于"纳入"或"不确定",而非直接排除**
|
||||
- **只排除明显不符合的文献**`,
|
||||
picoGuideline: `**⭐ 宽松模式原则**:
|
||||
- 只要有部分匹配,就标记为 \`partial\`,不要轻易标记为 \`mismatch\`
|
||||
- 信息不足时,倾向于 \`partial\` 而非 \`mismatch\``,
|
||||
decisionRules: `**⭐ 宽松模式决策规则**:
|
||||
1. **优先纳入**: 当判断不确定时,选择 \`include\` 或 \`uncertain\`,而非 \`exclude\`
|
||||
2. **只排除明显不符**: 只有当文献明确不符合核心PICO标准时才排除
|
||||
3. **容忍边界情况**: 对于边界情况(如地域差异、时间窗口、对照类型),倾向于纳入
|
||||
4. **看潜在价值**: 即使不完全匹配,但有参考价值的也纳入
|
||||
|
||||
**具体容忍规则**:
|
||||
- **人群地域**: 即使不是目标地域,但研究结果有参考价值 → \`include\`
|
||||
- **时间窗口**: 即使不完全在时间范围内,但研究方法可参考 → \`include\`
|
||||
- **对照类型**: 即使对照不是安慰剂,但有对比意义 → \`include\`
|
||||
- **研究设计**: 即使不是理想的RCT,但有科学价值 → \`include\``,
|
||||
confidenceRule: '**⭐ 宽松模式**: 置信度要求降低,0.5以上即可纳入',
|
||||
reasonExample: '虽然对照组不是安慰剂而是另一种药物,但研究方法严谨,结果有参考价值,且研究人群与目标人群有一定相似性。建议纳入全文复筛阶段进一步评估。',
|
||||
finalReminder: '**记住**: 这是**初筛**阶段,**宁可多纳入,也不要错过**。只要有任何可能的价值,就应该纳入全文复筛!'
|
||||
},
|
||||
standard: {
|
||||
role: '你是一位经验丰富的系统综述专家,负责根据PICO标准和纳排标准对医学文献进行初步筛选。',
|
||||
context: '',
|
||||
picoGuideline: '',
|
||||
decisionRules: '',
|
||||
confidenceRule: '',
|
||||
reasonExample: '具体说明你的筛选决策理由,需包含:(1)为什么纳入或排除 (2)哪些PICO标准符合或不符合 (3)是否有特殊考虑',
|
||||
finalReminder: '现在开始筛选,请严格按照JSON格式输出结果。'
|
||||
},
|
||||
strict: {
|
||||
role: '你是一位严谨的系统综述专家,负责根据PICO标准和纳排标准对医学文献进行**严格筛选**。',
|
||||
context: `⚠️ **重要提示**: 这是**严格筛选模式**,要求:
|
||||
- **严格匹配PICO标准,任何维度不匹配都应排除**
|
||||
- **对边界情况持保守态度**
|
||||
- **优先排除而非纳入**
|
||||
- **只纳入高度确定符合标准的文献**`,
|
||||
picoGuideline: `**⭐ 严格模式原则**:
|
||||
- 只有**明确且完全匹配**才能标记为 \`match\`
|
||||
- 任何不确定或不够明确的,标记为 \`partial\` 或 \`mismatch\`
|
||||
- 对标准的理解要严格,不做宽松解释`,
|
||||
decisionRules: `**⭐ 严格模式决策规则**:
|
||||
1. **一票否决**: 任何一个PICO维度为 \`mismatch\`,直接排除
|
||||
2. **多个partial即排除**: 超过2个维度为 \`partial\`,也应排除
|
||||
3. **触发任一排除标准**: 立即排除
|
||||
4. **不确定时倾向排除**: 当信息不足无法判断时,倾向于排除
|
||||
5. **要求高置信度**: 只有置信度≥0.8才纳入
|
||||
|
||||
**具体严格规则**:
|
||||
- **人群地域**: 必须严格匹配目标地域,其他地域一律排除
|
||||
- **时间窗口**: 必须严格在时间范围内,边界情况也排除
|
||||
- **对照类型**: 必须是指定的对照类型(如安慰剂),其他对照排除
|
||||
- **研究设计**: 必须是指定的研究设计,次优设计也排除`,
|
||||
confidenceRule: '**⭐ 严格模式**: 只有置信度≥0.8才能纳入',
|
||||
reasonExample: '虽然研究人群和干预措施匹配,但对照组为另一种药物而非安慰剂,不符合严格的对照要求。在严格筛选模式下,必须排除。',
|
||||
finalReminder: '**记住**: 这是**严格筛选**模式,**宁可错杀,不可放过**。只纳入**完全确定符合**所有标准的高质量文献!'
|
||||
}
|
||||
};
|
||||
|
||||
const config = styleConfig[style];
|
||||
|
||||
return `${config.role}
|
||||
|
||||
${config.context}
|
||||
|
||||
## 研究方案信息
|
||||
|
||||
**PICO标准:**
|
||||
- **P (研究人群)**: ${picoCriteria.population}
|
||||
- **I (干预措施)**: ${picoCriteria.intervention}
|
||||
- **C (对照)**: ${picoCriteria.comparison}
|
||||
- **O (结局指标)**: ${picoCriteria.outcome}
|
||||
- **S (研究设计)**: ${picoCriteria.studyDesign}
|
||||
|
||||
**纳入标准:**
|
||||
${inclusionCriteria}
|
||||
|
||||
**排除标准:**
|
||||
${exclusionCriteria}
|
||||
|
||||
---
|
||||
|
||||
## 待筛选文献
|
||||
|
||||
**标题:** ${title}
|
||||
|
||||
**摘要:** ${abstract}
|
||||
|
||||
${authors ? `**作者:** ${authors}` : ''}
|
||||
${journal ? `**期刊:** ${journal}` : ''}
|
||||
${publicationYear ? `**年份:** ${publicationYear}` : ''}
|
||||
|
||||
---
|
||||
|
||||
## 筛选任务
|
||||
|
||||
请按照以下步骤进行筛选:
|
||||
|
||||
### 步骤1: PICO逐项评估
|
||||
|
||||
对文献的每个PICO维度进行评估,判断是否匹配:
|
||||
- **match** (匹配):文献明确符合该标准
|
||||
- **partial** (部分匹配):文献部分符合,或表述不够明确
|
||||
- **mismatch** (不匹配):文献明确不符合该标准
|
||||
|
||||
${config.picoGuideline}
|
||||
|
||||
### 步骤2: 提取证据
|
||||
|
||||
从标题和摘要中提取支持你判断的**原文片段**,每个维度给出具体证据。
|
||||
|
||||
### 步骤3: 综合决策
|
||||
|
||||
基于PICO评估、纳排标准,给出最终筛选决策:
|
||||
- **include** (纳入):文献符合所有或大部分PICO标准,且满足纳入标准
|
||||
- **exclude** (排除):文献明确不符合PICO标准,或触发排除标准
|
||||
- **uncertain** (不确定):信息不足,无法做出明确判断
|
||||
|
||||
${config.decisionRules}
|
||||
|
||||
### 步骤4: 置信度评分
|
||||
|
||||
给出你对此判断的把握程度(0-1之间):
|
||||
- **0.9-1.0**: 非常确定,有充分证据支持
|
||||
- **0.7-0.9**: 比较确定,证据较为充分
|
||||
- **0.5-0.7**: 中等把握,证据有限
|
||||
- **0.0-0.5**: 不确定,信息严重不足
|
||||
|
||||
${config.confidenceRule}
|
||||
|
||||
---
|
||||
|
||||
## 输出格式要求
|
||||
|
||||
请**严格按照**以下JSON格式输出,不要添加任何额外文字:
|
||||
|
||||
⚠️ **重要**: 必须使用ASCII引号("),不要使用中文引号("")
|
||||
|
||||
\`\`\`json
|
||||
{
|
||||
"judgment": {
|
||||
"P": "match",
|
||||
"I": "match",
|
||||
"C": "partial",
|
||||
"S": "match"
|
||||
},
|
||||
"evidence": {
|
||||
"P": "从摘要中引用支持P判断的原文",
|
||||
"I": "从摘要中引用支持I判断的原文",
|
||||
"C": "从摘要中引用支持C判断的原文",
|
||||
"S": "从摘要中引用支持S判断的原文"
|
||||
},
|
||||
"conclusion": "include",
|
||||
"confidence": 0.85,
|
||||
"reason": "${config.reasonExample}"
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
## 关键约束
|
||||
|
||||
1. **judgment** 的每个字段只能是:\`"match"\`, \`"partial"\`, \`"mismatch"\`
|
||||
2. **evidence** 必须引用原文,不要编造内容
|
||||
3. **conclusion** 只能是:\`"include"\`, \`"exclude"\`, \`"uncertain"\`
|
||||
4. **confidence** 必须是0-1之间的数字
|
||||
5. **reason** 长度在50-300字之间,说理充分
|
||||
6. 输出必须是合法的JSON格式
|
||||
|
||||
## 医学文献筛选原则
|
||||
|
||||
- 优先考虑研究设计的严谨性(RCT > 队列研究 > 病例对照)
|
||||
- 标题和摘要信息不足时,倾向于 \`"uncertain"\` 而非直接排除
|
||||
- 对于综述、系统评价、Meta分析,通常排除(除非方案特别说明)
|
||||
- 动物实验、体外实验通常排除(除非方案特别说明)
|
||||
- 会议摘要、病例报告通常排除
|
||||
- 注意区分干预措施的具体类型(如药物剂量、手术方式)
|
||||
- 结局指标要与方案一致(主要结局 vs 次要结局)
|
||||
|
||||
---
|
||||
|
||||
${config.finalReminder}
|
||||
`;
|
||||
}
|
||||
|
||||
237
backend/src/modules/asl/services/llmScreeningService.ts
Normal file
237
backend/src/modules/asl/services/llmScreeningService.ts
Normal file
@@ -0,0 +1,237 @@
|
||||
/**
|
||||
* ASL LLM筛选服务
|
||||
* 使用双模型策略进行文献筛选
|
||||
*/
|
||||
|
||||
import { LLMFactory } from '../../../common/llm/adapters/LLMFactory.js';
|
||||
import { ModelType } from '../../../common/llm/adapters/types.js';
|
||||
import { parseJSON } from '../../../common/utils/jsonParser.js';
|
||||
import Ajv from 'ajv';
|
||||
import { screeningOutputSchema, generateScreeningPrompt, type ScreeningStyle } from '../schemas/screening.schema.js';
|
||||
import { LLMScreeningOutput, DualModelScreeningResult, PicoCriteria } from '../types/index.js';
|
||||
import { logger } from '../../../common/logging/index.js';
|
||||
|
||||
const ajv = new Ajv();
|
||||
const validate = ajv.compile(screeningOutputSchema);
|
||||
|
||||
// 模型名称映射:从模型ID映射到ModelType
|
||||
const MODEL_TYPE_MAP: Record<string, ModelType> = {
|
||||
'deepseek-chat': 'deepseek-v3',
|
||||
'deepseek-v3': 'deepseek-v3',
|
||||
'qwen-max': 'qwen3-72b', // ⭐ qwen-max = Qwen最新最强模型
|
||||
'qwen-plus': 'qwen3-72b', // qwen-plus = Qwen2.5-72B (次选)
|
||||
'qwen3-72b': 'qwen3-72b',
|
||||
'qwen-long': 'qwen-long',
|
||||
'gpt-4o': 'gpt-5', // ⭐ gpt-4o 映射到 gpt-5
|
||||
'gpt-5-pro': 'gpt-5',
|
||||
'gpt-5': 'gpt-5',
|
||||
'claude-sonnet-4.5': 'claude-4.5', // ⭐ claude-sonnet-4.5 映射
|
||||
'claude-sonnet-4-5-20250929': 'claude-4.5',
|
||||
'claude-4.5': 'claude-4.5',
|
||||
};
|
||||
|
||||
export class LLMScreeningService {
|
||||
/**
|
||||
* 使用单个模型进行筛选
|
||||
*/
|
||||
async screenWithModel(
|
||||
modelName: string,
|
||||
title: string,
|
||||
abstract: string,
|
||||
picoCriteria: PicoCriteria,
|
||||
inclusionCriteria: string,
|
||||
exclusionCriteria: string,
|
||||
style: ScreeningStyle = 'standard',
|
||||
authors?: string,
|
||||
journal?: string,
|
||||
publicationYear?: number
|
||||
): Promise<LLMScreeningOutput> {
|
||||
try {
|
||||
// 映射模型名称到ModelType
|
||||
const modelType = MODEL_TYPE_MAP[modelName];
|
||||
if (!modelType) {
|
||||
throw new Error(`Unsupported model name: ${modelName}. Supported models: ${Object.keys(MODEL_TYPE_MAP).join(', ')}`);
|
||||
}
|
||||
|
||||
const prompt = generateScreeningPrompt(
|
||||
title,
|
||||
abstract,
|
||||
picoCriteria,
|
||||
inclusionCriteria,
|
||||
exclusionCriteria,
|
||||
style,
|
||||
authors,
|
||||
journal,
|
||||
publicationYear
|
||||
);
|
||||
|
||||
const llmAdapter = LLMFactory.getAdapter(modelType);
|
||||
const response = await llmAdapter.chat([
|
||||
{ role: 'user', content: prompt },
|
||||
]);
|
||||
|
||||
// 解析JSON输出
|
||||
const parseResult = parseJSON(response.content);
|
||||
if (!parseResult.success || !parseResult.data) {
|
||||
logger.error('Failed to parse LLM output as JSON', {
|
||||
error: parseResult.error,
|
||||
rawOutput: parseResult.rawOutput,
|
||||
});
|
||||
throw new Error(`Failed to parse LLM output as JSON: ${parseResult.error}`);
|
||||
}
|
||||
|
||||
// JSON Schema验证
|
||||
const valid = validate(parseResult.data);
|
||||
if (!valid) {
|
||||
logger.error('LLM output validation failed', {
|
||||
errors: validate.errors,
|
||||
output: parseResult.data,
|
||||
rawOutput: parseResult.rawOutput,
|
||||
});
|
||||
throw new Error('LLM output does not match expected schema');
|
||||
}
|
||||
|
||||
return parseResult.data as LLMScreeningOutput;
|
||||
} catch (error) {
|
||||
logger.error(`LLM screening failed with model ${modelName}`, {
|
||||
error,
|
||||
title,
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 双模型并行筛选(核心功能)
|
||||
*/
|
||||
async dualModelScreening(
|
||||
literatureId: string,
|
||||
title: string,
|
||||
abstract: string,
|
||||
picoCriteria: PicoCriteria,
|
||||
inclusionCriteria: string,
|
||||
exclusionCriteria: string,
|
||||
models: [string, string] = ['deepseek-chat', 'qwen-max'],
|
||||
style: ScreeningStyle = 'standard',
|
||||
authors?: string,
|
||||
journal?: string,
|
||||
publicationYear?: number
|
||||
): Promise<DualModelScreeningResult> {
|
||||
const [model1, model2] = models;
|
||||
|
||||
try {
|
||||
// 并行调用两个模型(使用相同的筛选风格)
|
||||
const [result1, result2] = await Promise.all([
|
||||
this.screenWithModel(model1, title, abstract, picoCriteria, inclusionCriteria, exclusionCriteria, style, authors, journal, publicationYear),
|
||||
this.screenWithModel(model2, title, abstract, picoCriteria, inclusionCriteria, exclusionCriteria, style, authors, journal, publicationYear),
|
||||
]);
|
||||
|
||||
// 冲突检测(只检测conclusion冲突,不检测PICO维度差异)
|
||||
const conclusionMatch = result1.conclusion === result2.conclusion;
|
||||
const hasConflict = !conclusionMatch;
|
||||
|
||||
// 记录PICO维度差异(用于日志,不影响冲突判断)
|
||||
const { conflictFields } = this.detectConflict(result1, result2);
|
||||
|
||||
// 最终决策
|
||||
let finalDecision: 'include' | 'exclude' | 'pending' = 'pending';
|
||||
if (conclusionMatch) {
|
||||
// conclusion一致时,采纳结论
|
||||
finalDecision = result1.conclusion === 'uncertain' ? 'pending' : result1.conclusion;
|
||||
} else {
|
||||
// conclusion不一致时,标记为pending(需人工复核)
|
||||
finalDecision = 'pending';
|
||||
}
|
||||
|
||||
return {
|
||||
literatureId,
|
||||
deepseek: result1,
|
||||
deepseekModel: model1,
|
||||
qwen: result2,
|
||||
qwenModel: model2,
|
||||
hasConflict,
|
||||
conflictFields: hasConflict ? conflictFields : undefined,
|
||||
finalDecision,
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('Dual model screening failed', {
|
||||
error,
|
||||
literatureId,
|
||||
title,
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 检测两个模型结果是否冲突
|
||||
*/
|
||||
private detectConflict(
|
||||
result1: LLMScreeningOutput,
|
||||
result2: LLMScreeningOutput
|
||||
): { hasConflict: boolean; conflictFields: string[] } {
|
||||
const conflictFields: string[] = [];
|
||||
|
||||
// 检查PICO四个维度
|
||||
const dimensions = ['P', 'I', 'C', 'S'] as const;
|
||||
for (const dim of dimensions) {
|
||||
if (result1.judgment[dim] !== result2.judgment[dim]) {
|
||||
conflictFields.push(dim);
|
||||
}
|
||||
}
|
||||
|
||||
// 检查最终结论
|
||||
if (result1.conclusion !== result2.conclusion) {
|
||||
conflictFields.push('conclusion');
|
||||
}
|
||||
|
||||
return {
|
||||
hasConflict: conflictFields.length > 0,
|
||||
conflictFields,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* 批量筛选文献
|
||||
*/
|
||||
async batchScreening(
|
||||
literatures: Array<{
|
||||
id: string;
|
||||
title: string;
|
||||
abstract: string;
|
||||
}>,
|
||||
picoCriteria: PicoCriteria,
|
||||
inclusionCriteria: string,
|
||||
exclusionCriteria: string,
|
||||
models?: [string, string],
|
||||
style: ScreeningStyle = 'standard',
|
||||
concurrency: number = 3
|
||||
): Promise<DualModelScreeningResult[]> {
|
||||
const results: DualModelScreeningResult[] = [];
|
||||
|
||||
// 分批处理(并发控制)
|
||||
for (let i = 0; i < literatures.length; i += concurrency) {
|
||||
const batch = literatures.slice(i, i + concurrency);
|
||||
const batchResults = await Promise.all(
|
||||
batch.map((lit) =>
|
||||
this.dualModelScreening(
|
||||
lit.id,
|
||||
lit.title,
|
||||
lit.abstract,
|
||||
picoCriteria,
|
||||
inclusionCriteria,
|
||||
exclusionCriteria,
|
||||
models,
|
||||
style
|
||||
)
|
||||
)
|
||||
);
|
||||
results.push(...batchResults);
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
}
|
||||
|
||||
export const llmScreeningService = new LLMScreeningService();
|
||||
|
||||
122
backend/src/modules/asl/types/index.ts
Normal file
122
backend/src/modules/asl/types/index.ts
Normal file
@@ -0,0 +1,122 @@
|
||||
/**
|
||||
* ASL模块类型定义
|
||||
* 标题摘要初筛 MVP阶段
|
||||
*/
|
||||
|
||||
// ==================== 筛选项目相关 ====================
|
||||
|
||||
export interface PicoCriteria {
|
||||
population: string; // P: 研究人群
|
||||
intervention: string; // I: 干预措施
|
||||
comparison: string; // C: 对照
|
||||
outcome: string; // O: 结局指标
|
||||
studyDesign: string; // S: 研究设计类型
|
||||
}
|
||||
|
||||
export interface ScreeningConfig {
|
||||
models: string[]; // 使用的模型,如 ["deepseek-chat", "qwen-max"]
|
||||
temperature: number; // 温度参数,建议0
|
||||
maxRetries?: number; // 最大重试次数
|
||||
}
|
||||
|
||||
export interface CreateScreeningProjectDto {
|
||||
projectName: string;
|
||||
picoCriteria: PicoCriteria;
|
||||
inclusionCriteria: string;
|
||||
exclusionCriteria: string;
|
||||
screeningConfig?: ScreeningConfig;
|
||||
}
|
||||
|
||||
// ==================== 文献相关 ====================
|
||||
|
||||
export interface LiteratureDto {
|
||||
pmid?: string;
|
||||
title: string;
|
||||
abstract: string;
|
||||
authors?: string;
|
||||
journal?: string;
|
||||
publicationYear?: number;
|
||||
doi?: string;
|
||||
}
|
||||
|
||||
export interface ImportLiteratureDto {
|
||||
projectId: string;
|
||||
literatures: LiteratureDto[];
|
||||
}
|
||||
|
||||
// ==================== LLM筛选相关 ====================
|
||||
|
||||
export interface PicoJudgment {
|
||||
P: 'match' | 'partial' | 'mismatch';
|
||||
I: 'match' | 'partial' | 'mismatch';
|
||||
C: 'match' | 'partial' | 'mismatch';
|
||||
S: 'match' | 'partial' | 'mismatch';
|
||||
}
|
||||
|
||||
export interface PicoEvidence {
|
||||
P: string;
|
||||
I: string;
|
||||
C: string;
|
||||
S: string;
|
||||
}
|
||||
|
||||
export interface LLMScreeningOutput {
|
||||
judgment: PicoJudgment;
|
||||
evidence: PicoEvidence;
|
||||
conclusion: 'include' | 'exclude' | 'uncertain';
|
||||
confidence: number; // 0-1
|
||||
reason: string;
|
||||
}
|
||||
|
||||
export interface DualModelScreeningResult {
|
||||
literatureId: string;
|
||||
|
||||
// DeepSeek结果
|
||||
deepseek: LLMScreeningOutput;
|
||||
deepseekModel: string;
|
||||
|
||||
// Qwen结果
|
||||
qwen: LLMScreeningOutput;
|
||||
qwenModel: string;
|
||||
|
||||
// 冲突检测
|
||||
hasConflict: boolean;
|
||||
conflictFields?: string[]; // ['P', 'I', 'conclusion']
|
||||
|
||||
// 最终决策(无冲突时自动设置,有冲突时为pending)
|
||||
finalDecision?: 'include' | 'exclude' | 'pending';
|
||||
}
|
||||
|
||||
// ==================== 筛选任务相关 ====================
|
||||
|
||||
export interface StartScreeningTaskDto {
|
||||
projectId: string;
|
||||
taskType: 'title_abstract' | 'full_text';
|
||||
}
|
||||
|
||||
export interface ScreeningTaskProgress {
|
||||
taskId: string;
|
||||
status: 'pending' | 'running' | 'completed' | 'failed';
|
||||
totalItems: number;
|
||||
processedItems: number;
|
||||
successItems: number;
|
||||
failedItems: number;
|
||||
conflictItems: number;
|
||||
estimatedEndAt?: Date;
|
||||
}
|
||||
|
||||
// ==================== 审核工作台相关 ====================
|
||||
|
||||
export interface ConflictReviewDto {
|
||||
resultId: string;
|
||||
finalDecision: 'include' | 'exclude';
|
||||
exclusionReason?: string;
|
||||
}
|
||||
|
||||
export interface BatchReviewDto {
|
||||
projectId: string;
|
||||
reviews: ConflictReviewDto[];
|
||||
}
|
||||
|
||||
|
||||
|
||||
359
backend/src/scripts/test-closeai.ts
Normal file
359
backend/src/scripts/test-closeai.ts
Normal file
@@ -0,0 +1,359 @@
|
||||
/**
|
||||
* CloseAI集成测试脚本
|
||||
*
|
||||
* 测试通过CloseAI代理访问GPT-5和Claude-4.5模型
|
||||
*
|
||||
* 运行方式:
|
||||
* ```bash
|
||||
* cd backend
|
||||
* npx tsx src/scripts/test-closeai.ts
|
||||
* ```
|
||||
*
|
||||
* 环境变量要求:
|
||||
* - CLOSEAI_API_KEY: CloseAI API密钥
|
||||
* - CLOSEAI_OPENAI_BASE_URL: OpenAI端点
|
||||
* - CLOSEAI_CLAUDE_BASE_URL: Claude端点
|
||||
*
|
||||
* 参考文档:docs/02-通用能力层/01-LLM大模型网关/03-CloseAI集成指南.md
|
||||
*/
|
||||
|
||||
import { LLMFactory } from '../common/llm/adapters/LLMFactory.js';
|
||||
import { config } from '../config/env.js';
|
||||
|
||||
/**
|
||||
* 测试配置验证
|
||||
*/
|
||||
function validateConfig() {
|
||||
console.log('🔍 验证环境配置...\n');
|
||||
|
||||
const checks = [
|
||||
{
|
||||
name: 'CLOSEAI_API_KEY',
|
||||
value: config.closeaiApiKey,
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: 'CLOSEAI_OPENAI_BASE_URL',
|
||||
value: config.closeaiOpenaiBaseUrl,
|
||||
required: true,
|
||||
},
|
||||
{
|
||||
name: 'CLOSEAI_CLAUDE_BASE_URL',
|
||||
value: config.closeaiClaudeBaseUrl,
|
||||
required: true,
|
||||
},
|
||||
];
|
||||
|
||||
let allValid = true;
|
||||
|
||||
for (const check of checks) {
|
||||
const status = check.value ? '✅' : '❌';
|
||||
console.log(`${status} ${check.name}: ${check.value ? '已配置' : '未配置'}`);
|
||||
|
||||
if (check.required && !check.value) {
|
||||
allValid = false;
|
||||
}
|
||||
}
|
||||
|
||||
console.log('');
|
||||
|
||||
if (!allValid) {
|
||||
throw new Error('环境配置不完整,请检查 .env 文件');
|
||||
}
|
||||
|
||||
console.log('✅ 环境配置验证通过\n');
|
||||
}
|
||||
|
||||
/**
|
||||
* 测试GPT-5-Pro
|
||||
*/
|
||||
async function testGPT5() {
|
||||
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
||||
console.log('1️⃣ 测试 GPT-5-Pro');
|
||||
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n');
|
||||
|
||||
try {
|
||||
const gpt5 = LLMFactory.getAdapter('gpt-5');
|
||||
|
||||
console.log('📤 发送测试请求...');
|
||||
console.log('提示词: "你好,请用一句话介绍你自己。"\n');
|
||||
|
||||
const startTime = Date.now();
|
||||
|
||||
const response = await gpt5.chat([
|
||||
{
|
||||
role: 'user',
|
||||
content: '你好,请用一句话介绍你自己。',
|
||||
},
|
||||
]);
|
||||
|
||||
const duration = Date.now() - startTime;
|
||||
|
||||
console.log('📥 收到响应:\n');
|
||||
console.log(`模型: ${response.model}`);
|
||||
console.log(`内容: ${response.content}`);
|
||||
console.log(`耗时: ${duration}ms`);
|
||||
|
||||
if (response.usage) {
|
||||
console.log(`Token使用: ${response.usage.totalTokens} (输入: ${response.usage.promptTokens}, 输出: ${response.usage.completionTokens})`);
|
||||
}
|
||||
|
||||
console.log('\n✅ GPT-5测试通过\n');
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.error('\n❌ GPT-5测试失败:', error);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 测试Claude-4.5-Sonnet
|
||||
*/
|
||||
async function testClaude() {
|
||||
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
||||
console.log('2️⃣ 测试 Claude-4.5-Sonnet');
|
||||
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n');
|
||||
|
||||
try {
|
||||
const claude = LLMFactory.getAdapter('claude-4.5');
|
||||
|
||||
console.log('📤 发送测试请求...');
|
||||
console.log('提示词: "你好,请用一句话介绍你自己。"\n');
|
||||
|
||||
const startTime = Date.now();
|
||||
|
||||
const response = await claude.chat([
|
||||
{
|
||||
role: 'user',
|
||||
content: '你好,请用一句话介绍你自己。',
|
||||
},
|
||||
]);
|
||||
|
||||
const duration = Date.now() - startTime;
|
||||
|
||||
console.log('📥 收到响应:\n');
|
||||
console.log(`模型: ${response.model}`);
|
||||
console.log(`内容: ${response.content}`);
|
||||
console.log(`耗时: ${duration}ms`);
|
||||
|
||||
if (response.usage) {
|
||||
console.log(`Token使用: ${response.usage.totalTokens} (输入: ${response.usage.promptTokens}, 输出: ${response.usage.completionTokens})`);
|
||||
}
|
||||
|
||||
console.log('\n✅ Claude测试通过\n');
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.error('\n❌ Claude测试失败:', error);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 测试文献筛选场景(实际应用)
|
||||
*/
|
||||
async function testLiteratureScreening() {
|
||||
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
||||
console.log('3️⃣ 测试文献筛选场景(双模型对比)');
|
||||
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n');
|
||||
|
||||
const testLiterature = {
|
||||
title: 'Deep learning in medical imaging: A systematic review',
|
||||
abstract: 'Background: Deep learning has shown remarkable performance in various medical imaging tasks. Methods: We systematically reviewed 150 studies on deep learning applications in radiology, pathology, and ophthalmology. Results: Deep learning models achieved high accuracy (>90%) in most tasks. Conclusion: Deep learning is a promising tool for medical image analysis.',
|
||||
};
|
||||
|
||||
const picoPrompt = `
|
||||
请根据以下PICO标准,判断这篇文献是否应该纳入系统评价:
|
||||
|
||||
**PICO标准:**
|
||||
- Population: 成年患者
|
||||
- Intervention: 深度学习模型
|
||||
- Comparison: 传统机器学习方法
|
||||
- Outcome: 诊断准确率
|
||||
|
||||
**文献信息:**
|
||||
标题:${testLiterature.title}
|
||||
摘要:${testLiterature.abstract}
|
||||
|
||||
请输出JSON格式:
|
||||
{
|
||||
"decision": "include/exclude/uncertain",
|
||||
"reason": "判断理由",
|
||||
"confidence": 0.0-1.0
|
||||
}
|
||||
`;
|
||||
|
||||
try {
|
||||
console.log('📤 使用DeepSeek和GPT-5进行双模型对比筛选...\n');
|
||||
|
||||
// 并行调用两个模型
|
||||
const [deepseekAdapter, gpt5Adapter] = [
|
||||
LLMFactory.getAdapter('deepseek-v3'),
|
||||
LLMFactory.getAdapter('gpt-5'),
|
||||
];
|
||||
|
||||
const startTime = Date.now();
|
||||
|
||||
const [deepseekResponse, gpt5Response] = await Promise.all([
|
||||
deepseekAdapter.chat([{ role: 'user', content: picoPrompt }]),
|
||||
gpt5Adapter.chat([{ role: 'user', content: picoPrompt }]),
|
||||
]);
|
||||
|
||||
const duration = Date.now() - startTime;
|
||||
|
||||
console.log('📥 DeepSeek响应:');
|
||||
console.log(deepseekResponse.content);
|
||||
console.log('');
|
||||
|
||||
console.log('📥 GPT-5响应:');
|
||||
console.log(gpt5Response.content);
|
||||
console.log('');
|
||||
|
||||
console.log(`⏱️ 总耗时: ${duration}ms(并行)`);
|
||||
console.log(`💰 总Token: ${(deepseekResponse.usage?.totalTokens || 0) + (gpt5Response.usage?.totalTokens || 0)}`);
|
||||
|
||||
// 尝试解析JSON结果(简单验证)
|
||||
try {
|
||||
const deepseekDecision = JSON.parse(deepseekResponse.content);
|
||||
const gpt5Decision = JSON.parse(gpt5Response.content);
|
||||
|
||||
console.log('\n✅ 双模型筛选结果:');
|
||||
console.log(`DeepSeek决策: ${deepseekDecision.decision} (置信度: ${deepseekDecision.confidence})`);
|
||||
console.log(`GPT-5决策: ${gpt5Decision.decision} (置信度: ${gpt5Decision.confidence})`);
|
||||
|
||||
if (deepseekDecision.decision === gpt5Decision.decision) {
|
||||
console.log('✅ 两个模型一致,共识度高');
|
||||
} else {
|
||||
console.log('⚠️ 两个模型不一致,建议人工复核或启用第三方仲裁(Claude)');
|
||||
}
|
||||
} catch (parseError) {
|
||||
console.log('⚠️ JSON解析失败(测试环境,实际应用需要优化提示词)');
|
||||
}
|
||||
|
||||
console.log('\n✅ 文献筛选场景测试通过\n');
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.error('\n❌ 文献筛选场景测试失败:', error);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 测试流式调用(可选)
|
||||
*/
|
||||
async function testStreamMode() {
|
||||
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
||||
console.log('4️⃣ 测试流式调用(GPT-5)');
|
||||
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n');
|
||||
|
||||
try {
|
||||
const gpt5 = LLMFactory.getAdapter('gpt-5');
|
||||
|
||||
console.log('📤 发送流式请求...');
|
||||
console.log('提示词: "请写一首关于人工智能的短诗(4行)"\n');
|
||||
console.log('📥 流式响应:\n');
|
||||
|
||||
const startTime = Date.now();
|
||||
let fullContent = '';
|
||||
let chunkCount = 0;
|
||||
|
||||
for await (const chunk of gpt5.chatStream([
|
||||
{
|
||||
role: 'user',
|
||||
content: '请写一首关于人工智能的短诗(4行)',
|
||||
},
|
||||
])) {
|
||||
if (chunk.content) {
|
||||
process.stdout.write(chunk.content);
|
||||
fullContent += chunk.content;
|
||||
chunkCount++;
|
||||
}
|
||||
|
||||
if (chunk.done) {
|
||||
const duration = Date.now() - startTime;
|
||||
console.log('\n');
|
||||
console.log(`\n⏱️ 耗时: ${duration}ms`);
|
||||
console.log(`📦 Chunk数: ${chunkCount}`);
|
||||
console.log(`📝 总字符数: ${fullContent.length}`);
|
||||
|
||||
if (chunk.usage) {
|
||||
console.log(`💰 Token使用: ${chunk.usage.totalTokens}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
console.log('\n✅ 流式调用测试通过\n');
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.error('\n❌ 流式调用测试失败:', error);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 主测试函数
|
||||
*/
|
||||
async function main() {
|
||||
console.log('╔═══════════════════════════════════════════════════╗');
|
||||
console.log('║ 🧪 CloseAI集成测试 ║');
|
||||
console.log('║ 测试GPT-5和Claude-4.5通过CloseAI代理访问 ║');
|
||||
console.log('╚═══════════════════════════════════════════════════╝\n');
|
||||
|
||||
try {
|
||||
// 验证配置
|
||||
validateConfig();
|
||||
|
||||
// 测试结果
|
||||
const results = {
|
||||
gpt5: false,
|
||||
claude: false,
|
||||
literatureScreening: false,
|
||||
stream: false,
|
||||
};
|
||||
|
||||
// 1. 测试GPT-5
|
||||
results.gpt5 = await testGPT5();
|
||||
|
||||
// 2. 测试Claude-4.5
|
||||
results.claude = await testClaude();
|
||||
|
||||
// 3. 测试文献筛选场景
|
||||
results.literatureScreening = await testLiteratureScreening();
|
||||
|
||||
// 4. 测试流式调用(可选)
|
||||
results.stream = await testStreamMode();
|
||||
|
||||
// 总结
|
||||
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
||||
console.log('📊 测试总结');
|
||||
console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n');
|
||||
|
||||
const allPassed = Object.values(results).every((r) => r === true);
|
||||
|
||||
console.log(`GPT-5测试: ${results.gpt5 ? '✅ 通过' : '❌ 失败'}`);
|
||||
console.log(`Claude测试: ${results.claude ? '✅ 通过' : '❌ 失败'}`);
|
||||
console.log(`文献筛选场景: ${results.literatureScreening ? '✅ 通过' : '❌ 失败'}`);
|
||||
console.log(`流式调用测试: ${results.stream ? '✅ 通过' : '❌ 失败'}`);
|
||||
|
||||
console.log('\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n');
|
||||
|
||||
if (allPassed) {
|
||||
console.log('🎉 所有测试通过!CloseAI集成成功!');
|
||||
console.log('\n✅ 可以在ASL模块中使用GPT-5和Claude-4.5进行双模型对比筛选');
|
||||
console.log('✅ 支持三模型共识仲裁(DeepSeek + GPT-5 + Claude)');
|
||||
console.log('✅ 支持流式调用,适用于实时响应场景\n');
|
||||
process.exit(0);
|
||||
} else {
|
||||
console.error('⚠️ 部分测试失败,请检查配置和网络连接\n');
|
||||
process.exit(1);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('❌ 测试执行失败:', error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// 运行测试
|
||||
main();
|
||||
|
||||
|
||||
|
||||
@@ -201,3 +201,5 @@ testPlatformInfrastructure().catch(error => {
|
||||
process.exit(1)
|
||||
})
|
||||
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user