feat(ssa): Complete T-test end-to-end testing with 9 bug fixes - Phase 1 core 85% complete. R service: missing value auto-filter. Backend: error handling, variable matching, dynamic filename. Frontend: module activation, session isolation, error propagation. Full flow verified.

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
2026-02-19 20:57:00 +08:00
parent 8137e3cde2
commit 49b5c37cb1
86 changed files with 21207 additions and 252 deletions

View File

@@ -0,0 +1,39 @@
import { CloseAIAdapter } from './CloseAIAdapter.js';
/**
* Claude-4.5-Sonnet适配器便捷封装
*
* 通过CloseAI代理访问Anthropic Claude-4.5-Sonnet模型
*
* 模型特点:
* - 准确率93%
* - 速度:中等
* - 成本¥0.021/1K tokens
* - 适用场景:第三方仲裁、结构化输出、高质量文本生成
*
* 使用场景:
* - 双模型对比筛选DeepSeek vs GPT-5
* - 三模型共识仲裁DeepSeek + GPT-5 + Claude
* - 作为独立裁判解决冲突决策
*
* 使用示例:
* ```typescript
* import { ClaudeAdapter } from '@/common/llm/adapters';
*
* const claude = new ClaudeAdapter();
* const response = await claude.chat([
* { role: 'user', content: '作为第三方仲裁,请判断文献是否应该纳入...' }
* ]);
* ```
*
* 参考文档docs/02-通用能力层/01-LLM大模型网关/03-CloseAI集成指南.md
*/
export class ClaudeAdapter extends CloseAIAdapter {
    /**
     * Builds an adapter for Anthropic Claude routed through the CloseAI proxy.
     * All transport, error handling, and response normalization live in
     * CloseAIAdapter; this subclass only pins provider='claude' and a default model.
     * @param modelName - Anthropic model identifier; defaults to 'claude-sonnet-4-5-20250929'.
     * @throws {Error} propagated from CloseAIAdapter when CLOSEAI_API_KEY is missing.
     */
    constructor(modelName = 'claude-sonnet-4-5-20250929') {
        super('claude', modelName);
        console.log(`[ClaudeAdapter] 初始化完成,模型: ${modelName}`);
    }
}

View File

@@ -0,0 +1,269 @@
import axios from 'axios';
import { config } from '../../../config/env.js';
/**
* CloseAI通用适配器
*
* 支持通过CloseAI代理访问
* - OpenAI GPT-5-Pro
* - Anthropic Claude-4.5-Sonnet
*
* 设计原则:
* - CloseAI提供OpenAI兼容的统一接口
* - 通过不同的Base URL区分供应商
* - 代码逻辑完全复用OpenAI标准格式
*
* 参考文档docs/02-通用能力层/01-LLM大模型网关/03-CloseAI集成指南.md
*/
export class CloseAIAdapter {
    /**
     * Generic adapter for vendors reachable through the CloseAI proxy.
     * @param provider - 'openai' or 'claude'; selects the base URL and wire format.
     * @param modelName - model identifier, e.g. 'gpt-4o' or 'claude-sonnet-4-5-20250929'.
     * @throws {Error} when CLOSEAI_API_KEY is not configured.
     */
    constructor(provider, modelName) {
        this.provider = provider;
        this.modelName = modelName;
        this.apiKey = config.closeaiApiKey || '';
        // The proxy exposes each vendor behind a different base URL.
        this.baseURL = provider === 'openai'
            ? config.closeaiOpenaiBaseUrl // https://api.openai-proxy.org/v1
            : config.closeaiClaudeBaseUrl; // https://api.openai-proxy.org/anthropic
        // Fail fast on missing credentials so callers see a clear message.
        if (!this.apiKey) {
            throw new Error('CloseAI API key is not configured. Please set CLOSEAI_API_KEY in .env file.');
        }
        console.log(`[CloseAIAdapter] 初始化完成`, {
            provider: this.provider,
            model: this.modelName,
            baseURL: this.baseURL,
        });
    }
    /**
     * Non-streaming chat call.
     * - OpenAI-family models use the chat.completions format.
     * - Claude is delegated to chatClaude() (Anthropic Messages API format).
     * @param messages - OpenAI-style [{ role, content }] messages.
     * @param options - optional { maxTokens, temperature, topP }.
     * @returns normalized { content, model, usage, finishReason }.
     * @throws {Error} with a user-friendly message on auth/rate-limit/server errors.
     */
    async chat(messages, options) {
        try {
            // Claude uses a different API format entirely.
            if (this.provider === 'claude') {
                return await this.chatClaude(messages, options);
            }
            // OpenAI-family standard format; omit tuning params unless provided,
            // since some proxied models reject unknown/unsupported parameters.
            const requestBody = {
                model: this.modelName,
                messages: messages,
                max_tokens: options?.maxTokens ?? 2000,
            };
            if (options?.temperature !== undefined) {
                requestBody.temperature = options.temperature;
            }
            if (options?.topP !== undefined) {
                requestBody.top_p = options.topP;
            }
            console.log(`[CloseAIAdapter] 发起非流式调用`, {
                provider: this.provider,
                model: this.modelName,
                messagesCount: messages.length,
                params: Object.keys(requestBody),
            });
            const response = await axios.post(`${this.baseURL}/chat/completions`, requestBody, {
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.apiKey}`,
                },
                timeout: 180000, // 3 minutes — GPT-5/Claude via proxy can be slow
            });
            const choice = response.data.choices[0];
            const result = {
                content: choice.message.content,
                model: response.data.model,
                usage: {
                    promptTokens: response.data.usage.prompt_tokens,
                    completionTokens: response.data.usage.completion_tokens,
                    totalTokens: response.data.usage.total_tokens,
                },
                finishReason: choice.finish_reason,
            };
            console.log(`[CloseAIAdapter] 调用成功`, {
                provider: this.provider,
                model: result.model,
                tokens: result.usage?.totalTokens,
                contentLength: result.content.length,
            });
            return result;
        }
        catch (error) {
            console.error(`[CloseAIAdapter] ${this.provider.toUpperCase()} API Error:`, error);
            if (axios.isAxiosError(error)) {
                const errorMessage = error.response?.data?.error?.message || error.message;
                const statusCode = error.response?.status;
                // Map common HTTP statuses to actionable messages.
                if (statusCode === 401) {
                    throw new Error(`CloseAI认证失败: API Key无效或已过期。请检查 CLOSEAI_API_KEY 配置。`);
                }
                else if (statusCode === 429) {
                    throw new Error(`CloseAI速率限制: 请求过于频繁,请稍后重试。`);
                }
                else if (statusCode === 500 || statusCode === 502 || statusCode === 503) {
                    throw new Error(`CloseAI服务异常: 代理服务暂时不可用,请稍后重试。`);
                }
                throw new Error(`CloseAI (${this.provider.toUpperCase()}) API调用失败: ${errorMessage}`);
            }
            throw error;
        }
    }
    /**
     * Claude-specific call path using the Anthropic Messages API format.
     *
     * Fixes over the previous version:
     * - Anthropic rejects `role: 'system'` entries inside `messages`; system
     *   prompts must be passed via the top-level `system` field, so they are
     *   split out here (keeps the OpenAI-style input contract for callers).
     * - temperature / top_p were previously dropped on the Claude path even
     *   though the Messages API supports both; they are now forwarded when set.
     * @param messages - OpenAI-style messages; system entries are extracted.
     * @param options - optional { maxTokens, temperature, topP }.
     * @returns normalized { content, model, usage, finishReason }.
     */
    async chatClaude(messages, options) {
        try {
            // Split system prompts out per the Anthropic Messages API contract.
            const systemText = messages
                .filter((m) => m.role === 'system')
                .map((m) => m.content)
                .join('\n\n');
            const chatMessages = messages.filter((m) => m.role !== 'system');
            const requestBody = {
                model: this.modelName,
                messages: chatMessages,
                max_tokens: options?.maxTokens ?? 2000,
            };
            if (systemText) {
                requestBody.system = systemText;
            }
            if (options?.temperature !== undefined) {
                requestBody.temperature = options.temperature;
            }
            if (options?.topP !== undefined) {
                requestBody.top_p = options.topP;
            }
            console.log(`[CloseAIAdapter] 发起Claude调用`, {
                model: this.modelName,
                messagesCount: chatMessages.length,
            });
            const response = await axios.post(`${this.baseURL}/v1/messages`, // Anthropic uses /v1/messages
            requestBody, {
                headers: {
                    'Content-Type': 'application/json',
                    'x-api-key': this.apiKey, // Anthropic auth header, not Authorization
                    'anthropic-version': '2023-06-01', // required version header
                },
                timeout: 180000,
            });
            // Anthropic's response shape differs from OpenAI's.
            const content = response.data.content[0].text;
            const result = {
                content: content,
                model: response.data.model,
                usage: {
                    promptTokens: response.data.usage.input_tokens,
                    completionTokens: response.data.usage.output_tokens,
                    totalTokens: response.data.usage.input_tokens + response.data.usage.output_tokens,
                },
                finishReason: response.data.stop_reason,
            };
            console.log(`[CloseAIAdapter] Claude调用成功`, {
                model: result.model,
                tokens: result.usage?.totalTokens,
                contentLength: result.content.length,
            });
            return result;
        }
        catch (error) {
            console.error(`[CloseAIAdapter] Claude API Error:`, error);
            if (axios.isAxiosError(error)) {
                const errorMessage = error.response?.data?.error?.message || error.message;
                throw new Error(`CloseAI (Claude) API调用失败: ${errorMessage}`);
            }
            throw error;
        }
    }
    /**
     * Streaming chat call (OpenAI-family SSE format only).
     * Claude streaming is not implemented yet and throws immediately.
     *
     * Fixes over the previous version:
     * - `done` now reflects ANY terminal finish_reason ('stop', 'length',
     *   'content_filter', ...). The old `=== 'stop'` check never signalled
     *   completion on truncated responses.
     * - A usage-only final chunk (empty `choices`, sent when
     *   stream_options.include_usage is enabled) no longer crashes on
     *   `choices[0]`; it is emitted as a terminal chunk carrying usage.
     * @param messages - OpenAI-style chat messages.
     * @param options - optional { maxTokens, temperature, topP }.
     * @param onChunk - optional per-chunk callback, invoked before yield.
     * @yields { content, done, model, usage? } stream chunks.
     */
    async *chatStream(messages, options, onChunk) {
        if (this.provider === 'claude') {
            throw new Error('Claude流式调用暂未实现请使用非流式调用');
        }
        try {
            const requestBody = {
                model: this.modelName,
                messages: messages,
                max_tokens: options?.maxTokens ?? 2000,
                stream: true,
            };
            if (options?.temperature !== undefined) {
                requestBody.temperature = options.temperature;
            }
            if (options?.topP !== undefined) {
                requestBody.top_p = options.topP;
            }
            console.log(`[CloseAIAdapter] 发起流式调用`, {
                provider: this.provider,
                model: this.modelName,
                messagesCount: messages.length,
            });
            const response = await axios.post(`${this.baseURL}/chat/completions`, requestBody, {
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.apiKey}`,
                },
                responseType: 'stream',
                timeout: 180000,
            });
            const stream = response.data;
            let buffer = '';
            let chunkCount = 0;
            for await (const chunk of stream) {
                buffer += chunk.toString();
                // SSE events are newline-delimited; keep the trailing partial line.
                const lines = buffer.split('\n');
                buffer = lines.pop() || '';
                for (const line of lines) {
                    const trimmedLine = line.trim();
                    // Skip blank keep-alives and the end-of-stream marker.
                    if (!trimmedLine || trimmedLine === 'data: [DONE]') {
                        continue;
                    }
                    if (trimmedLine.startsWith('data: ')) {
                        try {
                            const data = JSON.parse(trimmedLine.slice(6));
                            const choice = data.choices?.[0];
                            if (!choice) {
                                // Usage-only final chunk: no delta, just token counts.
                                if (data.usage) {
                                    const usageChunk = {
                                        content: '',
                                        done: true,
                                        model: data.model,
                                        usage: {
                                            promptTokens: data.usage.prompt_tokens,
                                            completionTokens: data.usage.completion_tokens,
                                            totalTokens: data.usage.total_tokens,
                                        },
                                    };
                                    chunkCount++;
                                    if (onChunk) {
                                        onChunk(usageChunk);
                                    }
                                    yield usageChunk;
                                }
                                continue;
                            }
                            // finish_reason is null until the final delta.
                            const done = choice.finish_reason != null;
                            const streamChunk = {
                                content: choice.delta?.content || '',
                                done: done,
                                model: data.model,
                            };
                            if (done && data.usage) {
                                streamChunk.usage = {
                                    promptTokens: data.usage.prompt_tokens,
                                    completionTokens: data.usage.completion_tokens,
                                    totalTokens: data.usage.total_tokens,
                                };
                            }
                            chunkCount++;
                            if (onChunk) {
                                onChunk(streamChunk);
                            }
                            yield streamChunk;
                        }
                        catch (parseError) {
                            // Tolerate malformed SSE frames; keep the stream alive.
                            console.error('[CloseAIAdapter] Failed to parse SSE data:', parseError);
                        }
                    }
                }
            }
            console.log(`[CloseAIAdapter] 流式调用完成`, {
                provider: this.provider,
                model: this.modelName,
                chunksReceived: chunkCount,
            });
        }
        catch (error) {
            console.error(`[CloseAIAdapter] ${this.provider.toUpperCase()} Stream Error:`, error);
            if (axios.isAxiosError(error)) {
                const errorMessage = error.response?.data?.error?.message || error.message;
                throw new Error(`CloseAI (${this.provider.toUpperCase()}) 流式调用失败: ${errorMessage}`);
            }
            throw error;
        }
    }
}

View File

@@ -0,0 +1,116 @@
import axios from 'axios';
import { config } from '../../../config/env.js';
export class DeepSeekAdapter {
    /**
     * Adapter for the DeepSeek chat.completions API (OpenAI-compatible).
     * @param modelName - model identifier; defaults to 'deepseek-chat'.
     * @throws {Error} when DEEPSEEK_API_KEY is not configured.
     */
    constructor(modelName = 'deepseek-chat') {
        this.modelName = modelName;
        this.apiKey = config.deepseekApiKey || '';
        this.baseURL = 'https://api.deepseek.com/v1';
        if (!this.apiKey) {
            throw new Error('DeepSeek API key is not configured');
        }
    }
    /**
     * Non-streaming chat call.
     * @param messages - OpenAI-style [{ role, content }] messages.
     * @param options - optional { temperature, maxTokens, topP }.
     * @returns normalized { content, model, usage, finishReason }.
     */
    async chat(messages, options) {
        try {
            const response = await axios.post(`${this.baseURL}/chat/completions`, {
                model: this.modelName,
                messages: messages,
                temperature: options?.temperature ?? 0.7,
                max_tokens: options?.maxTokens ?? 2000,
                top_p: options?.topP ?? 0.9,
                stream: false,
            }, {
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.apiKey}`,
                },
                timeout: 180000, // 3 minutes — manuscript evaluation needs longer
            });
            const choice = response.data.choices[0];
            return {
                content: choice.message.content,
                model: response.data.model,
                usage: {
                    promptTokens: response.data.usage.prompt_tokens,
                    completionTokens: response.data.usage.completion_tokens,
                    totalTokens: response.data.usage.total_tokens,
                },
                finishReason: choice.finish_reason,
            };
        }
        catch (error) {
            console.error('DeepSeek API Error:', error);
            if (axios.isAxiosError(error)) {
                throw new Error(`DeepSeek API调用失败: ${error.response?.data?.error?.message || error.message}`);
            }
            throw error;
        }
    }
    /**
     * Streaming chat call (SSE).
     *
     * Fixes over the previous version:
     * - Timeout raised from 60s to 180s to match chat(); the non-stream path
     *   already documented that long evaluations need the larger budget.
     * - `done` now reflects ANY terminal finish_reason ('stop', 'length', ...);
     *   the old `=== 'stop'` check never signalled done on truncated output.
     * - Guards against SSE frames with an empty `choices` array.
     * @param messages - OpenAI-style chat messages.
     * @param options - optional { temperature, maxTokens, topP }.
     * @param onChunk - optional per-chunk callback, invoked before yield.
     * @yields { content, done, model, usage? } stream chunks.
     */
    async *chatStream(messages, options, onChunk) {
        try {
            const response = await axios.post(`${this.baseURL}/chat/completions`, {
                model: this.modelName,
                messages: messages,
                temperature: options?.temperature ?? 0.7,
                max_tokens: options?.maxTokens ?? 2000,
                top_p: options?.topP ?? 0.9,
                stream: true,
            }, {
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.apiKey}`,
                },
                responseType: 'stream',
                timeout: 180000, // aligned with chat(); long generations exceeded 60s
            });
            const stream = response.data;
            let buffer = '';
            for await (const chunk of stream) {
                buffer += chunk.toString();
                // SSE events are newline-delimited; keep the trailing partial line.
                const lines = buffer.split('\n');
                buffer = lines.pop() || '';
                for (const line of lines) {
                    const trimmedLine = line.trim();
                    if (!trimmedLine || trimmedLine === 'data: [DONE]') {
                        continue;
                    }
                    if (trimmedLine.startsWith('data: ')) {
                        try {
                            const data = JSON.parse(trimmedLine.slice(6));
                            const choice = data.choices?.[0];
                            if (!choice) {
                                continue;
                            }
                            // finish_reason is null until the final delta.
                            const done = choice.finish_reason != null;
                            const streamChunk = {
                                content: choice.delta?.content || '',
                                done: done,
                                model: data.model,
                            };
                            if (done && data.usage) {
                                streamChunk.usage = {
                                    promptTokens: data.usage.prompt_tokens,
                                    completionTokens: data.usage.completion_tokens,
                                    totalTokens: data.usage.total_tokens,
                                };
                            }
                            if (onChunk) {
                                onChunk(streamChunk);
                            }
                            yield streamChunk;
                        }
                        catch (parseError) {
                            // Tolerate malformed SSE frames; keep the stream alive.
                            console.error('Failed to parse SSE data:', parseError);
                        }
                    }
                }
            }
        }
        catch (error) {
            console.error('DeepSeek Stream Error:', error);
            if (axios.isAxiosError(error)) {
                throw new Error(`DeepSeek流式调用失败: ${error.response?.data?.error?.message || error.message}`);
            }
            throw error;
        }
    }
}

View File

@@ -0,0 +1,39 @@
import { CloseAIAdapter } from './CloseAIAdapter.js';
/**
* GPT-4o适配器便捷封装
*
* 通过CloseAI代理访问OpenAI GPT-4o模型
*
* 模型特点:
* - 准确率与GPT-4同级
* - 速度1-2秒响应
* - 成本:适中
* - 适用场景:高质量文献筛选、复杂推理、结构化输出
*
* 性能对比:
* - gpt-4o: 1.5秒(推荐)✅
* - gpt-4o-mini: 0.7秒(经济版)
* - gpt-5-pro: 50秒CloseAI平台上过慢不推荐
*
* 使用示例:
* ```typescript
* import { GPT5Adapter } from '@/common/llm/adapters';
*
* const gpt = new GPT5Adapter(); // 默认使用 gpt-4o
* const response = await gpt.chat([
* { role: 'user', content: '根据PICO标准筛选文献...' }
* ]);
* ```
*
* 参考文档docs/02-通用能力层/01-LLM大模型网关/03-CloseAI集成指南.md
*/
export class GPT5Adapter extends CloseAIAdapter {
    /**
     * Builds an adapter for the OpenAI family routed through the CloseAI proxy.
     * NOTE: despite the class name, the default model is 'gpt-4o' — the file
     * header documents the latency testing behind this (gpt-5-pro was ~50s per
     * call on the proxy and is not recommended).
     * @param modelName - model name; defaults to 'gpt-4o'.
     * @throws {Error} propagated from CloseAIAdapter when CLOSEAI_API_KEY is missing.
     */
    constructor(modelName = 'gpt-4o') {
        super('openai', modelName);
        console.log(`[GPT5Adapter] 初始化完成,模型: ${modelName}`);
    }
}

View File

@@ -0,0 +1,76 @@
import { DeepSeekAdapter } from './DeepSeekAdapter.js';
import { QwenAdapter } from './QwenAdapter.js';
import { GPT5Adapter } from './GPT5Adapter.js';
import { ClaudeAdapter } from './ClaudeAdapter.js';
/**
* LLM工厂类
* 根据模型类型创建相应的适配器实例
*/
export class LLMFactory {
    /** Cache of adapter singletons, keyed by model type. */
    static adapters = new Map();
    /** Single source of truth for the supported model-type identifiers. */
    static SUPPORTED_MODELS = ['deepseek-v3', 'qwen3-72b', 'qwen-long', 'gpt-5', 'claude-4.5', 'gemini-pro'];
    /**
     * Returns the adapter for a model type, creating and caching it on first use.
     * @param modelType one of SUPPORTED_MODELS
     * @returns the cached LLM adapter instance
     * @throws {Error} for 'gemini-pro' (not implemented) or unknown model types
     */
    static getAdapter(modelType) {
        // Reuse a previously created adapter (singleton per model type).
        if (this.adapters.has(modelType)) {
            return this.adapters.get(modelType);
        }
        let adapter;
        switch (modelType) {
            case 'deepseek-v3':
                adapter = new DeepSeekAdapter('deepseek-chat');
                break;
            case 'qwen3-72b':
                adapter = new QwenAdapter('qwen-max'); // qwen-max: strongest Qwen model
                break;
            case 'qwen-long':
                adapter = new QwenAdapter('qwen-long'); // 1M-context long-text model
                break;
            case 'gpt-5':
                adapter = new GPT5Adapter(); // via CloseAI proxy; defaults to 'gpt-4o' (see GPT5Adapter)
                break;
            case 'claude-4.5':
                adapter = new ClaudeAdapter('claude-sonnet-4-5-20250929'); // via CloseAI proxy
                break;
            case 'gemini-pro':
                // TODO: implement a Gemini adapter
                throw new Error('Gemini adapter is not implemented yet');
            default:
                throw new Error(`Unsupported model type: ${modelType}`);
        }
        this.adapters.set(modelType, adapter);
        return adapter;
    }
    /**
     * Clears cached adapters.
     * @param modelType optional; clears one model's adapter, or all when omitted
     */
    static clearCache(modelType) {
        if (modelType) {
            this.adapters.delete(modelType);
        }
        else {
            this.adapters.clear();
        }
    }
    /**
     * Checks whether a model type is supported.
     * @param modelType model type identifier
     * @returns true when the type appears in SUPPORTED_MODELS
     */
    static isSupported(modelType) {
        return LLMFactory.SUPPORTED_MODELS.includes(modelType);
    }
    /**
     * Lists all supported model types.
     * @returns a fresh copy of the supported model list (safe to mutate)
     */
    static getSupportedModels() {
        return [...LLMFactory.SUPPORTED_MODELS];
    }
}

View File

@@ -0,0 +1,135 @@
import axios from 'axios';
import { config } from '../../../config/env.js';
export class QwenAdapter {
    /**
     * Adapter for Alibaba DashScope text-generation API (Qwen family).
     * @param modelName - model identifier; defaults to 'qwen-turbo'.
     * @throws {Error} when DASHSCOPE_API_KEY is not configured.
     */
    constructor(modelName = 'qwen-turbo') {
        this.modelName = modelName;
        this.apiKey = config.dashscopeApiKey || '';
        this.baseURL = 'https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation';
        if (!this.apiKey) {
            throw new Error('DashScope API key is not configured. Please set DASHSCOPE_API_KEY in .env file.');
        }
    }
    /**
     * Non-streaming chat call using DashScope's input/parameters envelope.
     * @param messages - OpenAI-style [{ role, content }] messages.
     * @param options - optional { temperature, maxTokens, topP }.
     * @returns normalized { content, model, usage, finishReason }.
     */
    async chat(messages, options) {
        try {
            const response = await axios.post(this.baseURL, {
                model: this.modelName,
                input: {
                    messages: messages,
                },
                parameters: {
                    temperature: options?.temperature ?? 0.7,
                    max_tokens: options?.maxTokens ?? 2000,
                    top_p: options?.topP ?? 0.9,
                    result_format: 'message', // OpenAI-like choices shape
                },
            }, {
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.apiKey}`,
                },
                timeout: 180000, // 3 minutes — manuscript evaluation needs longer
            });
            const output = response.data.output;
            const usage = response.data.usage;
            return {
                content: output.choices[0].message.content,
                model: this.modelName,
                usage: {
                    promptTokens: usage.input_tokens,
                    completionTokens: usage.output_tokens,
                    // total_tokens is not always present; fall back to the sum.
                    totalTokens: usage.total_tokens || usage.input_tokens + usage.output_tokens,
                },
                finishReason: output.choices[0].finish_reason,
            };
        }
        catch (error) {
            console.error('Qwen API Error:', error);
            if (axios.isAxiosError(error)) {
                throw new Error(`Qwen API调用失败: ${error.response?.data?.message || error.message}`);
            }
            throw error;
        }
    }
    /**
     * Streaming chat call via DashScope SSE (X-DashScope-SSE: enable).
     *
     * Fixes over the previous version:
     * - `done` now reflects ANY terminal finish_reason. DashScope reports
     *   finish_reason as the literal string 'null' while generating; the old
     *   `=== 'stop'` check never signalled done for 'length'-truncated streams.
     * - Guards against frames whose output/choices array is empty.
     * @param messages - OpenAI-style chat messages.
     * @param options - optional { temperature, maxTokens, topP }.
     * @param onChunk - optional per-chunk callback, invoked before yield.
     * @yields { content, done, model, usage? } stream chunks.
     */
    async *chatStream(messages, options, onChunk) {
        try {
            // Qwen-Long needs a larger budget: full-text mode can push ~750K tokens.
            const timeout = this.modelName === 'qwen-long' ? 300000 : 60000; // 5 min vs 1 min
            console.log(`[QwenAdapter] 开始流式调用`, {
                model: this.modelName,
                timeout: `${timeout / 1000}`,
                messagesCount: messages.length,
            });
            const response = await axios.post(this.baseURL, {
                model: this.modelName,
                input: {
                    messages: messages,
                },
                parameters: {
                    temperature: options?.temperature ?? 0.7,
                    max_tokens: options?.maxTokens ?? 2000,
                    top_p: options?.topP ?? 0.9,
                    result_format: 'message',
                    incremental_output: true, // deltas only, not cumulative text
                },
            }, {
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.apiKey}`,
                    'X-DashScope-SSE': 'enable',
                },
                responseType: 'stream',
                timeout: timeout,
            });
            const stream = response.data;
            let buffer = '';
            for await (const chunk of stream) {
                buffer += chunk.toString();
                // SSE events are newline-delimited; keep the trailing partial line.
                const lines = buffer.split('\n');
                buffer = lines.pop() || '';
                for (const line of lines) {
                    const trimmedLine = line.trim();
                    // Skip blank lines and SSE comment/keep-alive lines.
                    if (!trimmedLine || trimmedLine.startsWith(':')) {
                        continue;
                    }
                    if (trimmedLine.startsWith('data:')) {
                        try {
                            const data = JSON.parse(trimmedLine.slice(5).trim());
                            const choice = data.output?.choices?.[0];
                            if (!choice) {
                                continue;
                            }
                            // DashScope emits finish_reason 'null' (string) mid-stream;
                            // any other value ('stop', 'length', ...) is terminal.
                            const reason = choice.finish_reason;
                            const done = reason !== undefined && reason !== null && reason !== 'null';
                            const streamChunk = {
                                content: choice.message?.content || '',
                                done: done,
                                model: this.modelName,
                            };
                            if (done && data.usage) {
                                streamChunk.usage = {
                                    promptTokens: data.usage.input_tokens,
                                    completionTokens: data.usage.output_tokens,
                                    totalTokens: data.usage.total_tokens || data.usage.input_tokens + data.usage.output_tokens,
                                };
                            }
                            if (onChunk) {
                                onChunk(streamChunk);
                            }
                            yield streamChunk;
                        }
                        catch (parseError) {
                            // Tolerate malformed SSE frames; keep the stream alive.
                            console.error('Failed to parse Qwen SSE data:', parseError);
                        }
                    }
                }
            }
        }
        catch (error) {
            console.error('Qwen Stream Error:', error);
            if (axios.isAxiosError(error)) {
                throw new Error(`Qwen流式调用失败: ${error.response?.data?.message || error.message}`);
            }
            throw error;
        }
    }
}

View File

@@ -0,0 +1,2 @@
// LLM adapter type definitions.
// Compiled output of a TypeScript declarations-only module: all types are
// erased at build time, so this file intentionally has no runtime exports.
export {};