feat(ssa): Complete T-test end-to-end testing with 9 bug fixes - Phase 1 core 85% complete. R service: missing value auto-filter. Backend: error handling, variable matching, dynamic filename. Frontend: module activation, session isolation, error propagation. Full flow verified.

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
2026-02-19 20:57:00 +08:00
parent 8137e3cde2
commit 49b5c37cb1
86 changed files with 21207 additions and 252 deletions

View File

@@ -0,0 +1,39 @@
import { CloseAIAdapter } from './CloseAIAdapter.js';
/**
 * Convenience wrapper adapter for Claude-4.5-Sonnet.
 *
 * Accesses the Anthropic Claude-4.5-Sonnet model through the CloseAI proxy.
 *
 * Model characteristics:
 * - Accuracy: ~93%
 * - Speed: medium
 * - Cost: ¥0.021 / 1K tokens
 * - Suited for: third-party arbitration, structured output, high-quality text generation
 *
 * Use cases:
 * - Dual-model comparison screening (DeepSeek vs GPT-5)
 * - Three-model consensus arbitration (DeepSeek + GPT-5 + Claude)
 * - Acting as an independent referee to resolve conflicting decisions
 *
 * Example:
 * ```typescript
 * import { ClaudeAdapter } from '@/common/llm/adapters';
 *
 * const claude = new ClaudeAdapter();
 * const response = await claude.chat([
 *   { role: 'user', content: 'As a third-party referee, decide whether the article should be included...' }
 * ]);
 * ```
 *
 * Reference: docs/02-通用能力层/01-LLM大模型网关/03-CloseAI集成指南.md
 */
export class ClaudeAdapter extends CloseAIAdapter {
    /**
     * Constructor.
     * @param modelName - model name, defaults to 'claude-sonnet-4-5-20250929'
     */
    constructor(modelName = 'claude-sonnet-4-5-20250929') {
        super('claude', modelName);
        console.log(`[ClaudeAdapter] 初始化完成,模型: ${modelName}`);
    }
}

View File

@@ -0,0 +1,269 @@
import axios from 'axios';
import { config } from '../../../config/env.js';
/**
 * Generic adapter for models reached through the CloseAI proxy.
 *
 * Supported vendors (via CloseAI):
 * - OpenAI GPT-5-Pro
 * - Anthropic Claude-4.5-Sonnet
 *
 * Design principles:
 * - CloseAI exposes an OpenAI-compatible unified interface.
 * - Vendors are distinguished only by their Base URL.
 * - Request handling reuses the standard OpenAI wire format; Claude has
 *   a dedicated code path using the Anthropic Messages format.
 *
 * Reference: docs/02-通用能力层/01-LLM大模型网关/03-CloseAI集成指南.md
 */
export class CloseAIAdapter {
    /**
     * Constructor.
     * @param provider - vendor type: 'openai' or 'claude'
     * @param modelName - model name (e.g. 'gpt-5-pro' or 'claude-sonnet-4-5-20250929')
     * @throws Error when the CloseAI API key is missing from config
     */
    constructor(provider, modelName) {
        this.provider = provider;
        this.modelName = modelName;
        this.apiKey = config.closeaiApiKey || '';
        // Pick the vendor-specific base URL.
        this.baseURL = provider === 'openai'
            ? config.closeaiOpenaiBaseUrl // https://api.openai-proxy.org/v1
            : config.closeaiClaudeBaseUrl; // https://api.openai-proxy.org/anthropic
        // Fail fast if the API key is not configured.
        if (!this.apiKey) {
            throw new Error('CloseAI API key is not configured. Please set CLOSEAI_API_KEY in .env file.');
        }
        console.log(`[CloseAIAdapter] 初始化完成`, {
            provider: this.provider,
            model: this.modelName,
            baseURL: this.baseURL,
        });
    }
    /**
     * Non-streaming chat call.
     * - OpenAI models use the chat.completions format.
     * - Claude models are delegated to chatClaude() (Anthropic Messages API).
     *
     * @param messages - OpenAI-style message array ({ role, content })
     * @param options - optional { maxTokens, temperature, topP }
     * @returns { content, model, usage, finishReason }
     * @throws Error with a user-friendly message for auth (401), rate-limit (429)
     *         and server (5xx) failures; rethrows non-axios errors unchanged
     */
    async chat(messages, options) {
        try {
            // Claude uses a different API format.
            if (this.provider === 'claude') {
                return await this.chatClaude(messages, options);
            }
            // Standard OpenAI body; parameters the model may not support are omitted.
            const requestBody = {
                model: this.modelName,
                messages: messages,
                max_tokens: options?.maxTokens ?? 2000,
            };
            // Optional parameters: attach only when explicitly provided.
            if (options?.temperature !== undefined) {
                requestBody.temperature = options.temperature;
            }
            if (options?.topP !== undefined) {
                requestBody.top_p = options.topP;
            }
            console.log(`[CloseAIAdapter] 发起非流式调用`, {
                provider: this.provider,
                model: this.modelName,
                messagesCount: messages.length,
                params: Object.keys(requestBody),
            });
            const response = await axios.post(`${this.baseURL}/chat/completions`, requestBody, {
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.apiKey}`,
                },
                timeout: 180000, // 180 s (3 min) — GPT-5 and Claude can take longer
            });
            const choice = response.data.choices[0];
            const result = {
                content: choice.message.content,
                model: response.data.model,
                usage: {
                    promptTokens: response.data.usage.prompt_tokens,
                    completionTokens: response.data.usage.completion_tokens,
                    totalTokens: response.data.usage.total_tokens,
                },
                finishReason: choice.finish_reason,
            };
            console.log(`[CloseAIAdapter] 调用成功`, {
                provider: this.provider,
                model: result.model,
                tokens: result.usage?.totalTokens,
                contentLength: result.content.length,
            });
            return result;
        }
        catch (error) {
            console.error(`[CloseAIAdapter] ${this.provider.toUpperCase()} API Error:`, error);
            if (axios.isAxiosError(error)) {
                const errorMessage = error.response?.data?.error?.message || error.message;
                const statusCode = error.response?.status;
                // Map common HTTP failures to friendlier messages.
                if (statusCode === 401) {
                    throw new Error(`CloseAI认证失败: API Key无效或已过期。请检查 CLOSEAI_API_KEY 配置。`);
                }
                else if (statusCode === 429) {
                    throw new Error(`CloseAI速率限制: 请求过于频繁,请稍后重试。`);
                }
                else if (statusCode === 500 || statusCode === 502 || statusCode === 503) {
                    throw new Error(`CloseAI服务异常: 代理服务暂时不可用,请稍后重试。`);
                }
                throw new Error(`CloseAI (${this.provider.toUpperCase()}) API调用失败: ${errorMessage}`);
            }
            throw error;
        }
    }
    /**
     * Claude-specific call using the Anthropic Messages API format.
     *
     * NOTE(review): messages are forwarded as-is; the Anthropic Messages API
     * does not accept role 'system' entries in the messages array — confirm
     * callers never pass one, or hoist it into the top-level `system` field.
     *
     * @param messages - message array forwarded to the Anthropic endpoint
     * @param options - optional { maxTokens }
     * @returns { content, model, usage, finishReason }
     */
    async chatClaude(messages, options) {
        try {
            const requestBody = {
                model: this.modelName,
                messages: messages,
                max_tokens: options?.maxTokens ?? 2000,
            };
            console.log(`[CloseAIAdapter] 发起Claude调用`, {
                model: this.modelName,
                messagesCount: messages.length,
            });
            const response = await axios.post(`${this.baseURL}/v1/messages`, // Anthropic uses /v1/messages
            requestBody, {
                headers: {
                    'Content-Type': 'application/json',
                    'x-api-key': this.apiKey, // Anthropic uses x-api-key instead of Authorization
                    'anthropic-version': '2023-06-01', // Anthropic requires a version header
                },
                timeout: 180000,
            });
            // Anthropic's response shape differs from OpenAI's.
            const content = response.data.content[0].text;
            const result = {
                content: content,
                model: response.data.model,
                usage: {
                    promptTokens: response.data.usage.input_tokens,
                    completionTokens: response.data.usage.output_tokens,
                    totalTokens: response.data.usage.input_tokens + response.data.usage.output_tokens,
                },
                finishReason: response.data.stop_reason,
            };
            console.log(`[CloseAIAdapter] Claude调用成功`, {
                model: result.model,
                tokens: result.usage?.totalTokens,
                contentLength: result.content.length,
            });
            return result;
        }
        catch (error) {
            console.error(`[CloseAIAdapter] Claude API Error:`, error);
            if (axios.isAxiosError(error)) {
                const errorMessage = error.response?.data?.error?.message || error.message;
                throw new Error(`CloseAI (Claude) API调用失败: ${errorMessage}`);
            }
            throw error;
        }
    }
    /**
     * Streaming call (async generator over SSE chunks).
     * - OpenAI models use the standard SSE format.
     * - Claude streaming is not implemented and throws immediately.
     *
     * @param messages - OpenAI-style message array
     * @param options - optional { maxTokens, temperature, topP }
     * @param onChunk - optional callback invoked with every yielded chunk
     * @yields { content, done, model, usage? }
     *
     * NOTE(review): `done` is only set when finish_reason === 'stop'; other
     * finish reasons (e.g. 'length') never mark the stream done — confirm
     * consumers do not rely solely on the `done` flag for termination.
     */
    async *chatStream(messages, options, onChunk) {
        // Claude streaming is not supported yet.
        if (this.provider === 'claude') {
            throw new Error('Claude流式调用暂未实现请使用非流式调用');
        }
        try {
            // Standard OpenAI SSE request body.
            const requestBody = {
                model: this.modelName,
                messages: messages,
                max_tokens: options?.maxTokens ?? 2000,
                stream: true,
            };
            // Optional parameters: attach only when explicitly provided.
            if (options?.temperature !== undefined) {
                requestBody.temperature = options.temperature;
            }
            if (options?.topP !== undefined) {
                requestBody.top_p = options.topP;
            }
            console.log(`[CloseAIAdapter] 发起流式调用`, {
                provider: this.provider,
                model: this.modelName,
                messagesCount: messages.length,
            });
            const response = await axios.post(`${this.baseURL}/chat/completions`, requestBody, {
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.apiKey}`,
                },
                responseType: 'stream',
                timeout: 180000, // 180 s timeout
            });
            const stream = response.data;
            let buffer = '';
            let chunkCount = 0;
            for await (const chunk of stream) {
                // Accumulate raw bytes; a network chunk may end mid-line,
                // so the trailing partial line is kept in the buffer.
                buffer += chunk.toString();
                const lines = buffer.split('\n');
                buffer = lines.pop() || '';
                for (const line of lines) {
                    const trimmedLine = line.trim();
                    // Skip blank lines and the terminal sentinel.
                    if (!trimmedLine || trimmedLine === 'data: [DONE]') {
                        continue;
                    }
                    // Parse one SSE data record.
                    if (trimmedLine.startsWith('data: ')) {
                        try {
                            const jsonStr = trimmedLine.slice(6);
                            const data = JSON.parse(jsonStr);
                            const choice = data.choices[0];
                            const content = choice.delta?.content || '';
                            const streamChunk = {
                                content: content,
                                done: choice.finish_reason === 'stop',
                                model: data.model,
                            };
                            // On stream end, attach usage info when provided.
                            if (choice.finish_reason === 'stop' && data.usage) {
                                streamChunk.usage = {
                                    promptTokens: data.usage.prompt_tokens,
                                    completionTokens: data.usage.completion_tokens,
                                    totalTokens: data.usage.total_tokens,
                                };
                            }
                            chunkCount++;
                            // Optional per-chunk callback.
                            if (onChunk) {
                                onChunk(streamChunk);
                            }
                            yield streamChunk;
                        }
                        catch (parseError) {
                            console.error('[CloseAIAdapter] Failed to parse SSE data:', parseError);
                            // Keep processing subsequent chunks; do not abort the stream.
                        }
                    }
                }
            }
            console.log(`[CloseAIAdapter] 流式调用完成`, {
                provider: this.provider,
                model: this.modelName,
                chunksReceived: chunkCount,
            });
        }
        catch (error) {
            console.error(`[CloseAIAdapter] ${this.provider.toUpperCase()} Stream Error:`, error);
            if (axios.isAxiosError(error)) {
                const errorMessage = error.response?.data?.error?.message || error.message;
                throw new Error(`CloseAI (${this.provider.toUpperCase()}) 流式调用失败: ${errorMessage}`);
            }
            throw error;
        }
    }
}

View File

@@ -0,0 +1,116 @@
import axios from 'axios';
import { config } from '../../../config/env.js';
/**
 * Adapter for the DeepSeek chat API
 * (OpenAI-compatible wire format at api.deepseek.com).
 */
export class DeepSeekAdapter {
    /**
     * Constructor.
     * @param modelName - model name, defaults to 'deepseek-chat'
     * @throws Error when the DeepSeek API key is missing from config
     */
    constructor(modelName = 'deepseek-chat') {
        this.modelName = modelName;
        this.apiKey = config.deepseekApiKey || '';
        this.baseURL = 'https://api.deepseek.com/v1';
        if (!this.apiKey) {
            throw new Error('DeepSeek API key is not configured');
        }
    }
    /**
     * Non-streaming call.
     * @param messages - OpenAI-style message array ({ role, content })
     * @param options - optional { temperature, maxTokens, topP }
     * @returns { content, model, usage, finishReason }
     * @throws Error wrapping the API error message on HTTP failure
     */
    async chat(messages, options) {
        try {
            const response = await axios.post(`${this.baseURL}/chat/completions`, {
                model: this.modelName,
                messages: messages,
                temperature: options?.temperature ?? 0.7,
                max_tokens: options?.maxTokens ?? 2000,
                top_p: options?.topP ?? 0.9,
                stream: false,
            }, {
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.apiKey}`,
                },
                timeout: 180000, // 180 s (3 min) — manuscript evaluation needs more time
            });
            const choice = response.data.choices[0];
            return {
                content: choice.message.content,
                model: response.data.model,
                usage: {
                    promptTokens: response.data.usage.prompt_tokens,
                    completionTokens: response.data.usage.completion_tokens,
                    totalTokens: response.data.usage.total_tokens,
                },
                finishReason: choice.finish_reason,
            };
        }
        catch (error) {
            console.error('DeepSeek API Error:', error);
            if (axios.isAxiosError(error)) {
                throw new Error(`DeepSeek API调用失败: ${error.response?.data?.error?.message || error.message}`);
            }
            throw error;
        }
    }
    /**
     * Streaming call (async generator over SSE chunks).
     * @param messages - OpenAI-style message array
     * @param options - optional { temperature, maxTokens, topP }
     * @param onChunk - optional callback invoked with every yielded chunk
     * @yields { content, done, model, usage? }
     *
     * NOTE(review): streaming uses a 60 s timeout while chat() uses 180 s —
     * confirm the difference is intentional.
     * NOTE(review): `done` is only set when finish_reason === 'stop'; other
     * finish reasons (e.g. 'length') never mark the stream done — confirm.
     */
    async *chatStream(messages, options, onChunk) {
        try {
            const response = await axios.post(`${this.baseURL}/chat/completions`, {
                model: this.modelName,
                messages: messages,
                temperature: options?.temperature ?? 0.7,
                max_tokens: options?.maxTokens ?? 2000,
                top_p: options?.topP ?? 0.9,
                stream: true,
            }, {
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.apiKey}`,
                },
                responseType: 'stream',
                timeout: 60000,
            });
            const stream = response.data;
            let buffer = '';
            for await (const chunk of stream) {
                // A network chunk may end mid-line; keep the partial tail buffered.
                buffer += chunk.toString();
                const lines = buffer.split('\n');
                buffer = lines.pop() || '';
                for (const line of lines) {
                    const trimmedLine = line.trim();
                    // Skip blank lines and the terminal sentinel.
                    if (!trimmedLine || trimmedLine === 'data: [DONE]') {
                        continue;
                    }
                    if (trimmedLine.startsWith('data: ')) {
                        try {
                            const jsonStr = trimmedLine.slice(6);
                            const data = JSON.parse(jsonStr);
                            const choice = data.choices[0];
                            const content = choice.delta?.content || '';
                            const streamChunk = {
                                content: content,
                                done: choice.finish_reason === 'stop',
                                model: data.model,
                            };
                            // On stream end, attach usage info when provided.
                            if (choice.finish_reason === 'stop' && data.usage) {
                                streamChunk.usage = {
                                    promptTokens: data.usage.prompt_tokens,
                                    completionTokens: data.usage.completion_tokens,
                                    totalTokens: data.usage.total_tokens,
                                };
                            }
                            if (onChunk) {
                                onChunk(streamChunk);
                            }
                            yield streamChunk;
                        }
                        catch (parseError) {
                            // Keep processing subsequent chunks; do not abort the stream.
                            console.error('Failed to parse SSE data:', parseError);
                        }
                    }
                }
            }
        }
        catch (error) {
            console.error('DeepSeek Stream Error:', error);
            if (axios.isAxiosError(error)) {
                throw new Error(`DeepSeek流式调用失败: ${error.response?.data?.error?.message || error.message}`);
            }
            throw error;
        }
    }
}

View File

@@ -0,0 +1,39 @@
import { CloseAIAdapter } from './CloseAIAdapter.js';
/**
 * Convenience wrapper adapter for GPT-4o.
 *
 * Accesses the OpenAI GPT-4o model through the CloseAI proxy.
 *
 * Model characteristics:
 * - Accuracy: on par with GPT-4
 * - Speed: 1-2 s response
 * - Cost: moderate
 * - Suited for: high-quality literature screening, complex reasoning, structured output
 *
 * Performance comparison:
 * - gpt-4o: 1.5 s (recommended) ✅
 * - gpt-4o-mini: 0.7 s (budget option)
 * - gpt-5-pro: ~50 s (too slow via CloseAI; not recommended)
 *
 * NOTE(review): the class is named GPT5Adapter but defaults to 'gpt-4o';
 * the name is kept for backward compatibility with existing callers.
 *
 * Example:
 * ```typescript
 * import { GPT5Adapter } from '@/common/llm/adapters';
 *
 * const gpt = new GPT5Adapter(); // defaults to gpt-4o
 * const response = await gpt.chat([
 *   { role: 'user', content: 'Screen the articles against the PICO criteria...' }
 * ]);
 * ```
 *
 * Reference: docs/02-通用能力层/01-LLM大模型网关/03-CloseAI集成指南.md
 */
export class GPT5Adapter extends CloseAIAdapter {
    /**
     * Constructor.
     * @param modelName - model name, defaults to 'gpt-4o' (chosen after performance testing)
     */
    constructor(modelName = 'gpt-4o') {
        super('openai', modelName);
        console.log(`[GPT5Adapter] 初始化完成,模型: ${modelName}`);
    }
}

View File

@@ -0,0 +1,76 @@
import { DeepSeekAdapter } from './DeepSeekAdapter.js';
import { QwenAdapter } from './QwenAdapter.js';
import { GPT5Adapter } from './GPT5Adapter.js';
import { ClaudeAdapter } from './ClaudeAdapter.js';
/**
 * LLM factory.
 * Creates and caches one adapter instance per model type (singleton per type).
 */
// Single source of truth for the recognized model types.
// Previously this list was duplicated in isSupported() and getSupportedModels(),
// which risked the two drifting apart.
const SUPPORTED_MODEL_TYPES = Object.freeze([
    'deepseek-v3',
    'qwen3-72b',
    'qwen-long',
    'gpt-5',
    'claude-4.5',
    'gemini-pro',
]);
export class LLMFactory {
    /**
     * Get an LLM adapter instance (singleton per model type).
     * @param modelType - model type identifier
     * @returns the cached or newly created adapter
     * @throws Error for unimplemented ('gemini-pro') or unknown model types
     */
    static getAdapter(modelType) {
        // Return the cached adapter when one already exists.
        if (this.adapters.has(modelType)) {
            return this.adapters.get(modelType);
        }
        // Create the adapter for the requested model type.
        let adapter;
        switch (modelType) {
            case 'deepseek-v3':
                adapter = new DeepSeekAdapter('deepseek-chat');
                break;
            case 'qwen3-72b':
                adapter = new QwenAdapter('qwen-max'); // qwen-max: Qwen's strongest model
                break;
            case 'qwen-long':
                adapter = new QwenAdapter('qwen-long'); // 1M-token context, extra-long documents
                break;
            case 'gpt-5':
                adapter = new GPT5Adapter(); // via CloseAI proxy; adapter defaults to 'gpt-4o'
                break;
            case 'claude-4.5':
                adapter = new ClaudeAdapter('claude-sonnet-4-5-20250929'); // via CloseAI proxy
                break;
            case 'gemini-pro':
                // TODO: implement a Gemini adapter
                throw new Error('Gemini adapter is not implemented yet');
            default:
                throw new Error(`Unsupported model type: ${modelType}`);
        }
        // Cache the instance for subsequent calls.
        this.adapters.set(modelType, adapter);
        return adapter;
    }
    /**
     * Clear the adapter cache.
     * @param modelType - optional; clears one adapter when given, all when omitted
     */
    static clearCache(modelType) {
        if (modelType) {
            this.adapters.delete(modelType);
        }
        else {
            this.adapters.clear();
        }
    }
    /**
     * Whether the given model type is recognized.
     * Note: 'gemini-pro' is listed but getAdapter() still throws for it (not implemented).
     * @param modelType - model type identifier
     * @returns true when the type is in the supported list
     */
    static isSupported(modelType) {
        return SUPPORTED_MODEL_TYPES.includes(modelType);
    }
    /**
     * List all recognized model types.
     * @returns a fresh mutable copy of the supported-model list
     */
    static getSupportedModels() {
        return [...SUPPORTED_MODEL_TYPES];
    }
}
LLMFactory.adapters = new Map();

View File

@@ -0,0 +1,135 @@
import axios from 'axios';
import { config } from '../../../config/env.js';
/**
 * Adapter for Alibaba DashScope (Qwen) text-generation API.
 * Uses DashScope's own request/response envelope ({ input, parameters } /
 * { output, usage }) rather than the OpenAI wire format.
 */
export class QwenAdapter {
    /**
     * Constructor.
     * @param modelName - model name, defaults to 'qwen-turbo'
     * @throws Error when the DashScope API key is missing from config
     */
    constructor(modelName = 'qwen-turbo') {
        this.modelName = modelName;
        this.apiKey = config.dashscopeApiKey || '';
        this.baseURL = 'https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation';
        if (!this.apiKey) {
            throw new Error('DashScope API key is not configured. Please set DASHSCOPE_API_KEY in .env file.');
        }
    }
    /**
     * Non-streaming call.
     * @param messages - message array ({ role, content })
     * @param options - optional { temperature, maxTokens, topP }
     * @returns { content, model, usage, finishReason }
     * @throws Error wrapping the DashScope error message on HTTP failure
     */
    async chat(messages, options) {
        try {
            const response = await axios.post(this.baseURL, {
                model: this.modelName,
                input: {
                    messages: messages,
                },
                parameters: {
                    temperature: options?.temperature ?? 0.7,
                    max_tokens: options?.maxTokens ?? 2000,
                    top_p: options?.topP ?? 0.9,
                    result_format: 'message',
                },
            }, {
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.apiKey}`,
                },
                timeout: 180000, // 180 s (3 min) — manuscript evaluation needs more time
            });
            const output = response.data.output;
            const usage = response.data.usage;
            return {
                content: output.choices[0].message.content,
                model: this.modelName,
                usage: {
                    promptTokens: usage.input_tokens,
                    completionTokens: usage.output_tokens,
                    totalTokens: usage.total_tokens || usage.input_tokens + usage.output_tokens,
                },
                finishReason: output.choices[0].finish_reason,
            };
        }
        catch (error) {
            console.error('Qwen API Error:', error);
            if (axios.isAxiosError(error)) {
                throw new Error(`Qwen API调用失败: ${error.response?.data?.message || error.message}`);
            }
            throw error;
        }
    }
    /**
     * Streaming call (async generator over DashScope SSE chunks).
     * @param messages - message array
     * @param options - optional { temperature, maxTokens, topP }
     * @param onChunk - optional callback invoked with every yielded chunk
     * @yields { content, done, model, usage? }
     */
    async *chatStream(messages, options, onChunk) {
        try {
            // qwen-long needs a longer timeout: full-text mode may carry ~750K tokens.
            const timeout = this.modelName === 'qwen-long' ? 300000 : 60000; // 5 min vs 1 min
            console.log(`[QwenAdapter] 开始流式调用`, {
                model: this.modelName,
                timeout: `${timeout / 1000}`,
                messagesCount: messages.length,
            });
            const response = await axios.post(this.baseURL, {
                model: this.modelName,
                input: {
                    messages: messages,
                },
                parameters: {
                    temperature: options?.temperature ?? 0.7,
                    max_tokens: options?.maxTokens ?? 2000,
                    top_p: options?.topP ?? 0.9,
                    result_format: 'message',
                    incremental_output: true,
                },
            }, {
                headers: {
                    'Content-Type': 'application/json',
                    Authorization: `Bearer ${this.apiKey}`,
                    'X-DashScope-SSE': 'enable', // DashScope requires this header to stream
                },
                responseType: 'stream',
                timeout: timeout,
            });
            const stream = response.data;
            let buffer = '';
            for await (const chunk of stream) {
                // A network chunk may end mid-line; keep the partial tail buffered.
                buffer += chunk.toString();
                const lines = buffer.split('\n');
                buffer = lines.pop() || '';
                for (const line of lines) {
                    const trimmedLine = line.trim();
                    // Skip blank lines and SSE comment lines (leading ':').
                    if (!trimmedLine || trimmedLine.startsWith(':')) {
                        continue;
                    }
                    if (trimmedLine.startsWith('data:')) {
                        try {
                            const jsonStr = trimmedLine.slice(5).trim();
                            const data = JSON.parse(jsonStr);
                            const output = data.output;
                            const choice = output.choices[0];
                            const content = choice.message?.content || '';
                            const streamChunk = {
                                content: content,
                                done: choice.finish_reason === 'stop',
                                model: this.modelName,
                            };
                            // On stream end, attach usage info when provided.
                            if (choice.finish_reason === 'stop' && data.usage) {
                                streamChunk.usage = {
                                    promptTokens: data.usage.input_tokens,
                                    completionTokens: data.usage.output_tokens,
                                    totalTokens: data.usage.total_tokens || data.usage.input_tokens + data.usage.output_tokens,
                                };
                            }
                            if (onChunk) {
                                onChunk(streamChunk);
                            }
                            yield streamChunk;
                        }
                        catch (parseError) {
                            // Keep processing subsequent chunks; do not abort the stream.
                            console.error('Failed to parse Qwen SSE data:', parseError);
                        }
                    }
                }
            }
        }
        catch (error) {
            console.error('Qwen Stream Error:', error);
            if (axios.isAxiosError(error)) {
                throw new Error(`Qwen流式调用失败: ${error.response?.data?.message || error.message}`);
            }
            throw error;
        }
    }
}

View File

@@ -0,0 +1,2 @@
// LLM adapter type definitions (TypeScript-only; erased at runtime,
// the empty export keeps this file a valid ES module).
export {};

View File

@@ -0,0 +1,27 @@
/**
* 日志系统统一导出
*
* 提供平台级的日志能力,支持结构化日志和多种日志场景。
*
* @module logging
*
* @example
* ```typescript
* // 方式1使用全局logger推荐
* import { logger } from '@/common/logging'
* logger.info('User logged in', { userId: 123 })
*
* // 方式2创建子logger带上下文
* import { createChildLogger } from '@/common/logging'
* const aslLogger = createChildLogger('ASL', { projectId: 456 })
* aslLogger.info('Screening started')
*
* // 方式3使用专用日志函数
* import { logHttpRequest, logLLMCall } from '@/common/logging'
* logHttpRequest('GET', '/api/projects', 200, 50)
* logLLMCall('deepseek', 'chat', 1500, { model: 'deepseek-chat' })
* ```
*/
export { logger, createChildLogger, logHttpRequest, logDatabaseQuery, logLLMCall, logAsyncJob } from './logger.js';
// Default export (same logger instance, re-exported for convenience).
export { default } from './logger.js';

View File

@@ -0,0 +1,161 @@
import winston from 'winston';
/**
* 云原生日志系统
*
* 核心设计原则:
* - ✅ 只输出到stdout不写本地文件
* - ✅ JSON格式便于阿里云SLS解析
* - ✅ 结构化日志(包含元数据)
* - ✅ 统一的日志格式
*
* 日志级别:
* - error: 错误,需要立即处理
* - warn: 警告,需要关注
* - info: 重要信息,正常业务日志
* - debug: 调试信息,仅开发环境
*
* 环境变量:
* - LOG_LEVEL: 日志级别默认development=debug, production=info
* - NODE_ENV: development | production
* - SERVICE_NAME: 服务名称默认aiclinical-backend
*
* @example
* ```typescript
* import { logger } from '@/common/logging'
*
* // 基础日志
* logger.info('User logged in', { userId: 123 })
* logger.error('Database query failed', { error: err.message, query: 'SELECT ...' })
*
* // 带上下文的日志
* const childLogger = logger.child({ module: 'ASL', projectId: 456 })
* childLogger.info('Screening started', { count: 100 })
* ```
*/
/**
 * Resolve the active log level.
 * An explicit LOG_LEVEL env var always wins; otherwise 'info' in
 * production and 'debug' everywhere else.
 */
function getLogLevel() {
    const explicit = process.env.LOG_LEVEL;
    if (explicit) {
        return explicit;
    }
    const isProduction = process.env.NODE_ENV === 'production';
    return isProduction ? 'info' : 'debug';
}
/** Resolve the service name stamped onto every log line. */
function getServiceName() {
    const fromEnv = process.env.SERVICE_NAME;
    if (fromEnv) {
        return fromEnv;
    }
    return 'aiclinical-backend';
}
// Create the Winston logger instance.
export const logger = winston.createLogger({
    level: getLogLevel(),
    // JSON format + timestamp + error stack traces.
    format: winston.format.combine(winston.format.timestamp({
        format: 'YYYY-MM-DD HH:mm:ss.SSS'
    }), winston.format.errors({ stack: true }), winston.format.json()),
    // Default metadata attached to every log line.
    defaultMeta: {
        service: getServiceName(),
        env: process.env.NODE_ENV || 'development',
        // HOSTNAME on Linux/containers, COMPUTERNAME on Windows.
        instance: process.env.HOSTNAME || process.env.COMPUTERNAME || 'unknown',
        pid: process.pid
    },
    // Cloud-native: write to stdout only (no local files; a collector such as
    // Aliyun SLS is expected to pick up the stream).
    transports: [
        new winston.transports.Console({
            format: process.env.NODE_ENV === 'production'
                ? winston.format.json() // production: pure JSON
                : winston.format.combine(// development: colorized human-readable output
                winston.format.colorize(), winston.format.printf((info) => {
                    const { timestamp, level, message, service, ...meta } = info;
                    const metaStr = Object.keys(meta).length > 0
                        ? '\n ' + JSON.stringify(meta, null, 2)
                        : '';
                    return `${timestamp} [${service}] ${level}: ${message}${metaStr}`;
                }))
        })
    ]
});
/**
 * Create a child logger that carries fixed context fields.
 *
 * @example
 * ```typescript
 * const aslLogger = createChildLogger('ASL', { projectId: 123 })
 * aslLogger.info('Screening started')
 * // emits: { ..., module: 'ASL', projectId: 123, message: 'Screening started' }
 * ```
 */
export function createChildLogger(module, meta = {}) {
    const context = { module, ...meta };
    return logger.child(context);
}
/**
 * Log one HTTP request at a severity derived from its status code:
 * 5xx → error, 4xx → warn, everything else → info.
 *
 * @example
 * ```typescript
 * logHttpRequest('GET', '/api/projects', 200, 50)
 * ```
 */
export function logHttpRequest(method, url, statusCode, duration, meta = {}) {
    let level = 'info';
    if (statusCode >= 500) {
        level = 'error';
    }
    else if (statusCode >= 400) {
        level = 'warn';
    }
    const payload = { type: 'http', method, url, statusCode, duration, ...meta };
    logger.log(level, 'HTTP Request', payload);
}
/**
 * Log one database query at debug level.
 *
 * Fix: the guard previously checked `process.env.LOG_LEVEL === 'debug'`, but
 * getLogLevel() defaults to 'debug' in development even when LOG_LEVEL is
 * unset — so debug logging was enabled while queries were silently skipped.
 * Guard on the logger's effective level instead.
 *
 * @param query - SQL text (truncated to 200 chars to keep log lines bounded)
 * @param duration - query duration in milliseconds
 * @param meta - extra structured fields merged into the log entry
 *
 * @example
 * ```typescript
 * logDatabaseQuery('SELECT * FROM users WHERE id = $1', 45, { userId: 123 })
 * ```
 */
export function logDatabaseQuery(query, duration, meta = {}) {
    if (logger.isDebugEnabled()) {
        logger.debug('Database Query', {
            type: 'database',
            query: query.substring(0, 200), // cap length
            duration,
            ...meta
        });
    }
}
/**
 * Log one LLM API call at info level.
 *
 * @example
 * ```typescript
 * logLLMCall('deepseek', 'chat', 1500, { model: 'deepseek-chat' })
 * ```
 */
export function logLLMCall(provider, operation, duration, meta = {}) {
    const payload = {
        type: 'llm',
        provider,
        operation,
        duration,
        ...meta
    };
    logger.info('LLM API Call', payload);
}
/**
 * Log one async job event; failed jobs log at error level, all others at info.
 *
 * @example
 * ```typescript
 * logAsyncJob('asl:screening', 'started', { jobId: '123', projectId: 456 })
 * ```
 */
export function logAsyncJob(jobType, status, meta = {}) {
    let level = 'info';
    if (status === 'failed') {
        level = 'error';
    }
    logger.log(level, 'Async Job', {
        type: 'job',
        jobType,
        status,
        ...meta
    });
}
// Export the shared logger instance as the module default.
export default logger;