feat(iit): Complete CRA Agent V3.0 P1 - ChatOrchestrator with LLM Function Calling

P1 Architecture: Lightweight ReAct (Function Calling loop, max 3 rounds)

Core changes:
- Add ToolDefinition/ToolCall types to LLM adapters (DeepSeek + CloseAI + Claude)
- Replace 6 old tools with 4 semantic tools: read_report, look_up_data, check_quality, search_knowledge
- Create ChatOrchestrator (~160 lines) replacing ChatService (1,442 lines)
- Wire WechatCallbackController to ChatOrchestrator, deprecate ChatService
- Fix nullable content (string | null) across 12+ LLM consumer files

E2E test results: 8/8 scenarios passed (100%)
- QC report query, critical issues, patient data, trend, on-demand QC
- Knowledge base search, project overview, data modification refusal

Net code reduction: ~1,100 lines
Tested: E2E P1 chat test 8/8 passed with DeepSeek API

Made-with: Cursor
This commit is contained in:
2026-02-26 14:27:09 +08:00
parent 203846968c
commit 7c3cc12b2e
32 changed files with 903 additions and 337 deletions

View File

@@ -63,27 +63,22 @@ export class CloseAIAdapter implements ILLMAdapter {
return await this.chatClaude(messages, options);
}
// OpenAI系列标准格式不包含temperature等可能不支持的参数
const requestBody: any = {
model: this.modelName,
messages: messages,
max_tokens: options?.maxTokens ?? 2000,
};
// 可选参数:只在提供时才添加
if (options?.temperature !== undefined) {
requestBody.temperature = options.temperature;
}
if (options?.topP !== undefined) {
requestBody.top_p = options.topP;
}
console.log(`[CloseAIAdapter] 发起非流式调用`, {
provider: this.provider,
model: this.modelName,
messagesCount: messages.length,
params: Object.keys(requestBody),
});
if (options?.tools?.length) {
requestBody.tools = options.tools;
requestBody.tool_choice = options.tool_choice ?? 'auto';
}
const response = await axios.post(
`${this.baseURL}/chat/completions`,
@@ -93,14 +88,14 @@ export class CloseAIAdapter implements ILLMAdapter {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.apiKey}`,
},
timeout: 180000, // 180秒超时(3分钟) - GPT-5和Claude可能需要更长时间
timeout: 180000,
}
);
const choice = response.data.choices[0];
const result: LLMResponse = {
content: choice.message.content,
content: choice.message.content ?? null,
model: response.data.model,
usage: {
promptTokens: response.data.usage.prompt_tokens,
@@ -108,15 +103,9 @@ export class CloseAIAdapter implements ILLMAdapter {
totalTokens: response.data.usage.total_tokens,
},
finishReason: choice.finish_reason,
toolCalls: choice.message.tool_calls ?? undefined,
};
console.log(`[CloseAIAdapter] 调用成功`, {
provider: this.provider,
model: result.model,
tokens: result.usage?.totalTokens,
contentLength: result.content.length,
});
return result;
} catch (error: unknown) {
console.error(`[CloseAIAdapter] ${this.provider.toUpperCase()} API Error:`, error);
@@ -155,50 +144,64 @@ export class CloseAIAdapter implements ILLMAdapter {
*/
private async chatClaude(messages: Message[], options?: LLMOptions): Promise<LLMResponse> {
try {
const requestBody = {
const requestBody: any = {
model: this.modelName,
messages: messages,
max_tokens: options?.maxTokens ?? 2000,
};
console.log(`[CloseAIAdapter] 发起Claude调用`, {
model: this.modelName,
messagesCount: messages.length,
});
if (options?.tools?.length) {
requestBody.tools = options.tools.map((t) => ({
name: t.function.name,
description: t.function.description,
input_schema: t.function.parameters,
}));
if (options.tool_choice === 'none') {
requestBody.tool_choice = { type: 'none' };
} else if (options.tool_choice === 'required') {
requestBody.tool_choice = { type: 'any' };
} else {
requestBody.tool_choice = { type: 'auto' };
}
}
const response = await axios.post(
`${this.baseURL}/v1/messages`, // Anthropic使用 /v1/messages
`${this.baseURL}/v1/messages`,
requestBody,
{
headers: {
'Content-Type': 'application/json',
'x-api-key': this.apiKey, // Anthropic使用 x-api-key 而不是 Authorization
'anthropic-version': '2023-06-01', // Anthropic需要版本号
'x-api-key': this.apiKey,
'anthropic-version': '2023-06-01',
},
timeout: 180000,
}
);
// Anthropic的响应格式不同
const content = response.data.content[0].text;
const blocks = response.data.content as any[];
const textBlock = blocks.find((b: any) => b.type === 'text');
const toolBlocks = blocks.filter((b: any) => b.type === 'tool_use');
const toolCalls = toolBlocks.length > 0
? toolBlocks.map((b: any) => ({
id: b.id,
type: 'function' as const,
function: { name: b.name, arguments: JSON.stringify(b.input) },
}))
: undefined;
const result: LLMResponse = {
content: content,
content: textBlock?.text ?? null,
model: response.data.model,
usage: {
promptTokens: response.data.usage.input_tokens,
completionTokens: response.data.usage.output_tokens,
totalTokens: response.data.usage.input_tokens + response.data.usage.output_tokens,
},
finishReason: response.data.stop_reason,
finishReason: response.data.stop_reason === 'tool_use' ? 'tool_calls' : response.data.stop_reason,
toolCalls,
};
console.log(`[CloseAIAdapter] Claude调用成功`, {
model: result.model,
tokens: result.usage?.totalTokens,
contentLength: result.content.length,
});
return result;
} catch (error: unknown) {
console.error(`[CloseAIAdapter] Claude API Error:`, error);

View File

@@ -17,32 +17,38 @@ export class DeepSeekAdapter implements ILLMAdapter {
}
}
// 非流式调用
async chat(messages: Message[], options?: LLMOptions): Promise<LLMResponse> {
try {
const requestBody: any = {
model: this.modelName,
messages: messages,
temperature: options?.temperature ?? 0.7,
max_tokens: options?.maxTokens ?? 2000,
top_p: options?.topP ?? 0.9,
stream: false,
};
if (options?.tools?.length) {
requestBody.tools = options.tools;
requestBody.tool_choice = options.tool_choice ?? 'auto';
}
const response = await axios.post(
`${this.baseURL}/chat/completions`,
{
model: this.modelName,
messages: messages,
temperature: options?.temperature ?? 0.7,
max_tokens: options?.maxTokens ?? 2000,
top_p: options?.topP ?? 0.9,
stream: false,
},
requestBody,
{
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.apiKey}`,
},
timeout: 180000, // 180秒超时(3分钟) - 稿件评估需要更长时间
timeout: 180000,
}
);
const choice = response.data.choices[0];
return {
content: choice.message.content,
content: choice.message.content ?? null,
model: response.data.model,
usage: {
promptTokens: response.data.usage.prompt_tokens,
@@ -50,6 +56,7 @@ export class DeepSeekAdapter implements ILLMAdapter {
totalTokens: response.data.usage.total_tokens,
},
finishReason: choice.finish_reason,
toolCalls: choice.message.tool_calls ?? undefined,
};
} catch (error: unknown) {
console.error('DeepSeek API Error:', error);

View File

@@ -1,8 +1,32 @@
// LLM适配器类型定义
// ---- Function Calling / Tool Use ----
/**
 * Declaration of a callable tool in OpenAI function-calling wire format,
 * sent to the provider via the request's `tools` array.
 */
export interface ToolDefinition {
  /** Only the 'function' tool kind is supported. */
  type: 'function';
  function: {
    /** Identifier the model uses when emitting a matching tool call. */
    name: string;
    /** Natural-language description the model reads to decide when to call the tool. */
    description: string;
    /** Argument schema — presumably a JSON Schema object; not validated here. */
    parameters: Record<string, any>;
  };
}
/**
 * A single tool invocation emitted by the model, normalized to the
 * OpenAI wire shape (Anthropic `tool_use` blocks are converted to this).
 */
export interface ToolCall {
  /** Provider-assigned id for correlating the eventual tool result. */
  id: string;
  /** Always 'function' — mirrors ToolDefinition.type. */
  type: 'function';
  function: {
    /** Name of the tool being invoked. */
    name: string;
    /** JSON-encoded argument object; callers must JSON.parse it. */
    arguments: string;
  };
}
// ---- Core message / option / response types ----
export interface Message {
role: 'system' | 'user' | 'assistant';
content: string;
role: 'system' | 'user' | 'assistant' | 'tool';
content: string | null;
tool_calls?: ToolCall[];
tool_call_id?: string;
}
export interface LLMOptions {
@@ -10,10 +34,12 @@ export interface LLMOptions {
maxTokens?: number;
topP?: number;
stream?: boolean;
tools?: ToolDefinition[];
tool_choice?: 'auto' | 'none' | 'required';
}
export interface LLMResponse {
content: string;
content: string | null;
model: string;
usage?: {
promptTokens: number;
@@ -21,6 +47,7 @@ export interface LLMResponse {
totalTokens: number;
};
finishReason?: string;
toolCalls?: ToolCall[];
}
export interface StreamChunk {

View File

@@ -72,7 +72,7 @@ export class QueryRewriter {
}
);
const content = response.content.trim();
const content = (response.content ?? '').trim();
// 3. 解析 JSON 数组
const rewritten = this.parseRewrittenQueries(content, query);