feat(iit): Complete CRA Agent V3.0 P1 - ChatOrchestrator with LLM Function Calling
P1 Architecture: Lightweight ReAct (Function Calling loop, max 3 rounds)

Core changes:
- Add ToolDefinition/ToolCall types to LLM adapters (DeepSeek + CloseAI + Claude)
- Replace 6 old tools with 4 semantic tools: read_report, look_up_data, check_quality, search_knowledge
- Create ChatOrchestrator (~160 lines) replacing ChatService (1,442 lines)
- Wire WechatCallbackController to ChatOrchestrator, deprecate ChatService
- Fix nullable content (string | null) across 12+ LLM consumer files

E2E test results: 8/8 scenarios passed (100%)
- QC report query, critical issues, patient data, trend, on-demand QC
- Knowledge base search, project overview, data modification refusal

Net code reduction: ~1,100 lines
Tested: E2E P1 chat test 8/8 passed with DeepSeek API
Made-with: Cursor
This commit is contained in:
@@ -189,7 +189,7 @@ class IntentRouterService {
       maxTokens: 100,
     });

-    return this.parseLLMResponse(response.content);
+    return this.parseLLMResponse(response.content ?? '');
   }

   private parseLLMResponse(text: string): IntentResult {
@@ -67,7 +67,7 @@ export class PicoInferenceService {
       maxTokens: rendered.modelConfig?.maxTokens ?? 1024,
     });

-    const raw = this.robustJsonParse(response.content);
+    const raw = this.robustJsonParse(response.content ?? '');
     const validated = PicoInferenceSchema.parse({
       ...raw,
       status: 'ai_inferred',
@@ -122,7 +122,7 @@ export class QueryService {
     });

     // 4. 三层 JSON 解析
-    const raw = this.robustJsonParse(response.content);
+    const raw = this.robustJsonParse(response.content ?? '');

     // 5. Zod 校验(动态防幻觉)
     const validColumns = profile?.columns.map(c => c.name) ?? [];
@@ -104,7 +104,7 @@ export class ReflectionService {
       maxTokens: LLM_MAX_TOKENS,
     });

-    const rawOutput = response.content;
+    const rawOutput = response.content ?? '';
     logger.info('[SSA:Reflection] LLM response received', {
       contentLength: rawOutput.length,
       usage: response.usage,
Reference in New Issue
Block a user