feat(aia): Complete AIA V2.0 with universal streaming capabilities
Major Updates:
- Add StreamingService with OpenAI Compatible format (backend/common/streaming)
- Upgrade Chat component V2 with Ant Design X integration
- Implement AIA module with 12 intelligent agents
- Create AgentHub with 100% prototype V11 restoration
- Create ChatWorkspace with streaming response support
- Add ThinkingBlock for deep thinking display
- Add useAIStream Hook for OpenAI Compatible stream handling

Backend Common Capabilities (~400 lines):
- OpenAIStreamAdapter: SSE adapter with OpenAI format
- StreamingService: unified streaming service
- Support content and reasoning_content dual streams (see the client-side parsing sketch below)
- Deep thinking tag processing (<think>...</think>)

Frontend Common Capabilities (~2000 lines):
- AIStreamChat: modern streaming chat component
- ThinkingBlock: collapsible deep thinking display
- ConversationList: conversation management with grouping
- useAIStream: OpenAI Compatible stream handler Hook
- useConversations: conversation state management Hook
- Modern design styles (Ultramodern theme)

AIA Module Frontend (~1500 lines):
- AgentHub: 12 agent cards with timeline design
- ChatWorkspace: fullscreen immersive chat interface
- AgentCard: theme-colored cards (blue/yellow/teal/purple)
- 5 phases, 12 agents configuration
- Responsive layout (desktop + mobile)

AIA Module Backend (~900 lines):
- agentService: 12 agents config with system prompts
- conversationService: refactored with StreamingService
- attachmentService: file upload skeleton (30k token limit)
- 12 API endpoints with authentication
- Full CRUD for conversations and messages

Documentation:
- AIA module status and development guide
- Universal capabilities catalog (11 services)
- Quick reference card for developers
- System overview updates

Testing:
- Stream response verified (HTTP 200)
- Authentication working correctly
- Auto conversation creation working
- Deep thinking display working
- Message input and send working

Status: Core features completed (85%); attachment handling and history loading pending
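For context, a minimal sketch of how a client such as the useAIStream hook might parse the content / reasoning_content dual streams emitted by the adapter in this diff. The hook itself is not part of this commit, so the function name, callbacks, and parsing loop below are illustrative assumptions based only on the OpenAI Compatible chunk format the adapter writes.

// Illustrative only: the real useAIStream hook is not included in this diff.
// Shows how the content / reasoning_content dual streams can be separated.
async function consumeOpenAIStream(
  url: string,
  body: unknown,
  onContent: (delta: string) => void,
  onReasoning: (delta: string) => void,
): Promise<void> {
  const res = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  });
  if (!res.ok || !res.body) throw new Error(`Stream request failed: ${res.status}`);

  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });

    // SSE events are separated by a blank line; each "data:" line is one chunk.
    const events = buffer.split('\n\n');
    buffer = events.pop() ?? '';
    for (const event of events) {
      for (const line of event.split('\n')) {
        if (!line.startsWith('data:')) continue;
        const data = line.slice(5).trim();
        if (!data || data === '[DONE]') continue;
        const chunk = JSON.parse(data) as {
          choices?: Array<{ delta?: { content?: string; reasoning_content?: string } }>;
        };
        const delta = chunk.choices?.[0]?.delta ?? {};
        if (delta.reasoning_content) onReasoning(delta.reasoning_content);
        if (delta.content) onContent(delta.content);
      }
    }
  }
}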
backend/src/common/streaming/OpenAIStreamAdapter.ts (new file, 196 additions)
@@ -0,0 +1,196 @@
/**
 * OpenAI Compatible streaming response adapter
 *
 * Converts internal LLM responses into the OpenAI Compatible format
 * so they can be consumed directly by Ant Design X's XRequest
 */

import { FastifyReply } from 'fastify';
import { v4 as uuidv4 } from 'uuid';
import type { OpenAIStreamChunk, StreamOptions, THINKING_TAGS } from './types';
import { logger } from '../logging/logger';

/**
 * OpenAI streaming response adapter
 */
export class OpenAIStreamAdapter {
  private reply: FastifyReply;
  private messageId: string;
  private model: string;
  private created: number;
  private isHeaderSent: boolean = false;

  constructor(reply: FastifyReply, model: string = 'deepseek-v3') {
    this.reply = reply;
    this.messageId = `chatcmpl-${uuidv4()}`;
    this.model = model;
    this.created = Math.floor(Date.now() / 1000);
  }

  /**
   * Initialize the SSE connection
   */
  initSSE(): void {
    if (this.isHeaderSent) return;

    this.reply.raw.writeHead(200, {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      'Connection': 'keep-alive',
      'X-Accel-Buffering': 'no',
      'Access-Control-Allow-Origin': '*',
    });

    this.isHeaderSent = true;
    logger.debug('[OpenAIStreamAdapter] SSE connection initialized');
  }

  /**
   * Send a content delta
   */
  sendContentDelta(content: string): void {
    this.initSSE();

    const chunk: OpenAIStreamChunk = {
      id: this.messageId,
      object: 'chat.completion.chunk',
      created: this.created,
      model: this.model,
      choices: [{
        index: 0,
        delta: { content },
        finish_reason: null,
      }],
    };

    this.writeChunk(chunk);
  }

  /**
   * Send a reasoning (deep thinking) delta, DeepSeek style
   */
  sendReasoningDelta(reasoningContent: string): void {
    this.initSSE();

    const chunk: OpenAIStreamChunk = {
      id: this.messageId,
      object: 'chat.completion.chunk',
      created: this.created,
      model: this.model,
      choices: [{
        index: 0,
        delta: { reasoning_content: reasoningContent },
        finish_reason: null,
      }],
    };

    this.writeChunk(chunk);
  }

  /**
   * Send the role marker (at stream start)
   */
  sendRoleStart(): void {
    this.initSSE();

    const chunk: OpenAIStreamChunk = {
      id: this.messageId,
      object: 'chat.completion.chunk',
      created: this.created,
      model: this.model,
      choices: [{
        index: 0,
        delta: { role: 'assistant' },
        finish_reason: null,
      }],
    };

    this.writeChunk(chunk);
  }

  /**
   * Send the completion marker
   */
  sendComplete(usage?: { promptTokens: number; completionTokens: number; totalTokens: number }): void {
    this.initSSE();

    const chunk: OpenAIStreamChunk = {
      id: this.messageId,
      object: 'chat.completion.chunk',
      created: this.created,
      model: this.model,
      choices: [{
        index: 0,
        delta: {},
        finish_reason: 'stop',
      }],
      usage: usage ? {
        prompt_tokens: usage.promptTokens,
        completion_tokens: usage.completionTokens,
        total_tokens: usage.totalTokens,
      } : undefined,
    };

    this.writeChunk(chunk);

    // Send the [DONE] marker
    this.reply.raw.write('data: [DONE]\n\n');
    logger.debug('[OpenAIStreamAdapter] Streaming response complete');
  }

  /**
   * Send an error
   */
  sendError(error: Error | string): void {
    this.initSSE();

    const errorMessage = typeof error === 'string' ? error : error.message;

    const errorChunk = {
      error: {
        message: errorMessage,
        type: 'server_error',
        code: 'internal_error',
      },
    };

    this.reply.raw.write(`data: ${JSON.stringify(errorChunk)}\n\n`);
    this.reply.raw.write('data: [DONE]\n\n');

    logger.error('[OpenAIStreamAdapter] Streaming response error', { error: errorMessage });
  }

  /**
   * End the stream
   */
  end(): void {
    if (this.isHeaderSent) {
      this.reply.raw.end();
    }
  }

  /**
   * Get the message ID
   */
  getMessageId(): string {
    return this.messageId;
  }

  /**
   * Write a chunk as an SSE data event
   */
  private writeChunk(chunk: OpenAIStreamChunk): void {
    this.reply.raw.write(`data: ${JSON.stringify(chunk)}\n\n`);
  }
}

/**
 * Create an OpenAI streaming adapter
 */
export function createOpenAIStreamAdapter(
  reply: FastifyReply,
  model?: string
): OpenAIStreamAdapter {
  return new OpenAIStreamAdapter(reply, model);
}
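For reference, a minimal sketch of how a route handler might wire this adapter to an upstream LLM stream. registerChatRoutes, LLMClient, and the route path are placeholders for illustration and are not part of this commit; the actual wiring lives in conversationService and StreamingService.

// Illustrative wiring only; LLMClient, registerChatRoutes and the route path
// are placeholders, not part of this commit.
import { FastifyInstance } from 'fastify';
import { createOpenAIStreamAdapter } from '../common/streaming/OpenAIStreamAdapter';

interface LLMDelta {
  content?: string;
  reasoning?: string;
}

// Stand-in for whatever internal client the real StreamingService wraps.
interface LLMClient {
  streamChat(body: unknown): AsyncIterable<LLMDelta>;
}

export async function registerChatRoutes(app: FastifyInstance, llm: LLMClient) {
  app.post('/api/aia/conversations/:id/messages', async (request, reply) => {
    // The adapter writes to reply.raw directly, so the handler must not also
    // send a normal Fastify response for this request.
    const adapter = createOpenAIStreamAdapter(reply, 'deepseek-v3');

    try {
      adapter.sendRoleStart();

      for await (const delta of llm.streamChat(request.body)) {
        if (delta.reasoning) adapter.sendReasoningDelta(delta.reasoning);
        if (delta.content) adapter.sendContentDelta(delta.content);
      }

      adapter.sendComplete();
    } catch (err) {
      adapter.sendError(err instanceof Error ? err : String(err));
    } finally {
      adapter.end();
    }
  });
}

The adapter keeps the transport concerns (SSE headers, [DONE] marker, error frames) out of the route, so a handler only has to map upstream deltas to sendContentDelta / sendReasoningDelta.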