update at 2025-10-14 21:52:11

This commit is contained in:
douboer
2025-10-14 21:52:11 +08:00
parent ac3ed480ab
commit 4f5eea604e
40 changed files with 15231 additions and 126 deletions

View File

@@ -1,4 +1,5 @@
import type { MCPServerConfig, ServerCapabilities, Tool, Resource, Prompt } from '../types';
import type { MCPServerConfig } from '../types';
import type { ServerCapabilities, Tool, Resource, Prompt } from '../types/index';
import { v4 as uuidv4 } from 'uuid';
import { SSETransport } from './SSETransport';
@@ -302,6 +303,18 @@ export class MCPClientService {
}
}
/**
* Get the list of tools for a server
*/
getTools(serverId: string): Tool[] {
const serverInfo = this.clients.get(serverId);
if (!serverInfo) {
console.warn(`Server ${serverId} is not connected`)
return [];
}
return serverInfo.capabilities?.tools || [];
}
/**
* Get prompts
*/
@@ -361,7 +374,7 @@ export class MCPClientService {
return false;
}
const { client, config } = serverInfo;
const { client } = serverInfo;
try {
if (client.type === 'sse') {

View File

@@ -0,0 +1,931 @@
import type {
Topic,
Message,
Conversation,
SendMessageOptions,
StreamEvent,
TopicFilter
} from '../types/chat'
import { modelServiceManager } from './modelServiceManager'
import { MCPClientService } from './MCPClientService'
class ChatService {
private static instance: ChatService
private topics: Map<string, Topic> = new Map()
private conversations: Map<string, Conversation> = new Map()
private mcpClient: MCPClientService = new MCPClientService()
static getInstance(): ChatService {
if (!ChatService.instance) {
ChatService.instance = new ChatService()
}
return ChatService.instance
}
// ==================== Topic management ====================
/**
* Create a new topic
*/
createTopic(name: string, options?: {
description?: string
modelId?: string
}): Topic {
const topic: Topic = {
id: this.generateId(),
name: name || 'New Conversation',
description: options?.description,
createdAt: new Date(),
updatedAt: new Date(),
messageCount: 0,
pinned: false,
archived: false,
favorite: false,
model: options?.modelId
}
this.topics.set(topic.id, topic)
this.saveTopics()
// Create the corresponding conversation
const conversation: Conversation = {
id: this.generateId(),
topicId: topic.id,
messages: [],
createdAt: new Date(),
updatedAt: new Date(),
metadata: {
model: options?.modelId
}
}
this.conversations.set(conversation.id, conversation)
this.saveConversations()
return topic
}
/**
* Get all topics
*/
getTopics(filter?: TopicFilter): Topic[] {
let topics = Array.from(this.topics.values())
if (filter) {
if (filter.search) {
const search = filter.search.toLowerCase()
topics = topics.filter(t =>
t.name.toLowerCase().includes(search) ||
t.description?.toLowerCase().includes(search) ||
t.lastMessage?.toLowerCase().includes(search)
)
}
if (filter.pinned !== undefined) {
topics = topics.filter(t => t.pinned === filter.pinned)
}
if (filter.archived !== undefined) {
topics = topics.filter(t => t.archived === filter.archived)
}
if (filter.favorite !== undefined) {
topics = topics.filter(t => t.favorite === filter.favorite)
}
}
// Sort: pinned first, then by most recent update
return topics.sort((a, b) => {
if (a.pinned !== b.pinned) return a.pinned ? -1 : 1
return b.updatedAt.getTime() - a.updatedAt.getTime()
})
}
/**
* Get a single topic
*/
getTopic(topicId: string): Topic | undefined {
return this.topics.get(topicId)
}
/**
* Update a topic
*/
updateTopic(topicId: string, updates: Partial<Topic>): Topic | undefined {
const topic = this.topics.get(topicId)
if (!topic) return undefined
Object.assign(topic, updates, {
updatedAt: new Date()
})
this.topics.set(topicId, topic)
this.saveTopics()
return topic
}
/**
* Delete a topic
*/
deleteTopic(topicId: string): boolean {
const deleted = this.topics.delete(topicId)
if (deleted) {
// Delete the associated conversations
for (const [convId, conv] of this.conversations) {
if (conv.topicId === topicId) {
this.conversations.delete(convId)
}
}
this.saveTopics()
this.saveConversations()
}
return deleted
}
/**
* Toggle a topic's pinned state
*/
toggleTopicPin(topicId: string): boolean {
const topic = this.topics.get(topicId)
if (!topic) return false
topic.pinned = !topic.pinned
topic.updatedAt = new Date()
this.topics.set(topicId, topic)
this.saveTopics()
return topic.pinned
}
/**
* Toggle a topic's favorite state
*/
toggleTopicFavorite(topicId: string): boolean {
const topic = this.topics.get(topicId)
if (!topic) return false
topic.favorite = !topic.favorite
topic.updatedAt = new Date()
this.topics.set(topicId, topic)
this.saveTopics()
return topic.favorite
}
/**
* Archive a topic
*/
archiveTopic(topicId: string): boolean {
const topic = this.topics.get(topicId)
if (!topic) return false
topic.archived = true
topic.updatedAt = new Date()
this.topics.set(topicId, topic)
this.saveTopics()
return true
}
// ==================== Message management ====================
/**
* Get all messages for a topic
*/
getMessages(topicId: string): Message[] {
for (const conv of this.conversations.values()) {
if (conv.topicId === topicId) {
return conv.messages
}
}
return []
}
/**
* Send a message
*/
async sendMessage(options: SendMessageOptions): Promise<Message> {
const { topicId, content, role = 'user', model } = options
// Find the conversation
let conversation: Conversation | undefined
for (const conv of this.conversations.values()) {
if (conv.topicId === topicId) {
conversation = conv
break
}
}
if (!conversation) {
throw new Error('Conversation not found')
}
// Create the user message
const userMessage: Message = {
id: this.generateId(),
role,
content,
status: 'success',
timestamp: new Date()
}
conversation.messages.push(userMessage)
conversation.updatedAt = new Date()
// Update the topic
const topic = this.topics.get(topicId)
if (topic) {
topic.messageCount = conversation.messages.length
topic.lastMessage = this.getMessagePreview(content)
topic.updatedAt = new Date()
this.topics.set(topicId, topic)
this.saveTopics()
}
this.conversations.set(conversation.id, conversation)
this.saveConversations()
// If this is not a user message, return immediately
if (role !== 'user') {
return userMessage
}
// Create a placeholder for the assistant message
const assistantMessage: Message = {
id: this.generateId(),
role: 'assistant',
content: '',
status: 'sending',
timestamp: new Date(),
model: model || conversation.metadata?.model
}
conversation.messages.push(assistantMessage)
this.conversations.set(conversation.id, conversation)
try {
// Call the AI model
const response = await this.callModel(conversation, model)
// Update the assistant message
assistantMessage.content = response.content
assistantMessage.status = 'success'
assistantMessage.tokens = response.tokens
conversation.updatedAt = new Date()
this.conversations.set(conversation.id, conversation)
this.saveConversations()
// Update the topic
if (topic) {
topic.messageCount = conversation.messages.length
topic.lastMessage = this.getMessagePreview(response.content)
topic.updatedAt = new Date()
this.topics.set(topicId, topic)
this.saveTopics()
}
return assistantMessage
} catch (error) {
assistantMessage.status = 'error'
assistantMessage.error = error instanceof Error ? error.message : 'Failed to send'
this.conversations.set(conversation.id, conversation)
this.saveConversations()
throw error
}
}
/**
* Send a message with streaming
*/
async sendMessageStream(
options: SendMessageOptions,
onChunk: (event: StreamEvent) => void,
mcpServerId?: string // New: optional MCP server ID
): Promise<void> {
const { topicId, content, role = 'user', model } = options
// Find the conversation
let conversation: Conversation | undefined
for (const conv of this.conversations.values()) {
if (conv.topicId === topicId) {
conversation = conv
break
}
}
if (!conversation) {
throw new Error('Conversation not found')
}
// Create the user message
const userMessage: Message = {
id: this.generateId(),
role,
content,
status: 'success',
timestamp: new Date()
}
conversation.messages.push(userMessage)
conversation.updatedAt = new Date()
this.conversations.set(conversation.id, conversation)
this.saveConversations()
// Update the topic (user message)
const topic = this.topics.get(topicId)
if (topic) {
topic.messageCount = conversation.messages.length
topic.lastMessage = this.getMessagePreview(content)
topic.updatedAt = new Date()
this.topics.set(topicId, topic)
this.saveTopics()
}
// Create the assistant message
const assistantMessage: Message = {
id: this.generateId(),
role: 'assistant',
content: '',
status: 'sending',
timestamp: new Date(),
model: model || conversation.metadata?.model
}
conversation.messages.push(assistantMessage)
conversation.updatedAt = new Date()
this.conversations.set(conversation.id, conversation)
this.saveConversations()
// Update the topic's message count again
if (topic) {
topic.messageCount = conversation.messages.length
this.topics.set(topicId, topic)
this.saveTopics()
}
onChunk({ type: 'start', messageId: assistantMessage.id })
try {
// Call the streaming API
await this.callModelStream(
conversation,
model,
(chunk) => {
assistantMessage.content += chunk
conversation.updatedAt = new Date()
this.conversations.set(conversation.id, conversation)
this.saveConversations()
onChunk({ type: 'delta', content: chunk, messageId: assistantMessage.id })
},
mcpServerId // Pass the MCP server ID through
)
assistantMessage.status = 'success'
conversation.updatedAt = new Date()
this.conversations.set(conversation.id, conversation)
this.saveConversations()
onChunk({ type: 'end', messageId: assistantMessage.id })
// Update the topic (completed)
if (topic) {
topic.messageCount = conversation.messages.length
topic.lastMessage = this.getMessagePreview(assistantMessage.content)
topic.updatedAt = new Date()
this.topics.set(topicId, topic)
this.saveTopics()
}
} catch (error) {
assistantMessage.status = 'error'
assistantMessage.error = error instanceof Error ? error.message : 'Failed to send'
this.saveConversations()
onChunk({
type: 'error',
error: assistantMessage.error,
messageId: assistantMessage.id
})
}
}
/**
* Delete a message
*/
deleteMessage(topicId: string, messageId: string): boolean {
for (const conv of this.conversations.values()) {
if (conv.topicId === topicId) {
const index = conv.messages.findIndex(m => m.id === messageId)
if (index !== -1) {
conv.messages.splice(index, 1)
conv.updatedAt = new Date()
this.conversations.set(conv.id, conv)
this.saveConversations()
// Update the topic
const topic = this.topics.get(topicId)
if (topic) {
topic.messageCount = conv.messages.length
if (conv.messages.length > 0) {
const lastMsg = conv.messages[conv.messages.length - 1]
topic.lastMessage = this.getMessagePreview(lastMsg.content)
} else {
topic.lastMessage = undefined
}
topic.updatedAt = new Date()
this.topics.set(topicId, topic)
this.saveTopics()
}
return true
}
}
}
return false
}
/**
* Regenerate a message
*/
async regenerateMessage(topicId: string, messageId: string): Promise<Message> {
// Find the message to regenerate
let conversation: Conversation | undefined
let messageIndex = -1
for (const conv of this.conversations.values()) {
if (conv.topicId === topicId) {
conversation = conv
messageIndex = conv.messages.findIndex(m => m.id === messageId)
if (messageIndex !== -1) break
}
}
if (!conversation || messageIndex === -1) {
throw new Error('Message not found')
}
// Remove this message and everything after it
conversation.messages.splice(messageIndex)
// Get the most recent user message
let lastUserMessage: Message | undefined
for (let i = conversation.messages.length - 1; i >= 0; i--) {
if (conversation.messages[i].role === 'user') {
lastUserMessage = conversation.messages[i]
break
}
}
if (!lastUserMessage) {
throw new Error('No user message found')
}
// Resend it
return await this.sendMessage({
topicId,
content: lastUserMessage.content,
model: conversation.metadata?.model
})
}
// ==================== Private methods ====================
/**
* Call the model
*/
private async callModel(
conversation: Conversation,
model?: string
): Promise<{ content: string; tokens?: any }> {
const callModelStartTime = performance.now()
console.log('⏱️ [callModel] Start processing', { model, messageCount: conversation.messages.length })
// Prepare the message history
const beforePrepare = performance.now()
const messages = conversation.messages
.filter(m => m.status === 'success')
.map(m => ({
role: m.role,
content: m.content
}))
const afterPrepare = performance.now()
console.log('⏱️ [callModel] Message preparation took:', (afterPrepare - beforePrepare).toFixed(2), 'ms', 'messages after filtering:', messages.length)
// Get the connected services from modelServiceManager
const allServices = modelServiceManager.getAllServices()
console.log('🔍 [callModel] All services:', allServices.map(s => ({
name: s.name,
status: s.status,
models: s.models?.length || 0
})))
const services = allServices.filter(s => s.status === 'connected')
console.log('🔍 [callModel] Connected services:', services.length)
if (services.length === 0) {
console.error('❌ [callModel] No connected services!')
console.error('📋 [callModel] Please check:')
console.error(' 1. Has a service been added under "Model Services"?')
console.error(' 2. Is the service enabled (enabled=true)?')
console.error(' 3. Does the service have an available model list?')
console.error(' 4. Data in localStorage:', localStorage.getItem('model-providers'))
throw new Error('No model service is available. Please add and connect a service under "Model Services" first')
}
let service = services[0] // Default to the first available service
let selectedModel = model || service.models?.[0] || 'default'
// If a model was specified, try to find the service that provides it
if (model) {
const foundService = services.find(s =>
s.models && s.models.includes(model)
)
if (foundService) {
service = foundService
selectedModel = model
} else {
console.warn(`⚠️ No service provides model "${model}"; falling back to the default service`)
}
}
console.log('🔍 [callModel] Using service:', service.name, 'model:', selectedModel)
// Call the service
const beforeServiceCall = performance.now()
const result = await modelServiceManager.sendChatRequest(
service.id,
messages,
selectedModel
)
const afterServiceCall = performance.now()
console.log('⏱️ [callModel] Service call took:', (afterServiceCall - beforeServiceCall).toFixed(2), 'ms')
if (!result.success) {
throw new Error(result.error || 'Request failed')
}
// Parse the response
const beforeParse = performance.now()
const parsedContent = this.parseModelResponse(result.data)
const afterParse = performance.now()
console.log('⏱️ [callModel] Response parsing took:', (afterParse - beforeParse).toFixed(2), 'ms')
console.log('⏱️ [callModel] Total callModel time:', (afterParse - callModelStartTime).toFixed(2), 'ms')
return {
content: parsedContent,
tokens: result.data?.usage
}
}
/**
* Call the model with streaming
*/
private async callModelStream(
conversation: Conversation,
model: string | undefined,
onChunk: (chunk: string) => void,
mcpServerId?: string // Optional MCP server ID
): Promise<void> {
const streamStartTime = performance.now()
console.log('⏱️ [callModelStream] Starting true streaming')
// Get the MCP tool list (if an MCP server was selected)
let tools: any[] = []
if (mcpServerId) {
console.log('🔧 [callModelStream] Fetching tools from MCP server:', mcpServerId)
const mcpTools = this.mcpClient.getTools(mcpServerId)
tools = this.convertToolsToOpenAIFormat(mcpTools)
console.log('🔧 [callModelStream] Converted tools:', tools.length)
}
// Prepare the message history
const messages = conversation.messages
.filter(m => m.status === 'success')
.map(m => ({
role: m.role,
content: m.content
}))
// Get the connected services
const allServices = modelServiceManager.getAllServices()
const services = allServices.filter(s => s.status === 'connected')
if (services.length === 0) {
throw new Error('No model service is available. Please add and connect a service under "Model Services" first')
}
let service = services[0]
let selectedModel = model || service.models?.[0] || 'default'
// If a model was specified, try to find the service that provides it
if (model) {
const foundService = services.find(s =>
s.models && s.models.includes(model)
)
if (foundService) {
service = foundService
selectedModel = model
}
}
console.log('🔍 [callModelStream] Using streaming service:', service.name, 'model:', selectedModel)
console.log('🚀 [callModelStream] === Starting the actual streaming request ===')
// Call the real streaming API
const beforeStreamCall = performance.now()
let chunkCount = 0
let buffer = '' // Buffer used for batched output
const BATCH_SIZE = 3 // Flush every 3 characters to smooth the streaming effect
const result = await modelServiceManager.sendChatRequestStream(
service.id,
messages,
selectedModel,
(chunk) => {
// Output in real time, batched for a smoother visual effect
chunkCount++
if (chunkCount === 1) {
const firstChunkTime = performance.now()
console.log('⚡ [callModelStream] First-chunk latency:', (firstChunkTime - beforeStreamCall).toFixed(2), 'ms')
}
// Accumulate into the buffer
buffer += chunk
// Flush once the buffer reaches the batch size
if (buffer.length >= BATCH_SIZE) {
const output = buffer
buffer = ''
onChunk(output)
}
},
tools.length > 0 ? tools : undefined
)
// Flush whatever is left in the buffer
if (buffer.length > 0) {
onChunk(buffer)
}
const afterStreamCall = performance.now()
console.log('🚀 [callModelStream] Streaming request finished, chunks received:', chunkCount)
console.log('⏱️ [callModelStream] Streaming call took:', (afterStreamCall - beforeStreamCall).toFixed(2), 'ms')
if (!result.success) {
throw new Error(result.error || 'Streaming request failed')
}
// Handle tool calls
if (result.data?.toolCalls && result.data.toolCalls.length > 0 && mcpServerId) {
console.log('🔧 [callModelStream] Executing tool calls')
await this.executeToolCalls(conversation, result.data.toolCalls, mcpServerId, model, onChunk)
}
const endTime = performance.now()
console.log('⏱️ [callModelStream] Total true-streaming time:', (endTime - streamStartTime).toFixed(2), 'ms')
}
/**
* Parse the model response
*/
private parseModelResponse(data: any, _serviceType?: string): string {
if (!data) return ''
// OpenAI format
if (data.choices && data.choices[0]?.message?.content) {
return data.choices[0].message.content
}
// Claude format
if (data.content && Array.isArray(data.content)) {
return data.content
.filter((c: any) => c.type === 'text')
.map((c: any) => c.text)
.join('')
}
// Gemini format
if (data.candidates && data.candidates[0]?.content?.parts) {
return data.candidates[0].content.parts
.map((p: any) => p.text)
.join('')
}
// Generic fallbacks
if (typeof data === 'string') return data
if (data.content) return data.content
if (data.text) return data.text
if (data.message) return data.message
return JSON.stringify(data)
}
/**
* Get a message preview
*/
private getMessagePreview(content: string, maxLength = 50): string {
if (!content) return ''
const text = content.replace(/\n/g, ' ').trim()
return text.length > maxLength ? text.slice(0, maxLength) + '...' : text
}
/**
* Generate a unique ID
*/
private generateId(): string {
return `${Date.now()}_${Math.random().toString(36).substr(2, 9)}`
}
// ==================== Persistence ====================
private saveTopics(): void {
try {
const data = Array.from(this.topics.values())
localStorage.setItem('chat-topics', JSON.stringify(data))
} catch (error) {
console.error('Failed to save topics:', error)
}
}
private loadTopics(): void {
try {
const data = localStorage.getItem('chat-topics')
if (data) {
const topics = JSON.parse(data) as Topic[]
topics.forEach(topic => {
// Restore Date objects
topic.createdAt = new Date(topic.createdAt)
topic.updatedAt = new Date(topic.updatedAt)
this.topics.set(topic.id, topic)
})
}
} catch (error) {
console.error('Failed to load topics:', error)
}
}
private saveConversations(): void {
try {
const data = Array.from(this.conversations.values())
localStorage.setItem('chat-conversations', JSON.stringify(data))
} catch (error) {
console.error('Failed to save conversations:', error)
}
}
private loadConversations(): void {
try {
const data = localStorage.getItem('chat-conversations')
if (data) {
const conversations = JSON.parse(data) as Conversation[]
conversations.forEach(conv => {
// Restore Date objects
conv.createdAt = new Date(conv.createdAt)
conv.updatedAt = new Date(conv.updatedAt)
conv.messages.forEach(msg => {
msg.timestamp = new Date(msg.timestamp)
})
this.conversations.set(conv.id, conv)
})
}
} catch (error) {
console.error('Failed to load conversations:', error)
}
}
/**
* Initialize
*/
initialize(): void {
this.loadTopics()
this.loadConversations()
// If there are no topics, create a default one
if (this.topics.size === 0) {
this.createTopic('Welcome', {
description: 'Start your first conversation'
})
}
}
/**
* Convert MCP tools to the OpenAI function-calling format
*/
private convertToolsToOpenAIFormat(mcpTools: any[]): any[] {
return mcpTools.map(tool => ({
type: 'function',
function: {
name: tool.name,
description: tool.description || '',
parameters: tool.inputSchema || {
type: 'object',
properties: {},
required: []
}
}
}))
}
/**
* Execute tool calls and send the results back to the AI
*/
private async executeToolCalls(
conversation: Conversation,
toolCalls: any[],
mcpServerId: string,
model: string | undefined,
onChunk: (chunk: string) => void
): Promise<void> {
console.log('🔧 [executeToolCalls] Executing', toolCalls.length, 'tool call(s)')
// Add the tool-call info to the message history
const toolCallMessage = {
role: 'assistant' as const,
content: '',
tool_calls: toolCalls
}
// Execute each tool call
const toolResults = []
for (const toolCall of toolCalls) {
try {
const functionName = toolCall.function.name
const functionArgs = JSON.parse(toolCall.function.arguments)
console.log(`🔧 [executeToolCalls] Calling tool: ${functionName}`, functionArgs)
onChunk(`\n\n🔧 Calling tool: ${functionName}...\n`)
const result = await this.mcpClient.callTool(mcpServerId, functionName, functionArgs)
console.log(`✅ [executeToolCalls] Tool call succeeded: ${functionName}`, result)
onChunk(`✅ Tool execution completed\n`)
toolResults.push({
tool_call_id: toolCall.id,
role: 'tool',
name: functionName,
content: JSON.stringify(result)
})
} catch (error) {
console.error(`❌ [executeToolCalls] Tool call failed:`, error)
const errorMsg = error instanceof Error ? error.message : 'Unknown error'
onChunk(`❌ Tool execution failed: ${errorMsg}\n`)
toolResults.push({
tool_call_id: toolCall.id,
role: 'tool',
name: toolCall.function.name,
content: JSON.stringify({ error: errorMsg })
})
}
}
// Append the tool call and its results to the message history
const messages = conversation.messages
.filter(m => m.status === 'success')
.map(m => ({
role: m.role,
content: m.content
}))
messages.push(toolCallMessage as any)
messages.push(...(toolResults as any[]))
// Get the connected services
const allServices = modelServiceManager.getAllServices()
const services = allServices.filter(s => s.status === 'connected')
if (services.length === 0) {
throw new Error('No model service available')
}
let service = services[0]
let selectedModel = model || service.models?.[0] || 'default'
if (model) {
const foundService = services.find(s =>
s.models && s.models.includes(model)
)
if (foundService) {
service = foundService
selectedModel = model
}
}
// Send the tool results back to the AI to get the final reply
console.log('🤖 [executeToolCalls] Sending tool results to the AI')
onChunk('\n\n🤖 Generating reply...\n')
await modelServiceManager.sendChatRequestStream(
service.id,
messages,
selectedModel,
onChunk
)
}
/**
* Get all services (for external use)
*/
getAllServices() {
return modelServiceManager.getAllServices()
}
}
export const chatService = ChatService.getInstance()
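A minimal usage sketch of the ChatService above (hypothetical call site, not part of this commit; the import path './chatService' and the model name 'gpt-4o-mini' are illustrative assumptions):
// Hypothetical consumer showing the intended call order: initialize, create a topic, stream a reply.
import { chatService } from './chatService' // assumed path
async function demo(): Promise<void> {
  chatService.initialize() // load persisted topics and conversations
  const topic = chatService.createTopic('API test') // also creates the backing conversation
  let reply = ''
  await chatService.sendMessageStream(
    { topicId: topic.id, content: 'Hello!', model: 'gpt-4o-mini' }, // model name is illustrative
    (event) => {
      if (event.type === 'delta' && event.content) reply += event.content
      if (event.type === 'error') console.error(event.error)
    }
  )
  console.log('assistant reply:', reply)
  console.log('messages in topic:', chatService.getMessages(topic.id).length)
}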

View File

@@ -0,0 +1,954 @@
export interface ModelService {
id: string
name: string
type: 'openai' | 'claude' | 'gemini' | 'azure' | 'local' | 'dashscope' | 'volcengine' | 'custom'
url: string
apiKey: string
status: 'connected' | 'disconnected' | 'connecting' | 'error'
models?: string[]
lastUsed?: Date
customConfig?: string
errorMessage?: string
}
export interface ApiResponse<T = any> {
success: boolean
data?: T
error?: string
}
export class ModelServiceManager {
private services: Map<string, ModelService> = new Map()
private static instance: ModelServiceManager
static getInstance(): ModelServiceManager {
if (!ModelServiceManager.instance) {
ModelServiceManager.instance = new ModelServiceManager()
// Automatically load saved services
ModelServiceManager.instance.loadFromModelStore()
}
return ModelServiceManager.instance
}
// Load service configuration from the modelStore (localStorage)
loadFromModelStore(): void {
try {
const saved = localStorage.getItem('model-providers')
if (!saved) {
console.log('🔍 [loadFromModelStore] No saved services found')
return
}
const providers = JSON.parse(saved)
console.log('🔍 [loadFromModelStore] Loading services:', providers.length)
providers.forEach((provider: any) => {
// Convert the modelStore provider format into the ModelService format
// Decision logic:
// 1. enabled === true (explicitly enabled)
// 2. connected === true (already connected)
// 3. If both are undefined but an apiKey exists, treat the service as usable
const isEnabled = provider.enabled === true || provider.connected === true
const hasApiKey = provider.apiKey && provider.apiKey.length > 0
const shouldConnect = isEnabled || (provider.enabled !== false && hasApiKey)
// Parse the model list
let modelList: string[] = []
if (provider.models && Array.isArray(provider.models)) {
modelList = provider.models.map((m: any) =>
typeof m === 'string' ? m : (m.id || m.name || '')
).filter((m: string) => m.length > 0)
}
const service: ModelService = {
id: provider.id,
name: provider.name,
type: this.mapProviderType(provider.type),
url: provider.baseUrl || provider.url || '',
apiKey: provider.apiKey || '',
status: shouldConnect ? 'connected' : 'disconnected',
models: modelList
}
this.services.set(service.id, service)
console.log('🔍 [loadFromModelStore] Adding service:', {
name: service.name,
enabled: provider.enabled,
connected: provider.connected,
hasApiKey,
shouldConnect,
status: service.status,
modelCount: service.models?.length,
firstThreeModels: service.models?.slice(0, 3)
})
})
} catch (error) {
console.error('❌ [loadFromModelStore] Failed to load:', error)
}
}
// Map a provider type to a service type
private mapProviderType(type: string): ModelService['type'] {
const map: Record<string, ModelService['type']> = {
'openai': 'openai',
'claude': 'claude',
'google': 'gemini',
'ollama': 'local',
'volcengine': 'volcengine', // Volcengine (Volcano Engine)
'dashscope': 'dashscope', // Alibaba Cloud DashScope (Tongyi Qianwen)
'azure': 'azure',
'local': 'local',
'custom': 'custom'
}
const mapped = map[type] || 'custom'
console.log('🔍 [mapProviderType]', type, '→', mapped)
return mapped
}
// Test a service connection
async testConnection(service: ModelService): Promise<ApiResponse<{ models: string[] }>> {
try {
const models = await this.fetchModels(service)
return {
success: true,
data: { models }
}
} catch (error) {
return {
success: false,
error: error instanceof Error ? error.message : 'Connection failed'
}
}
}
// Test a service connection (for services with a predefined model list)
private async testServiceConnection(service: ModelService): Promise<void> {
const headers: HeadersInit = {
'Content-Type': 'application/json'
}
// Set authentication headers
switch (service.type) {
case 'volcengine':
case 'openai':
case 'local':
case 'dashscope':
headers['Authorization'] = `Bearer ${service.apiKey}`
break
case 'claude':
headers['x-api-key'] = service.apiKey
headers['anthropic-version'] = '2023-06-01'
break
}
const controller = new AbortController()
const timeoutId = setTimeout(() => controller.abort(), 10000)
try {
// Send a simple test request
const testUrl = `${service.url}/chat/completions`
const response = await fetch(testUrl, {
method: 'POST',
headers,
signal: controller.signal,
body: JSON.stringify({
model: service.type === 'volcengine' ? 'doubao-lite-4k' : 'test',
messages: [{ role: 'user', content: 'hi' }],
max_tokens: 1
})
})
clearTimeout(timeoutId)
// Anything other than an authentication error counts as a pass
if (response.status === 401 || response.status === 403) {
const errorText = await response.text()
throw new Error(`Authentication failed: ${errorText}`)
}
} catch (error) {
clearTimeout(timeoutId)
if (error instanceof Error) {
if (error.name === 'AbortError') {
throw new Error('Connection timed out')
}
throw error
}
throw new Error('Connection test failed')
}
}
// Fetch the list of available models
private async fetchModels(service: ModelService): Promise<string[]> {
// Some services use a predefined model list and do not need an API call
const url = this.getModelsEndpoint(service)
if (!url) {
// For services with a predefined model list, send a test request to verify the connection
await this.testServiceConnection(service)
// Return the predefined model list
return this.parseModelsResponse({}, service.type)
}
const headers: HeadersInit = {
'Content-Type': 'application/json'
}
// Set authentication headers based on the service type
switch (service.type) {
case 'openai':
case 'local':
case 'dashscope':
case 'volcengine':
headers['Authorization'] = `Bearer ${service.apiKey}`
break
case 'claude':
headers['x-api-key'] = service.apiKey
headers['anthropic-version'] = '2023-06-01'
break
case 'gemini':
// Gemini passes the API key as a URL parameter
break
case 'azure':
headers['api-key'] = service.apiKey
break
case 'custom':
// Parse the custom configuration
try {
const config = JSON.parse(service.customConfig || '{}')
Object.assign(headers, config.headers || {})
} catch (e) {
console.warn('Failed to parse custom configuration:', e)
}
break
}
const controller = new AbortController()
const timeoutId = setTimeout(() => controller.abort(), 10000) // 10-second timeout
try {
const response = await fetch(url, {
method: 'GET',
headers,
signal: controller.signal
})
clearTimeout(timeoutId)
if (!response.ok) {
const errorText = await response.text()
throw new Error(`HTTP ${response.status}: ${errorText}`)
}
const data = await response.json()
return this.parseModelsResponse(data, service.type)
} catch (error) {
clearTimeout(timeoutId)
if (error instanceof Error) {
if (error.name === 'AbortError') {
throw new Error('Connection timed out')
}
throw error
}
throw new Error('Unknown error')
}
}
// Get the models-list API endpoint
private getModelsEndpoint(service: ModelService): string {
switch (service.type) {
case 'openai':
case 'local':
return `${service.url}/models`
case 'dashscope':
// Alibaba Cloud DashScope uses the /models endpoint
return `${service.url}/models`
case 'volcengine':
// Volcengine uses a predefined model list (its API does not expose a /models endpoint)
return ''
case 'claude':
// The Claude API has no public models-list endpoint; return the predefined models
return ''
case 'gemini':
return `${service.url}/models?key=${service.apiKey}`
case 'azure':
// Azure OpenAI uses a different endpoint format
const azureUrl = service.url.replace(/\/$/, '')
return `${azureUrl}/openai/deployments?api-version=2023-12-01-preview`
case 'custom':
return `${service.url}/models`
default:
return `${service.url}/models`
}
}
// Parse the models response from each service type
private parseModelsResponse(data: any, type: string): string[] {
switch (type) {
case 'openai':
case 'local':
if (data.data && Array.isArray(data.data)) {
return data.data.map((model: any) => model.id).filter(Boolean)
}
break
case 'dashscope':
// Alibaba Cloud DashScope format
if (data.data && Array.isArray(data.data)) {
return data.data.map((model: any) => model.id || model.model_id).filter(Boolean)
}
// If the response format differs, try other possible shapes
if (data.models && Array.isArray(data.models)) {
return data.models.map((model: any) => model.id || model.model_id || model.name).filter(Boolean)
}
break
case 'volcengine':
// Recommended Volcengine model list
// Reference: https://www.volcengine.com/docs/82379/1330310
return [
// DeepSeek-V3 series - deep-thinking models
'deepseek-v3-1-terminus', // DeepSeek V3.1, terminus build
'deepseek-v3-1-250821', // DeepSeek V3.1, 250821 build
// Doubao Seed 1.6 series - deep-thinking models (recommended)
'doubao-seed-1-6-vision-250815', // Multimodal deep thinking (image + video + GUI)
'doubao-seed-1-6-250615', // Text-only deep thinking
'doubao-seed-1-6-flash-250828', // Fast multimodal deep thinking
'doubao-seed-1-6-thinking-250715', // Thinking-only model
]
case 'claude':
// Predefined Claude model list
return [
'claude-3-5-sonnet-20241022',
'claude-3-haiku-20240307',
'claude-3-sonnet-20240229',
'claude-3-opus-20240229'
]
case 'gemini':
if (data.models && Array.isArray(data.models)) {
return data.models
.map((model: any) => model.name?.replace('models/', ''))
.filter(Boolean)
}
break
case 'azure':
if (data.data && Array.isArray(data.data)) {
return data.data.map((deployment: any) => deployment.id).filter(Boolean)
}
break
case 'custom':
// Try several possible response formats
if (data.models && Array.isArray(data.models)) {
return data.models.map((m: any) => typeof m === 'string' ? m : m.id || m.name).filter(Boolean)
}
if (data.data && Array.isArray(data.data)) {
return data.data.map((m: any) => typeof m === 'string' ? m : m.id || m.name).filter(Boolean)
}
if (Array.isArray(data)) {
return data.map((m: any) => typeof m === 'string' ? m : m.id || m.name).filter(Boolean)
}
break
}
return []
}
// Send a chat request
async sendChatRequest(serviceId: string, messages: any[], model: string): Promise<ApiResponse<any>> {
const startTime = performance.now()
console.log('⏱️ [sendChatRequest] Starting request', { serviceId, model, messageCount: messages.length })
const service = this.services.get(serviceId)
console.log('🔍 [sendChatRequest] serviceId:', serviceId, 'service:', service)
if (!service || service.status !== 'connected') {
return {
success: false,
error: 'Service not connected'
}
}
// Check whether the URL is valid
if (!service.url || !service.url.startsWith('http')) {
console.error('❌ [sendChatRequest] Invalid service URL:', service.url)
return {
success: false,
error: `Invalid service URL: ${service.url}`
}
}
try {
const beforeRequest = performance.now()
console.log('⏱️ [sendChatRequest] Preparation took:', (beforeRequest - startTime).toFixed(2), 'ms')
const response = await this.makeChatRequest(service, messages, model)
const afterRequest = performance.now()
console.log('⏱️ [sendChatRequest] Request took:', (afterRequest - beforeRequest).toFixed(2), 'ms')
console.log('⏱️ [sendChatRequest] Total time:', (afterRequest - startTime).toFixed(2), 'ms')
return {
success: true,
data: response
}
} catch (error) {
console.error('❌ [sendChatRequest] Request error:', error)
return {
success: false,
error: error instanceof Error ? error.message : 'Request failed'
}
}
}
// Send a streaming chat request
async sendChatRequestStream(
serviceId: string,
messages: any[],
model: string,
onChunk: (chunk: string) => void,
tools?: any[]
): Promise<ApiResponse<{ toolCalls?: any[] }>> {
const startTime = performance.now()
console.log('🚀🚀🚀 [sendChatRequestStream] === Entering the streaming request method ===')
console.log('⏱️ [sendChatRequestStream] Starting streaming request', { serviceId, model, messageCount: messages.length })
const service = this.services.get(serviceId)
if (!service || service.status !== 'connected') {
return {
success: false,
error: 'Service not connected'
}
}
if (!service.url || !service.url.startsWith('http')) {
return {
success: false,
error: `Invalid service URL: ${service.url}`
}
}
try {
const toolCalls = await this.makeChatRequestStream(service, messages, model, onChunk, tools)
const endTime = performance.now()
console.log('⏱️ [sendChatRequestStream] Streaming request finished, total time:', (endTime - startTime).toFixed(2), 'ms')
return {
success: true,
data: { toolCalls }
}
} catch (error) {
console.error('❌ [sendChatRequestStream] Streaming request error:', error)
return {
success: false,
error: error instanceof Error ? error.message : 'Streaming request failed'
}
}
}
// The actual chat request
private async makeChatRequest(service: ModelService, messages: any[], model: string): Promise<any> {
const requestStartTime = performance.now()
const headers: HeadersInit = {
'Content-Type': 'application/json'
}
let url = ''
let body: any = {}
console.log('🔍 [makeChatRequest] Service info:', {
type: service.type,
name: service.name,
url: service.url,
model
})
switch (service.type) {
case 'openai':
case 'local':
headers['Authorization'] = `Bearer ${service.apiKey}`
url = `${service.url}/chat/completions`
body = {
model,
messages,
stream: false
}
break
case 'dashscope':
headers['Authorization'] = `Bearer ${service.apiKey}`
url = `${service.url}/chat/completions`
body = {
model,
messages,
stream: false
}
break
case 'volcengine':
headers['Authorization'] = `Bearer ${service.apiKey}`
url = `${service.url}/chat/completions`
body = {
model,
messages,
stream: false
}
break
case 'claude':
headers['x-api-key'] = service.apiKey
headers['anthropic-version'] = '2023-06-01'
url = `${service.url}/messages`
body = {
model,
messages: this.convertToClaudeFormat(messages),
max_tokens: 4096
}
break
case 'gemini':
url = `${service.url}/models/${model}:generateContent?key=${service.apiKey}`
body = {
contents: this.convertToGeminiFormat(messages)
}
break
case 'azure':
headers['api-key'] = service.apiKey
url = `${service.url}/openai/deployments/${model}/chat/completions?api-version=2023-12-01-preview`
body = {
messages,
stream: false
}
break
case 'custom':
try {
const config = JSON.parse(service.customConfig || '{}')
Object.assign(headers, config.headers || {})
} catch (e) {
console.warn('Failed to parse custom configuration:', e)
}
url = `${service.url}/chat/completions`
body = {
model,
messages,
stream: false
}
break
}
console.log('🔍 [makeChatRequest] Final request URL:', url)
console.log('🔍 [makeChatRequest] Request body size:', JSON.stringify(body).length, 'bytes')
const beforeFetch = performance.now()
console.log('⏱️ [makeChatRequest] Request build time:', (beforeFetch - requestStartTime).toFixed(2), 'ms')
// Add a 30-second timeout
const controller = new AbortController()
const timeoutId = setTimeout(() => controller.abort(), 30000)
try {
const response = await fetch(url, {
method: 'POST',
headers,
body: JSON.stringify(body),
signal: controller.signal
})
clearTimeout(timeoutId)
const afterFetch = performance.now()
console.log('⏱️ [makeChatRequest] Network request took:', (afterFetch - beforeFetch).toFixed(2), 'ms')
console.log('🔍 [makeChatRequest] Response status:', response.status, response.statusText)
if (!response.ok) {
const errorText = await response.text()
console.error('❌ [makeChatRequest] Request failed:', {
status: response.status,
statusText: response.statusText,
url,
errorText
})
throw new Error(`HTTP ${response.status}: ${errorText}`)
}
const beforeParse = performance.now()
const result = await response.json()
const afterParse = performance.now()
console.log('⏱️ [makeChatRequest] Response parsing took:', (afterParse - beforeParse).toFixed(2), 'ms')
console.log('⏱️ [makeChatRequest] Total makeChatRequest time:', (afterParse - requestStartTime).toFixed(2), 'ms')
return result
} catch (error) {
clearTimeout(timeoutId)
if (error instanceof Error && error.name === 'AbortError') {
throw new Error('Request timed out (30 seconds)')
}
throw error
}
}
// Streaming chat request
private async makeChatRequestStream(
service: ModelService,
messages: any[],
model: string,
onChunk: (text: string) => void,
tools?: any[]
): Promise<any[] | undefined> {
const requestStartTime = performance.now()
const headers: HeadersInit = {
'Content-Type': 'application/json'
}
let url = ''
let body: any = {}
// Build the request (same as non-streaming, but with stream: true)
switch (service.type) {
case 'openai':
case 'local':
case 'dashscope':
case 'volcengine':
headers['Authorization'] = `Bearer ${service.apiKey}`
url = `${service.url}/chat/completions`
body = {
model,
messages,
stream: true, // ← enable streaming
...(tools && tools.length > 0 ? { tools, tool_choice: 'auto' } : {})
}
break
case 'claude':
headers['x-api-key'] = service.apiKey
headers['anthropic-version'] = '2023-06-01'
url = `${service.url}/messages`
body = {
model,
messages: this.convertToClaudeFormat(messages),
max_tokens: 4096,
stream: true
}
break
case 'azure':
headers['api-key'] = service.apiKey
url = `${service.url}/openai/deployments/${model}/chat/completions?api-version=2023-12-01-preview`
body = {
messages,
stream: true
}
break
default:
url = `${service.url}/chat/completions`
body = {
model,
messages,
stream: true
}
break
}
console.log('🔍 [makeChatRequestStream] Streaming request URL:', url)
console.log('🔍 [makeChatRequestStream] Streaming request body size:', JSON.stringify(body).length, 'bytes')
const controller = new AbortController()
const timeoutId = setTimeout(() => controller.abort(), 60000) // 60-second timeout for streaming requests
try {
const beforeFetch = performance.now()
console.log('⏱️ [makeChatRequestStream] Request build time:', (beforeFetch - requestStartTime).toFixed(2), 'ms')
const response = await fetch(url, {
method: 'POST',
headers,
body: JSON.stringify(body),
signal: controller.signal
})
clearTimeout(timeoutId)
if (!response.ok) {
const errorText = await response.text()
throw new Error(`HTTP ${response.status}: ${errorText}`)
}
const afterFetch = performance.now()
console.log('⏱️ [makeChatRequestStream] Time to first response byte:', (afterFetch - beforeFetch).toFixed(2), 'ms')
// Read the stream
const reader = response.body?.getReader()
if (!reader) {
throw new Error('Could not obtain the response stream')
}
console.log('🌊🌊🌊 [makeChatRequestStream] === Starting to read stream data ===')
const decoder = new TextDecoder()
let buffer = ''
let chunkCount = 0
let totalChars = 0
const firstChunkTimeStart = performance.now()
let collectedToolCalls: any[] = []
const toolCallsMap = new Map<number, any>()
while (true) {
const { done, value } = await reader.read()
if (done) break
chunkCount++
if (chunkCount === 1) {
console.log('⚡⚡⚡ [makeChatRequestStream] Received the first data chunk! Took:', (performance.now() - firstChunkTimeStart).toFixed(2), 'ms')
}
buffer += decoder.decode(value, { stream: true })
const lines = buffer.split('\n')
buffer = lines.pop() || ''
for (const line of lines) {
if (line.trim() === '' || line.trim() === 'data: [DONE]') {
continue
}
if (line.startsWith('data: ')) {
try {
const data = JSON.parse(line.slice(6))
const delta = data.choices?.[0]?.delta
// Handle regular content
const content = delta?.content
if (content) {
totalChars += content.length
onChunk(content)
}
// Handle tool calls
if (delta?.tool_calls) {
for (const toolCall of delta.tool_calls) {
const index = toolCall.index
if (!toolCallsMap.has(index)) {
toolCallsMap.set(index, {
id: toolCall.id || '',
type: toolCall.type || 'function',
function: {
name: toolCall.function?.name || '',
arguments: ''
}
})
}
const existing = toolCallsMap.get(index)!
if (toolCall.function?.name) {
existing.function.name = toolCall.function.name
}
if (toolCall.function?.arguments) {
existing.function.arguments += toolCall.function.arguments
}
}
}
} catch (e) {
// Ignore parse errors
}
}
}
}
// Collect all tool calls
if (toolCallsMap.size > 0) {
collectedToolCalls = Array.from(toolCallsMap.values())
console.log('🔧 [makeChatRequestStream] Detected tool calls:', collectedToolCalls.length)
}
const endTime = performance.now()
console.log('⏱️ [makeChatRequestStream] Streaming receive complete')
console.log('⏱️ [makeChatRequestStream] Chunks received:', chunkCount, 'total characters:', totalChars)
console.log('⏱️ [makeChatRequestStream] Total streaming time:', (endTime - requestStartTime).toFixed(2), 'ms')
return collectedToolCalls.length > 0 ? collectedToolCalls : undefined
} catch (error) {
clearTimeout(timeoutId)
if (error instanceof Error && error.name === 'AbortError') {
throw new Error('Streaming request timed out (60 seconds)')
}
throw error
}
}
// Convert messages to the Claude format
private convertToClaudeFormat(messages: any[]): any[] {
return messages
.filter(msg => msg.role !== 'system')
.map(msg => ({
role: msg.role === 'assistant' ? 'assistant' : 'user',
content: msg.content
}))
}
// Convert messages to the Gemini format
private convertToGeminiFormat(messages: any[]): any[] {
return messages
.filter(msg => msg.role !== 'system')
.map(msg => ({
role: msg.role === 'assistant' ? 'model' : 'user',
parts: [{ text: msg.content }]
}))
}
// Add a service
addService(service: ModelService): void {
this.services.set(service.id, service)
}
// Update a service
updateService(service: ModelService): void {
this.services.set(service.id, service)
}
// Remove a service
removeService(serviceId: string): void {
this.services.delete(serviceId)
}
// Get a service
getService(serviceId: string): ModelService | undefined {
return this.services.get(serviceId)
}
// Get all services
getAllServices(): ModelService[] {
return Array.from(this.services.values())
}
// Connect a service
async connectService(serviceId: string): Promise<void> {
const service = this.services.get(serviceId)
if (!service) throw new Error('Service not found')
service.status = 'connecting'
try {
const result = await this.testConnection(service)
if (result.success && result.data) {
service.status = 'connected'
service.models = result.data.models
service.errorMessage = undefined
service.lastUsed = new Date()
} else {
service.status = 'error'
service.errorMessage = result.error
throw new Error(result.error)
}
} catch (error) {
service.status = 'error'
service.errorMessage = error instanceof Error ? error.message : '连接失败'
throw error
}
}
// Disconnect a service
disconnectService(serviceId: string): void {
const service = this.services.get(serviceId)
if (service) {
service.status = 'disconnected'
service.models = []
service.errorMessage = undefined
}
}
// Health check - test whether a single model is usable
async testModelHealth(service: ModelService, modelId: string): Promise<{
modelId: string
available: boolean
latency?: number
error?: string
}> {
const startTime = Date.now()
try {
// Send a minimal test request
const result = await this.sendChatRequest(service.id, [
{ role: 'user', content: 'hi' }
], modelId)
if (!result.success) {
throw new Error(result.error || 'Test failed')
}
const latency = Date.now() - startTime
return {
modelId,
available: true,
latency
}
} catch (error) {
return {
modelId,
available: false,
error: error instanceof Error ? error.message : 'Test failed'
}
}
}
// Batch health check - test all models
async healthCheckAllModels(
service: ModelService,
onProgress?: (current: number, total: number, modelId: string) => void
): Promise<{
availableModels: string[]
unavailableModels: string[]
results: Array<{
modelId: string
available: boolean
latency?: number
error?: string
}>
}> {
const models = service.models || []
const results: Array<{
modelId: string
available: boolean
latency?: number
error?: string
}> = []
for (let i = 0; i < models.length; i++) {
const modelId = models[i]
// Report progress
if (onProgress) {
onProgress(i + 1, models.length, modelId)
}
// Test the model's health
const result = await this.testModelHealth(service, modelId)
results.push(result)
// Add a small delay to avoid firing requests too quickly
if (i < models.length - 1) {
await new Promise(resolve => setTimeout(resolve, 200))
}
}
// Tally the results
const availableModels = results.filter(r => r.available).map(r => r.modelId)
const unavailableModels = results.filter(r => !r.available).map(r => r.modelId)
return {
availableModels,
unavailableModels,
results
}
}
}
// Export the singleton instance
export const modelServiceManager = ModelServiceManager.getInstance()
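A small usage sketch for the manager above (hypothetical call site; the service id 'my-openai' and the import path './modelServiceManager' are assumptions for illustration):
// Hypothetical consumer: connect a registered service, then run the batch health check.
import { modelServiceManager } from './modelServiceManager' // assumed path
async function checkProvider(serviceId: string = 'my-openai'): Promise<void> {
  await modelServiceManager.connectService(serviceId) // probes the provider and fills in service.models
  const service = modelServiceManager.getService(serviceId)
  if (!service) return
  const report = await modelServiceManager.healthCheckAllModels(service, (current, total, modelId) => {
    console.log(`testing ${modelId} (${current}/${total})`)
  })
  console.log('available models:', report.availableModels)
  console.log('unavailable models:', report.unavailableModels)
}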