first commit
src/server/LLMService.ts (new file, 237 lines added)
@@ -0,0 +1,237 @@
import OpenAI from 'openai';
import type { LLMConfig, Tool } from '../types/index.js';

export class LLMService {
  private openai?: OpenAI;
  private config?: LLMConfig;

  /**
   * Configure the LLM service.
   */
  configure(config: LLMConfig): void {
    this.config = config;

    // Only create an OpenAI client when the provider is 'openai' and an API key is supplied.
    if (config.provider === 'openai' && config.apiKey) {
      this.openai = new OpenAI({
        apiKey: config.apiKey,
        baseURL: config.baseUrl
      });
    }
  }
  /**
   * Generate tool-call parameters from the user's input and the tool's schema.
   */
  async generateParameters(
    userInput: string,
    tool: Tool
  ): Promise<Record<string, any>> {
    if (!this.openai || !this.config?.enabled) {
      throw new Error('LLM service is not configured or not enabled');
    }

    const systemPrompt = this.buildParameterGenerationPrompt(tool);

    try {
      // Request a JSON object response so the result can be parsed directly.
      const response = await this.openai.chat.completions.create({
        model: this.config.model,
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: userInput }
        ],
        temperature: this.config.temperature || 0.1,
        max_tokens: this.config.maxTokens || 1000,
        response_format: { type: 'json_object' }
      });

      const content = response.choices[0]?.message?.content;
      if (!content) {
        throw new Error('LLM returned no valid response');
      }

      return JSON.parse(content);
    } catch (error) {
      if (error instanceof Error) {
        throw new Error(`LLM call failed: ${error.message}`);
      }
      throw new Error('LLM call failed: unknown error');
    }
  }
  /**
   * Analyze the user's intent and select the most suitable tool.
   */
  async analyzeIntent(
    userInput: string,
    availableTools: Tool[]
  ): Promise<{
    selectedTool?: string;
    confidence: number;
    reasoning: string;
    suggestedParameters?: Record<string, any>;
  }> {
    // Unlike generateParameters, this method degrades gracefully instead of throwing.
    if (!this.openai || !this.config?.enabled) {
      return {
        confidence: 0,
        reasoning: 'LLM service is not enabled; please select a tool manually'
      };
    }

    const systemPrompt = this.buildIntentAnalysisPrompt(availableTools);

    try {
      const response = await this.openai.chat.completions.create({
        model: this.config.model,
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: userInput }
        ],
        temperature: this.config.temperature || 0.1,
        max_tokens: this.config.maxTokens || 1000,
        response_format: { type: 'json_object' }
      });

      const content = response.choices[0]?.message?.content;
      if (!content) {
        return {
          confidence: 0,
          reasoning: 'LLM analysis failed'
        };
      }

      return JSON.parse(content);
    } catch (error) {
      console.error('LLM intent analysis failed:', error);
      return {
        confidence: 0,
        reasoning: 'An error occurred during LLM analysis'
      };
    }
  }
  /**
   * Generate a conversational response.
   */
  async generateResponse(
    userInput: string,
    context?: string
  ): Promise<string> {
    if (!this.openai || !this.config?.enabled) {
      throw new Error('LLM service is not configured or not enabled');
    }

    const messages: Array<{ role: 'system' | 'user' | 'assistant'; content: string }> = [
      {
        role: 'system',
        content: `You are an intelligent assistant for an MCP (Model Context Protocol) client. You can help the user:
1. Understand and use the tools provided by various MCP servers
2. Analyze tool execution results and offer suggestions
3. Assist with configuring MCP servers

Reply in a friendly, professional tone.${context ? `\n\nCurrent context: ${context}` : ''}`
      },
      { role: 'user', content: userInput }
    ];

    try {
      const response = await this.openai.chat.completions.create({
        model: this.config.model,
        messages,
        temperature: this.config.temperature || 0.7,
        max_tokens: this.config.maxTokens || 2000
      });

      return response.choices[0]?.message?.content || 'Sorry, I could not generate a reply.';
    } catch (error) {
      console.error('Failed to generate a conversational reply:', error);
      throw new Error('Failed to generate a reply; please try again later');
    }
  }
  /**
   * Build the prompt for parameter generation.
   */
  private buildParameterGenerationPrompt(tool: Tool): string {
    const properties = tool.inputSchema?.properties || {};
    const required = tool.inputSchema?.required || [];

    // Render each schema property as a one-line description for the prompt.
    const propertiesDesc = Object.entries(properties)
      .map(([key, prop]: [string, any]) => {
        const isRequired = required.includes(key) ? ' (required)' : ' (optional)';
        const typeInfo = prop.type ? `type: ${prop.type}` : '';
        const enumInfo = prop.enum ? `allowed values: ${prop.enum.join(', ')}` : '';
        const desc = prop.description || 'no description';

        return `- ${key}${isRequired}: ${desc}${typeInfo ? ` | ${typeInfo}` : ''}${enumInfo ? ` | ${enumInfo}` : ''}`;
      })
      .join('\n');

    return `You are a parameter-generation assistant. Based on the user's input, generate suitable parameters for the tool "${tool.name}".

Tool description: ${tool.description || 'no description'}

Parameters:
${propertiesDesc || 'This tool takes no parameters'}

Requirements:
1. Carefully analyze the user's input and understand their real intent
2. Generate a reasonable value for every required parameter
3. Also generate appropriate values for relevant optional parameters
4. If a parameter's value cannot be determined, set it to null or a sensible default
5. Return a standard JSON object
6. Make sure the generated parameters satisfy the tool's requirements

Example output format:
{
  "parameter1": "value1",
  "parameter2": "value2",
  "parameter3": null
}`;
  }
  /**
   * Build the prompt for intent analysis.
   */
  private buildIntentAnalysisPrompt(tools: Tool[]): string {
    const toolList = tools
      .map(tool => `- ${tool.name}: ${tool.description || 'no description'}`)
      .join('\n');

    return `You are an intent-analysis assistant. Analyze the user's input and choose the most suitable tool to fulfill the request.

Available tools:
${toolList || 'No tools are currently available'}

Analysis requirements:
1. Carefully understand the user's real intent and needs
2. Select the best-matching tool from the available tools
3. Rate the confidence of the match (0-100; higher means more certain)
4. Give a detailed reason for the choice
5. If applicable, pre-generate some suggested parameters

Return JSON in this format:
{
  "selectedTool": "name of the best-matching tool, or null if none fits",
  "confidence": 85,
  "reasoning": "a detailed reason for choosing this tool, or why no suitable tool was found",
  "suggestedParameters": {
    "param1": "suggested parameter value",
    "param2": "another parameter value"
  }
}`;
  }
  /**
   * Check whether the service is available.
   */
  isAvailable(): boolean {
    return !!(this.openai && this.config?.enabled);
  }

  /**
   * Get the current configuration.
   */
  getConfig(): LLMConfig | undefined {
    return this.config;
  }
}
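Note: the imported LLMConfig and Tool types live in src/types/index.ts, which is not part of this file. Inferred purely from how their fields are used above, they would look roughly like the sketch below; the actual definitions may differ.

// Hypothetical shapes, inferred from usage in LLMService.ts; not the real definitions.
interface LLMConfig {
  enabled: boolean;       // every method is gated on this flag
  provider: string;       // only 'openai' creates a client in configure()
  apiKey?: string;
  baseUrl?: string;       // forwarded to the OpenAI client as baseURL
  model: string;
  temperature?: number;   // per-call defaults: 0.1 for JSON calls, 0.7 for chat
  maxTokens?: number;     // per-call defaults: 1000 for JSON calls, 2000 for chat
}

interface Tool {
  name: string;
  description?: string;
  inputSchema?: {
    properties?: Record<string, any>;
    required?: string[];
  };
}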
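For orientation only, here is a minimal sketch of how a caller might wire this service up. The import paths, config values, model name, and handleUserInput helper are all placeholders and not part of this commit; the configure call assumes the inferred LLMConfig sketch above.

import { LLMService } from './src/server/LLMService.js';
import type { Tool } from './src/types/index.js';

const llm = new LLMService();

// Placeholder configuration; field names follow the inferred LLMConfig sketch above.
llm.configure({
  enabled: true,
  provider: 'openai',
  apiKey: process.env.OPENAI_API_KEY ?? '',
  model: 'gpt-4o-mini',   // illustrative model name only
  temperature: 0.1,
  maxTokens: 1000
});

// Hypothetical caller: pick a tool for the user's request, then generate its parameters.
async function handleUserInput(input: string, tools: Tool[]): Promise<void> {
  if (!llm.isAvailable()) {
    console.log('LLM disabled; fall back to manual tool selection');
    return;
  }

  const intent = await llm.analyzeIntent(input, tools);
  if (!intent.selectedTool) {
    console.log(intent.reasoning);
    return;
  }

  const tool = tools.find(t => t.name === intent.selectedTool);
  if (tool) {
    const params = await llm.generateParameters(input, tool);
    console.log(`Would call ${tool.name} with`, params);
  }
}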