Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions backend/app/core/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,10 @@ def assemble_db_connection(cls, v: str | None, values: dict[str, any]) -> str:
MINIMAX_API_KEY: Optional[str] = None
DOUBAO_API_KEY: Optional[str] = None
OLLAMA_BASE_URL: Optional[str] = "http://localhost:11434/v1"
SILICONFLOW_API_KEY: Optional[str] = None
SENSENOVA_API_KEY: Optional[str] = None
AIPING_API_KEY: Optional[str] = None
PPIO_API_KEY: Optional[str] = None

# GitHub配置
GITHUB_TOKEN: Optional[str] = None
Expand Down
2 changes: 2 additions & 0 deletions backend/app/services/llm/adapters/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,12 @@
from .baidu_adapter import BaiduAdapter
from .minimax_adapter import MinimaxAdapter
from .doubao_adapter import DoubaoAdapter
from .lazyllm_adapter import LazyLLMAdapter

# Public adapter classes re-exported by this package.
__all__ = [
    "LiteLLMAdapter",
    "BaiduAdapter",
    "MinimaxAdapter",
    "DoubaoAdapter",
    "LazyLLMAdapter",
]
236 changes: 236 additions & 0 deletions backend/app/services/llm/adapters/lazyllm_adapter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,236 @@
"""
LazyLLM 适配器
通过 LazyLLM 的 OnlineChatModule 提供统一的大模型接入服务

支持的提供商:
- 通过 LazyLLM 支持的所有在线模型提供商
- 包括: OpenAI, GLM, Qwen, Kimi, SenseNova, DeepSeek, Doubao, Siliconflow, PPIO, AIPing 等
"""

import os
import logging
import lazyllm
import asyncio
from ..base_adapter import BaseLLMAdapter
from ..types import (
LLMConfig,
LLMRequest,
LLMResponse,
LLMProvider,
LLMError,
)

logger = logging.getLogger(__name__)


class LazyLLMAdapter(BaseLLMAdapter):
    """LazyLLM adapter.

    Wraps LazyLLM's ``OnlineChatModule`` to give a single, uniform entry
    point for the online LLM providers LazyLLM supports (OpenAI, GLM, Qwen,
    Kimi, SenseNova, DeepSeek, Doubao, SiliconFlow, PPIO, AIPing, ...).
    The underlying module is created lazily on first use and cached.
    """

    # DeepAudit provider -> LazyLLM "source" identifier.
    PROVIDER_SOURCE_MAP = {
        LLMProvider.OPENAI: "openai",
        LLMProvider.GEMINI: "gemini",
        LLMProvider.QWEN: "qwen",
        LLMProvider.DEEPSEEK: "deepseek",
        LLMProvider.ZHIPU: "glm",
        LLMProvider.MOONSHOT: "kimi",
        LLMProvider.DOUBAO: "doubao",
        LLMProvider.SILICONFLOW: "siliconflow",
        LLMProvider.MINIMAX: "minimax",
        LLMProvider.SENSENOVA: "sensenova",
        LLMProvider.PPIO: "ppio",
        LLMProvider.AIPING: "aiping",
    }

    # Provider -> conventional environment variable holding its API key.
    PROVIDER_ENV_MAP = {
        LLMProvider.OPENAI: "OPENAI_API_KEY",
        LLMProvider.GEMINI: "GEMINI_API_KEY",
        LLMProvider.QWEN: "QWEN_API_KEY",
        LLMProvider.DEEPSEEK: "DEEPSEEK_API_KEY",
        LLMProvider.ZHIPU: "ZHIPU_API_KEY",
        LLMProvider.MOONSHOT: "MOONSHOT_API_KEY",
        LLMProvider.DOUBAO: "DOUBAO_API_KEY",
        LLMProvider.SILICONFLOW: "SILICONFLOW_API_KEY",
        LLMProvider.MINIMAX: "MINIMAX_API_KEY",
        LLMProvider.SENSENOVA: "SENSENOVA_API_KEY",
        LLMProvider.PPIO: "PPIO_API_KEY",
        LLMProvider.AIPING: "AIPING_API_KEY",
    }

    def __init__(self, config: LLMConfig):
        super().__init__(config)
        # Built on demand by _get_lazyllm_module(); None until first use.
        self._lazyllm_module = None
        # Resolves eagerly so an unsupported provider fails fast in __init__.
        self._source = self._get_lazyllm_source()

    def _get_lazyllm_source(self) -> str:
        """Resolve the LazyLLM source name for this adapter's provider.

        Raises:
            LLMError: if LazyLLM has no source mapping for the provider.
        """
        source = self.PROVIDER_SOURCE_MAP.get(self.config.provider)
        if not source:
            raise LLMError(
                f"LazyLLM 不支持提供商: {self.config.provider}",
                self.config.provider
            )
        return source

    def _setup_environment(self) -> None:
        """Export the credentials LazyLLM reads from the environment.

        FIX: the explicitly configured ``config.api_key`` now takes
        precedence over an ambient provider env var, so a per-user key can
        no longer be silently overridden by server-level environment state.
        """
        env_key = f"LAZYLLM_{self._source.upper()}_API_KEY"

        # Prefer the key supplied in config; fall back to the provider env var.
        candidate_key = self.config.api_key
        if not candidate_key:
            provider_env_key = self.PROVIDER_ENV_MAP.get(self.config.provider)
            candidate_key = os.getenv(provider_env_key) if provider_env_key else None

        if candidate_key:
            os.environ[env_key] = candidate_key

        # SenseNova additionally requires a secret key alongside the API key.
        if self.config.provider == LLMProvider.SENSENOVA:
            headers = self.config.custom_headers or {}
            secret_key = headers.get("secret_key") or os.getenv("SENSENOVA_SECRET_KEY")
            if secret_key:
                os.environ["LAZYLLM_SENSENOVA_SECRET_KEY"] = secret_key

    def _get_lazyllm_module(self):
        """Create (once) and return the LazyLLM ``OnlineChatModule``.

        Raises:
            LLMError: if module construction fails; the original exception
                is attached and chained for full tracebacks.
        """
        if self._lazyllm_module is None:
            try:
                self._setup_environment()
                kwargs = {
                    "source": self._source,
                    "stream": False,  # this adapter exposes a non-streaming API
                }
                if self.config.base_url:
                    kwargs["base_url"] = self.config.base_url
                if self.config.model:
                    kwargs["model"] = self.config.model

                self._lazyllm_module = lazyllm.OnlineChatModule(**kwargs)
                # Lazy %-style args keep formatting off the happy path.
                logger.info(
                    "LazyLLM 模块初始化成功: source=%s, model=%s",
                    self._source, self.config.model,
                )
            except Exception as e:
                # FIX: chain with `from e` so the root cause is preserved.
                raise LLMError(
                    f"LazyLLM 模块初始化失败: {str(e)}",
                    self.config.provider,
                    original_error=e
                ) from e

        return self._lazyllm_module

    async def complete(self, request: LLMRequest) -> LLMResponse:
        """Run one chat completion through LazyLLM with retry handling."""
        try:
            await self.validate_config()
            return await self.retry(lambda: self._send_request(request))
        except Exception as error:
            # handle_error is expected to raise a normalized LLMError;
            # forward any provider response attached to the exception.
            api_response = getattr(error, 'api_response', None)
            self.handle_error(error, "LazyLLM API 调用失败", api_response=api_response)

    @staticmethod
    def _split_history(messages):
        """Split messages into (final user input, paired chat history).

        LazyLLM expects the final prompt separately from history formatted
        as ``[[user, assistant], ...]``. Unpaired turns and non user/assistant
        roles are skipped — NOTE(review): system prompts are therefore not
        forwarded to LazyLLM; confirm whether they should be folded into
        the input instead.
        """
        user_input = messages[-1].content if messages[-1].role == "user" else ""

        history = []
        i = 0
        while i < len(messages) - 1:
            if (messages[i].role == "user"
                    and i + 1 < len(messages)
                    and messages[i + 1].role == "assistant"):
                history.append([messages[i].content, messages[i + 1].content])
                i += 2
            else:
                i += 1
        return user_input, history

    def _build_call_kwargs(self, request: LLMRequest) -> dict:
        """Merge per-request sampling params over the static config values.

        A runtime value on the request wins; otherwise the config value is
        used; unset (None) parameters are omitted entirely.
        """
        call_kwargs = {}
        for name in ("temperature", "max_tokens", "top_p"):
            value = getattr(request, name)
            if value is None:
                value = getattr(self.config, name)
            if value is not None:
                call_kwargs[name] = value
        return call_kwargs

    async def _send_request(self, request: LLMRequest) -> LLMResponse:
        """Send a single request to LazyLLM and normalize its response.

        Raises:
            LLMError: if the request carries no messages.
        """
        module = self._get_lazyllm_module()

        messages = request.messages
        if not messages:
            raise LLMError("消息列表为空", self.config.provider)

        user_input, history = self._split_history(messages)
        if not user_input:
            # FIX: previously an empty prompt was sent silently when the
            # last message was not a user turn; keep behavior, surface it.
            logger.warning("LazyLLM 请求的最后一条消息不是用户消息, 将发送空输入")

        call_kwargs = self._build_call_kwargs(request)

        # LazyLLM's module call is synchronous; run it off the event loop.
        def _call_lazyllm():
            try:
                return module(
                    user_input,
                    llm_chat_history=history if history else None,
                    **call_kwargs
                )
            except Exception as e:
                logger.error(f"LazyLLM 调用失败: {e}")
                raise

        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(None, _call_lazyllm)

        # Normalize: LazyLLM may return a plain string or a dict payload.
        if isinstance(result, str):
            content = result
        elif isinstance(result, dict):
            content = result.get("content", str(result))
        else:
            content = str(result)

        return LLMResponse(
            content=content,
            model=self.config.model,
            usage=None,  # token usage is tracked inside LazyLLM, not exposed here
            finish_reason="stop"
        )

    async def validate_config(self) -> bool:
        """Validate the adapter configuration.

        Raises:
            LLMError: if the provider is not supported by LazyLLM.
        """
        await super().validate_config()

        if self.config.provider not in self.PROVIDER_SOURCE_MAP:
            raise LLMError(
                f"LazyLLM 适配器不支持提供商: {self.config.provider}",
                self.config.provider
            )

        return True

    @classmethod
    def supports_provider(cls, provider: LLMProvider) -> bool:
        """Return True if LazyLLM can serve the given provider."""
        return provider in cls.PROVIDER_SOURCE_MAP

    async def close(self):
        """Release the cached LazyLLM module."""
        await super().close()
        self._lazyllm_module = None


16 changes: 14 additions & 2 deletions backend/app/services/llm/factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
BaiduAdapter,
MinimaxAdapter,
DoubaoAdapter,
LazyLLMAdapter,
)


Expand Down Expand Up @@ -48,7 +49,14 @@ def create_adapter(cls, config: LLMConfig) -> BaseLLMAdapter:

@classmethod
def _instantiate_adapter(cls, config: LLMConfig) -> BaseLLMAdapter:
"""根据提供商类型实例化适配器"""
"""根据提供商类型实例化适配器

优先级策略:
1. 必须使用原生适配器的提供商(API 格式特殊)
2. LiteLLM 支持的提供商
3. LazyLLM 支持的提供商
4. 不支持的提供商
"""
# 如果未指定模型,使用默认模型
if not config.model:
config.model = DEFAULT_MODELS.get(config.provider, "gpt-4o-mini")
Expand All @@ -57,9 +65,13 @@ def _instantiate_adapter(cls, config: LLMConfig) -> BaseLLMAdapter:
if config.provider in NATIVE_ONLY_PROVIDERS:
return cls._create_native_adapter(config)

# 其他提供商使用 LiteLLM
# 其他提供商优先使用 LiteLLM
if LiteLLMAdapter.supports_provider(config.provider):
return LiteLLMAdapter(config)

# 使用 LazyLLM 适配器
if LazyLLMAdapter.supports_provider(config.provider):
return LazyLLMAdapter(config)

# 不支持的提供商
raise ValueError(f"不支持的LLM提供商: {config.provider}")
Expand Down
13 changes: 13 additions & 0 deletions backend/app/services/llm/service.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,6 +130,10 @@ def _get_provider_api_key_from_user_config(self, provider: LLMProvider, user_llm
LLMProvider.BAIDU: 'baiduApiKey',
LLMProvider.MINIMAX: 'minimaxApiKey',
LLMProvider.DOUBAO: 'doubaoApiKey',
LLMProvider.SILICONFLOW: 'siliconflowApiKey',
LLMProvider.SENSENOVA: 'sensenovaApiKey',
LLMProvider.AIPING: 'aipingApiKey',
LLMProvider.PPIO: 'ppioApiKey',
}
key_name = provider_key_map.get(provider)
if key_name:
Expand All @@ -149,6 +153,10 @@ def _get_provider_api_key(self, provider: LLMProvider) -> str:
LLMProvider.BAIDU: 'BAIDU_API_KEY',
LLMProvider.MINIMAX: 'MINIMAX_API_KEY',
LLMProvider.DOUBAO: 'DOUBAO_API_KEY',
LLMProvider.SILICONFLOW: 'SILICONFLOW_API_KEY',
LLMProvider.SENSENOVA: 'SENSENOVA_API_KEY',
LLMProvider.AIPING: 'AIPING_API_KEY',
LLMProvider.PPIO: 'PPIO_API_KEY',
LLMProvider.OLLAMA: None, # Ollama 不需要 API Key
}
key_name = provider_key_map.get(provider)
Expand Down Expand Up @@ -178,6 +186,10 @@ def _parse_provider(self, provider_str: str) -> LLMProvider:
'minimax': LLMProvider.MINIMAX,
'doubao': LLMProvider.DOUBAO,
'ollama': LLMProvider.OLLAMA,
'siliconflow': LLMProvider.SILICONFLOW,
'sensenova': LLMProvider.SENSENOVA,
'aiping': LLMProvider.AIPING,
'ppio': LLMProvider.PPIO,
}
return provider_map.get(provider_str.lower(), LLMProvider.OPENAI)

Expand Down Expand Up @@ -387,6 +399,7 @@ async def analyze_code(self, code: str, language: str) -> Dict[str, Any]:
)

response = await adapter.complete(request)

content = response.content

# 记录 LLM 原始响应(用于调试)
Expand Down
12 changes: 12 additions & 0 deletions backend/app/services/llm/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,10 @@ class LLMProvider(str, Enum):
MINIMAX = "minimax" # MiniMax
DOUBAO = "doubao" # 字节豆包
OLLAMA = "ollama" # Ollama 本地大模型
SILICONFLOW = "siliconflow" # SiliconFlow
SENSENOVA = "sensenova" # SenseNova
AIPING = "aiping" # AI PING
PPIO = "ppio" # PPIO AI


@dataclass
Expand Down Expand Up @@ -102,6 +106,10 @@ def __init__(
LLMProvider.MINIMAX: "minimax-m2",
LLMProvider.DOUBAO: "doubao-1.6-pro",
LLMProvider.OLLAMA: "llama3.3-70b",
LLMProvider.SILICONFLOW: "deepseek-ai/DeepSeek-V3.2",
LLMProvider.SENSENOVA: "SenseChat-5",
LLMProvider.AIPING: "DeepSeek-V3.2",
LLMProvider.PPIO: "deepseek/deepseek-v3.2",
}


Expand All @@ -118,6 +126,10 @@ def __init__(
LLMProvider.OLLAMA: "http://localhost:11434/v1",
LLMProvider.GEMINI: "https://generativelanguage.googleapis.com/v1beta",
LLMProvider.CLAUDE: "https://api.anthropic.com/v1",
LLMProvider.SENSENOVA: "https://api.sensenova.com/v1",
LLMProvider.SILICONFLOW: "https://api.siliconflow.cn/v1",
LLMProvider.AIPING: "https://api.aiping.cloud/v1",
LLMProvider.PPIO: "https://api.ppio.ai/v1",
}


Expand Down
1 change: 1 addition & 0 deletions backend/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ httpx>=0.25.0
# ============ LLM Integration ============
litellm>=1.0.0
tiktoken>=0.5.2
lazyllm>=0.7.3

# ============ Report Generation ============
reportlab>=4.0.0
Expand Down
Loading