| 12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364 |
# @description:
# @author: licanglong
# @date: 2026/1/5 9:47
- import json
- import openai
- from openai.types.chat import ChatCompletion
- from app.core import CTX, BizException
- from app.utils import ali_search_tool, get_current_time
class LLMClient:
    """Placeholder base for LLM client implementations.

    Both methods are stubs (they return ``None``); concrete providers are
    expected to override them.
    """

    def get_llm_provider(self):
        # Stub: concrete subclasses should return their provider handle.
        pass

    def generate(self) -> str:
        # Stub: concrete subclasses should return the generated text.
        pass
async def llm_call(tools, messages):
    """Send *messages* to the Qwen chat-completions endpoint and return the
    final assistant text.

    When the model answers with tool calls, the supported tools
    (``ali_search_tool``, ``get_current_time``) are executed, their results
    are appended as ``role="tool"`` messages, and the conversation is
    resubmitted recursively until the model produces plain text.

    Args:
        tools: OpenAI-style tool/function specs, or a falsy value for none.
        messages: Chat history in OpenAI message-dict format.

    Returns:
        The assistant's text content from the final completion.

    Raises:
        BizException: If the completion carries no choices.
    """
    client = openai.AsyncOpenAI(
        api_key=CTX.ENV.getprop("llm.qwen.api_key", raise_error=True),
        base_url=CTX.ENV.getprop("llm.qwen.base_url", raise_error=True),
    )
    completion: ChatCompletion = await client.chat.completions.create(
        model="qwen-plus-latest",
        messages=messages,
        tools=tools if tools else None,
        tool_choice="auto" if tools else "none",
        # extra_body={
        #     "enable_thinking": True
        # }
    )
    if not completion.choices:
        raise BizException("LLM响应异常")

    choice = completion.choices[0]
    if choice.finish_reason == "tool_calls":
        tool_infos = []
        for call in choice.message.tool_calls:
            if call.function.name == "ali_search_tool":
                args = json.loads(call.function.arguments)
                keyword = args["keyword"]
                info = await ali_search_tool(keyword)
                # Cap the search payload at 5 items to keep the follow-up
                # prompt small; a plain [:5] slice already clamps to the
                # list length, so no min() is needed.
                info['pageItems'] = info['pageItems'][:5]
                tool_infos.append({
                    "role": "tool",
                    "tool_call_id": call.id,
                    "content": json.dumps(info, ensure_ascii=False),
                })
            elif call.function.name == "get_current_time":
                info = get_current_time()
                tool_infos.append({
                    "role": "tool",
                    "tool_call_id": call.id,
                    "content": info,
                })
            # NOTE(review): an unrecognized tool name is silently skipped,
            # leaving the assistant's tool_call without a matching "tool"
            # reply in the resubmitted history — confirm upstream only ever
            # registers the two tools handled above.
        # Feed the tool results back so the model can compose its answer.
        return await llm_call(tools, [*messages, choice.message, *tool_infos])

    return choice.message.content
|