# @description: invoice risk decision & external-evidence endpoints
# @author: licanglong
# @date: 2025/11/20 14:22
import json

import openai
from flask import jsonify, request
from openai.types.chat import ChatCompletion

from app.blueprints.risk import risk_bp
from app.client.VectorStoreClient import vector_store_client
from app.constants.vector_store import VectorStoreCollection
from app.core import BizException, CTX
from app.models.Result import SysResult
from app.models.dto import FinalDecisionResult, RiskEvidenceResult
from app.prompt import person_consumption_prompt, external_evidence_search_prompt
from app.utils import AIStreamJSONParser

# Retrieval tuning shared by all vector-store lookups: top-k hits per
# collection and the minimum similarity score a hit must reach.
_QUERY_LIMIT = 5
_SCORE_THRESHOLD = 0.5


def _make_llm_client() -> openai.OpenAI:
    """Build an OpenAI-compatible client for the Qwen endpoint.

    Credentials come from environment config; ``raise_error=True`` makes a
    missing key fail loudly instead of yielding an unusable client.
    """
    return openai.OpenAI(
        api_key=CTX.ENV.getprop("llm.qwen.api_key", raise_error=True),
        base_url=CTX.ENV.getprop("llm.qwen.base_url", raise_error=True),
    )


def _query_payloads(collection_name, vector):
    """Query one vector-store collection, returning only the hit payloads.

    :param collection_name: target collection (VectorStoreCollection member)
    :param vector: query embedding as a plain list of floats
    :return: list of payload dicts for hits above the score threshold
    """
    response = vector_store_client.client.query_points(
        collection_name=collection_name,
        query=vector,
        limit=_QUERY_LIMIT,
        score_threshold=_SCORE_THRESHOLD,
    )
    return [hit.payload for hit in response.points]


def _json_body():
    """Return the request's JSON body, or raise if it is missing."""
    body = request.json
    if body is None:
        raise BizException("请求体不能为空")
    return body


@risk_bp.route('/decide', methods=['POST'])
def risk_decide():
    """
    发票风险裁决 (invoice risk decision).

    Embeds key invoice fields, retrieves related rules / cases / industry /
    signal context from the vector store, then asks the LLM (streamed JSON)
    for a final decision.
    :return: JSON-wrapped FinalDecisionResult
    """
    invoice_data = _json_body()

    # .get(...) so an absent field degrades to '' (the `or ''` already shows
    # that intent) instead of raising KeyError.
    vector = vector_store_client.embedding.encode(f"""
    特定业务类型:{invoice_data.get('tdywlx') or ''}
    购买方名称: {invoice_data.get('gmfmc') or ''}
    货物名称:{invoice_data.get('hwmc') or ''}
    规格型号:{invoice_data.get('ggxh') or ''}
    开票人:{invoice_data.get('kpr') or ''}
    """).tolist()

    input_data = {
        "invoice_context": invoice_data,
        "rules": _query_payloads(VectorStoreCollection.RULE_EMBED_STORE, vector),
        "cases": _query_payloads(VectorStoreCollection.CASE_EMBED_STORE, vector),
        "industry": _query_payloads(VectorStoreCollection.MERCHANTS_EMBED_STORE, vector),
        "signals": _query_payloads(VectorStoreCollection.EDGES_EMBED_STORE, vector),
    }

    final_user_prompt = person_consumption_prompt.get_person_consumption_user_prompt(
        json.dumps(input_data, ensure_ascii=False))
    # The template placeholder is unused on this path; blank it out.
    final_user_prompt = final_user_prompt.replace("{{input_data_desc}}", "")

    client = _make_llm_client()
    completion = client.chat.completions.create(
        model="qwen-plus",
        messages=[
            {'role': 'system', 'content': person_consumption_prompt.system_prompt},
            {'role': 'user', 'content': final_user_prompt},
        ],
        stream=True,
        stream_options={"include_usage": True},
    )

    parser = AIStreamJSONParser()
    for chunk in completion:
        # Serialize each chunk once (the original called model_dump_json()
        # twice per chunk, for feed_chunk and is_finished).
        chunk_json = chunk.model_dump_json()
        parser.feed_chunk(chunk_json)
        if parser.is_finished(chunk_json):
            break

    result = parser.get_result()
    decision_result: FinalDecisionResult = FinalDecisionResult.model_validate(result)
    # model_dump() replaces the pydantic-v2-deprecated .dict(); the file is
    # already on v2 (model_validate above).
    return jsonify(SysResult.success(data=decision_result.model_dump()))


@risk_bp.route('/evidence', methods=['POST'])
def evidence_replenish():
    """
    Supplement external evidence for an invoice via a web-search-enabled
    (non-streaming) LLM call.
    :return: JSON-wrapped RiskEvidenceResult
    :raises BizException: when the request body is missing or the LLM
        returns no choices
    """
    invoice_data = _json_body()

    input_data = {
        "invoice_context": invoice_data,
    }
    final_external_evidence_user_prompt = external_evidence_search_prompt.get_external_evidence_user_prompt(
        json.dumps(input_data, ensure_ascii=False))

    client = _make_llm_client()
    completion: ChatCompletion = client.chat.completions.create(
        model="qwen-plus",
        messages=[
            {'role': 'system', 'content': external_evidence_search_prompt.external_evidence_system_prompt},
            {'role': 'user', 'content': final_external_evidence_user_prompt},
        ],
        # DashScope-specific options: agent-driven web search with sources.
        extra_body={"enable_search": True,
                    "search_options": {"search_strategy": "agent", "enable_source": True}},
    )
    if not completion.choices:
        raise BizException("LLM响应异常")

    generate_content = completion.choices[0].message.content
    evidence_result: RiskEvidenceResult = RiskEvidenceResult.model_validate(
        json.loads(generate_content))
    return jsonify(SysResult.success(data=evidence_result.model_dump()))