# routes.py
  1. # @description:
  2. # @author: licanglong
  3. # @date: 2025/11/20 14:22
  4. import json
  5. import openai
  6. from flask import jsonify, request
  7. from openai.types.chat import ChatCompletion
  8. from app.blueprints.risk import risk_bp
  9. from app.client.VectorStoreClient import vector_store_client
  10. from app.constants.vector_store import VectorStoreCollection
  11. from app.core import BizException, CTX
  12. from app.models.Result import SysResult
  13. from app.models.dto import FinalDecisionResult, RiskEvidenceResult
  14. from app.prompt import person_consumption_prompt, external_evidence_search_prompt
  15. from app.utils import AIStreamJSONParser
  16. @risk_bp.route('/decide', methods=['POST'])
  17. def risk_decide():
  18. """
  19. 发票风险裁决
  20. :return:
  21. """
  22. invoice_data = request.json
  23. vector = vector_store_client.embedding.encode(f"""
  24. 特定业务类型:{invoice_data['tdywlx'] or ''}
  25. 购买方名称: {invoice_data['gmfmc'] or ''}
  26. 货物名称:{invoice_data['hwmc'] or ''}
  27. 规格型号:{invoice_data['ggxh'] or ''}
  28. 开票人:{invoice_data['kpr'] or ''}
  29. """)
  30. rules = vector_store_client.client.query_points(
  31. collection_name=VectorStoreCollection.RULE_EMBED_STORE,
  32. query=vector.tolist(),
  33. limit=5,
  34. score_threshold=0.5
  35. )
  36. cases = vector_store_client.client.query_points(
  37. collection_name=VectorStoreCollection.CASE_EMBED_STORE,
  38. query=vector.tolist(),
  39. limit=5,
  40. score_threshold=0.5
  41. )
  42. merchants = vector_store_client.client.query_points(
  43. collection_name=VectorStoreCollection.MERCHANTS_EMBED_STORE,
  44. query=vector.tolist(),
  45. limit=5,
  46. score_threshold=0.5
  47. )
  48. edges = vector_store_client.client.query_points(
  49. collection_name=VectorStoreCollection.EDGES_EMBED_STORE,
  50. query=vector.tolist(),
  51. limit=5,
  52. score_threshold=0.5
  53. )
  54. input_data = {
  55. "invoice_context": invoice_data,
  56. "rules": [hit.payload for hit in rules.points],
  57. "cases": [hit.payload for hit in cases.points],
  58. "industry": [hit.payload for hit in merchants.points],
  59. "signals": [hit.payload for hit in edges.points]
  60. }
  61. final_user_prompt = person_consumption_prompt.get_person_consumption_user_prompt(
  62. json.dumps(input_data, ensure_ascii=False))
  63. final_user_prompt = final_user_prompt.replace("{{input_data_desc}}", "")
  64. client = openai.OpenAI(
  65. api_key=CTX.ENV.getprop("llm.qwen.api_key", raise_error=True),
  66. base_url=CTX.ENV.getprop("llm.qwen.base_url", raise_error=True),
  67. )
  68. completion = client.chat.completions.create(
  69. model="qwen-plus",
  70. messages=[{'role': 'system', 'content': person_consumption_prompt.system_prompt},
  71. {'role': 'user', 'content': final_user_prompt}],
  72. stream=True,
  73. stream_options={"include_usage": True}
  74. )
  75. parser = AIStreamJSONParser()
  76. for chunk in completion:
  77. parser.feed_chunk(chunk.model_dump_json())
  78. if parser.is_finished(chunk.model_dump_json()):
  79. break
  80. result = parser.get_result()
  81. decision_result: FinalDecisionResult = FinalDecisionResult.model_validate(result)
  82. return jsonify(SysResult.success(data=decision_result.dict()))
  83. @risk_bp.route('/evidence', methods=['POST'])
  84. def evidence_replenish():
  85. invoice_data = request.json
  86. input_data = {
  87. "invoice_context": invoice_data
  88. }
  89. final_external_evidence_user_prompt = external_evidence_search_prompt.get_external_evidence_user_prompt(
  90. json.dumps(input_data, ensure_ascii=False))
  91. client = openai.OpenAI(
  92. api_key=CTX.ENV.getprop("llm.qwen.api_key", raise_error=True),
  93. base_url=CTX.ENV.getprop("llm.qwen.base_url", raise_error=True),
  94. )
  95. completion: ChatCompletion = client.chat.completions.create(
  96. model="qwen-plus",
  97. messages=[
  98. {'role': 'system', 'content': external_evidence_search_prompt.external_evidence_system_prompt},
  99. {'role': 'user', 'content': final_external_evidence_user_prompt}],
  100. extra_body={"enable_search": True,
  101. "search_options": {"search_strategy": "agent", "enable_source": True}}
  102. )
  103. if not completion.choices:
  104. raise BizException("LLM响应异常")
  105. generate_content = completion.choices[0].message.content
  106. evidence_result: RiskEvidenceResult = RiskEvidenceResult.model_validate(json.loads(generate_content))
  107. return jsonify(SysResult.success(data=evidence_result.dict()))