From 348f4e0fe064fda257351a31afcd4975b2353b6e Mon Sep 17 00:00:00 2001
From: lkddi
Date: Sat, 28 Mar 2026 20:53:42 +0800
Subject: [PATCH] =?UTF-8?q?fix(=E7=99=BE=E5=AE=B6=E4=B9=90AI=E9=A2=84?=
 =?UTF-8?q?=E6=B5=8B):=20=E5=85=BC=E5=AE=B9=E6=8E=A8=E7=90=86=E5=9E=8B?=
 =?UTF-8?q?=E6=A8=A1=E5=9E=8B(StepFun/DeepSeek-R1)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- max_tokens 从 10 调整到 1000,避免推理模型因 finish_reason:length 截断
- content 为 null 时从 reasoning 字段正则提取预测关键词作为兜底
- 经 openrouter/stepfun-step-3.5-flash 实测验证通过
---
 app/Services/BaccaratPredictionService.php | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/app/Services/BaccaratPredictionService.php b/app/Services/BaccaratPredictionService.php
index c109f54..3d6f0c4 100644
--- a/app/Services/BaccaratPredictionService.php
+++ b/app/Services/BaccaratPredictionService.php
@@ -125,8 +125,8 @@ PROMPT;
             ->timeout(self::REQUEST_TIMEOUT)
             ->post($endpoint, [
                 'model' => $config->model,
-                'temperature' => 0.3, // 预测任务偏确定性,使用较低温度
-                'max_tokens' => 10, // 只需要输出单个词
+                'temperature' => 0.3, // 预测任务偏确定性,使用较低温度
+                'max_tokens' => 1000, // 推理模型需要大量 token 完成思考后才输出 content
                 'messages' => [
                     [
                         'role' => 'system',
@@ -147,7 +147,17 @@ PROMPT;
         }

         $data = $response->json();
-        $reply = trim($data['choices'][0]['message']['content'] ?? '');
+        $message = $data['choices'][0]['message'] ?? [];
+
+        // 兼容推理模型(如 DeepSeek-R1、StepFun):content 可能为 null,
+        // 此时从 reasoning 字段中正则匹配最后出现的预测关键词作为兜底。
+        $reply = trim($message['content'] ?? '');
+        if ($reply === '') {
+            $reasoning = $message['reasoning'] ?? '';
+            if (preg_match('/[大小豹子]+(?=[^大小豹子]*$)/u', $reasoning, $m)) {
+                $reply = $m[0];
+            }
+        }

         $promptTokens = $data['usage']['prompt_tokens'] ?? 0;
         $completionTokens = $data['usage']['completion_tokens'] ?? 0;