fix(百家乐AI预测): 兼容推理型模型(StepFun/DeepSeek-R1)
- max_tokens 从 10 调整到 1000,避免推理模型因 finish_reason:length 截断
- content 为 null 时从 reasoning 字段正则提取预测关键词作为兜底
- 经 openrouter/stepfun-step-3.5-flash 实测验证通过
This commit is contained in:
@@ -125,8 +125,8 @@ PROMPT;
|
||||
->timeout(self::REQUEST_TIMEOUT)
|
||||
->post($endpoint, [
|
||||
'model' => $config->model,
|
||||
'temperature' => 0.3, // 预测任务偏确定性,使用较低温度
|
||||
'max_tokens' => 10, // 只需要输出单个词
|
||||
'temperature' => 0.3, // 预测任务偏确定性,使用较低温度
|
||||
'max_tokens' => 1000, // 推理模型需要大量 token 完成思考后才输出 content
|
||||
'messages' => [
|
||||
[
|
||||
'role' => 'system',
|
||||
@@ -147,7 +147,17 @@ PROMPT;
|
||||
}
|
||||
|
||||
$data = $response->json();
|
||||
$reply = trim($data['choices'][0]['message']['content'] ?? '');
|
||||
$message = $data['choices'][0]['message'] ?? [];
|
||||
|
||||
// 兼容推理模型(如 DeepSeek-R1、StepFun):content 可能为 null,
|
||||
// 此时从 reasoning 字段中正则匹配最后出现的预测关键词作为兜底。
|
||||
$reply = trim($message['content'] ?? '');
|
||||
if ($reply === '') {
|
||||
$reasoning = $message['reasoning'] ?? '';
|
||||
if (preg_match('/[大小豹子]+(?=[^大小豹子]*$)/u', $reasoning, $m)) {
|
||||
$reply = $m[0];
|
||||
}
|
||||
}
|
||||
|
||||
$promptTokens = $data['usage']['prompt_tokens'] ?? 0;
|
||||
$completionTokens = $data['usage']['completion_tokens'] ?? 0;
|
||||
|
||||
Reference in New Issue
Block a user