diff --git a/pkg/agent/loop.go b/pkg/agent/loop.go
index cd4276155..870a62234 100644
--- a/pkg/agent/loop.go
+++ b/pkg/agent/loop.go
@@ -616,8 +616,9 @@ func (al *AgentLoop) runLLMIteration(ctx context.Context, messages []providers.M
 	// Build assistant message with tool calls
 	assistantMsg := providers.Message{
-		Role:    "assistant",
-		Content: response.Content,
+		Role:             "assistant",
+		Content:          response.Content,
+		ReasoningContent: response.ReasoningContent, // Preserve for thinking models (e.g., GLM-Z1)
 	}
 	for _, tc := range response.ToolCalls {
 		argumentsJSON, _ := json.Marshal(tc.Arguments)
diff --git a/pkg/providers/http_provider.go b/pkg/providers/http_provider.go
index 4cf2c6db2..611ee7971 100644
--- a/pkg/providers/http_provider.go
+++ b/pkg/providers/http_provider.go
@@ -127,7 +127,8 @@ func (p *HTTPProvider) parseResponse(body []byte) (*LLMResponse, error) {
 	var apiResponse struct {
 		Choices []struct {
 			Message struct {
-				Content string `json:"content"`
+				Content          string `json:"content"`
+				ReasoningContent string `json:"reasoning_content"` // For thinking models (e.g., GLM-Z1)
 				ToolCalls []struct {
 					ID   string `json:"id"`
 					Type string `json:"type"`
@@ -186,10 +187,11 @@ func (p *HTTPProvider) parseResponse(body []byte) (*LLMResponse, error) {
 	}

 	return &LLMResponse{
-		Content:      choice.Message.Content,
-		ToolCalls:    toolCalls,
-		FinishReason: choice.FinishReason,
-		Usage:        apiResponse.Usage,
+		Content:          choice.Message.Content,
+		ReasoningContent: choice.Message.ReasoningContent,
+		ToolCalls:        toolCalls,
+		FinishReason:     choice.FinishReason,
+		Usage:            apiResponse.Usage,
 	}, nil
 }
diff --git a/pkg/providers/types.go b/pkg/providers/types.go
index 88b62e975..11917806b 100644
--- a/pkg/providers/types.go
+++ b/pkg/providers/types.go
@@ -16,10 +16,11 @@ type FunctionCall struct {
 }

 type LLMResponse struct {
-	Content      string     `json:"content"`
-	ToolCalls    []ToolCall `json:"tool_calls,omitempty"`
-	FinishReason string     `json:"finish_reason"`
-	Usage        *UsageInfo `json:"usage,omitempty"`
+	Content          string     `json:"content"`
+	ReasoningContent string     `json:"reasoning_content,omitempty"` // For thinking models (e.g., GLM-Z1)
+	ToolCalls        []ToolCall `json:"tool_calls,omitempty"`
+	FinishReason     string     `json:"finish_reason"`
+	Usage            *UsageInfo `json:"usage,omitempty"`
 }

 type UsageInfo struct {
@@ -29,10 +30,11 @@ type UsageInfo struct {
 }

 type Message struct {
-	Role       string     `json:"role"`
-	Content    string     `json:"content"`
-	ToolCalls  []ToolCall `json:"tool_calls,omitempty"`
-	ToolCallID string     `json:"tool_call_id,omitempty"`
+	Role             string     `json:"role"`
+	Content          string     `json:"content"`
+	ReasoningContent string     `json:"reasoning_content,omitempty"` // For thinking models (e.g., GLM-Z1)
+	ToolCalls        []ToolCall `json:"tool_calls,omitempty"`
+	ToolCallID       string     `json:"tool_call_id,omitempty"`
 }

 type LLMProvider interface {
diff --git a/pkg/tools/toolloop.go b/pkg/tools/toolloop.go
index 1302079b4..e63361342 100644
--- a/pkg/tools/toolloop.go
+++ b/pkg/tools/toolloop.go
@@ -97,8 +97,9 @@ func RunToolLoop(ctx context.Context, config ToolLoopConfig, messages []provider
 	// 6. Build assistant message with tool calls
 	assistantMsg := providers.Message{
-		Role:    "assistant",
-		Content: response.Content,
+		Role:             "assistant",
+		Content:          response.Content,
+		ReasoningContent: response.ReasoningContent, // Preserve for thinking models (e.g., GLM-Z1)
 	}
 	for _, tc := range response.ToolCalls {
 		argumentsJSON, _ := json.Marshal(tc.Arguments)