File size: 3,223 Bytes
1766992
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
package services

import (
	"bytes"
	"github.com/libaxuan/cursor2api-go/models"
	"github.com/libaxuan/cursor2api-go/utils"
	"encoding/json"
	"time"
)

// BuildResponseUsage converts chat-completions usage into responses usage.
// A fully zero usage is treated as "no usage reported" and maps to nil so
// the field can be omitted from the serialized response.
func BuildResponseUsage(usage models.Usage) *models.ResponseUsage {
	if usage.PromptTokens != 0 || usage.CompletionTokens != 0 || usage.TotalTokens != 0 {
		return &models.ResponseUsage{
			InputTokens:  usage.PromptTokens,
			OutputTokens: usage.CompletionTokens,
			TotalTokens:  usage.TotalTokens,
			// Upstream does not break out reasoning tokens, so report zero.
			OutputTokensDetails: &models.ResponseOutputTokensDetails{
				ReasoningTokens: 0,
			},
		}
	}
	return nil
}

// NewResponseFromRequest builds a Responses API response with shared request metadata.
// Most fields are copied straight from req; raw JSON fields are decoded into
// generic values so they serialize back as structured JSON rather than bytes.
func NewResponseFromRequest(req *models.ResponseRequest, id string, createdAt int64, status string, output []interface{}, outputText string, usage *models.ResponseUsage, completedAt *int64) models.Response {
	// Clients expect an array (possibly empty), never null, for output.
	if output == nil {
		output = []interface{}{}
	}

	return models.Response{
		ID:                 id,
		Object:             "response",
		CreatedAt:          createdAt,
		Status:             status,
		CompletedAt:        completedAt,
		Background:         req.Background,
		Instructions:       decodeRaw(req.Instructions),
		MaxOutputTokens:    req.MaxOutputTokens,
		MaxToolCalls:       req.MaxToolCalls,
		Model:              req.Model,
		Output:             output,
		OutputText:         outputText,
		ParallelToolCalls:  resolveParallelToolCalls(req.ParallelToolCalls),
		PreviousResponseID: req.PreviousResponseID,
		Reasoning:          decodeReasoning(req.Reasoning),
		Store:              req.Store,
		Temperature:        req.Temperature,
		Text:               decodeRaw(req.Text),
		ToolChoice:         decodeRaw(req.ToolChoice),
		Tools:              req.Tools,
		TopP:               req.TopP,
		Truncation:         req.Truncation,
		Usage:              usage,
		User:               req.User,
		Metadata:           req.Metadata,
	}
}

// BuildCompletedResponse builds a completed response with a generated ID,
// stamping the current Unix time as both the creation and completion moment.
func BuildCompletedResponse(req *models.ResponseRequest, output []interface{}, outputText string, usage *models.ResponseUsage) models.Response {
	completedAt := time.Now().Unix()
	return NewResponseFromRequest(req, utils.GenerateResponseID(), completedAt, "completed", output, outputText, usage, &completedAt)
}

// resolveParallelToolCalls dereferences an optional boolean flag,
// defaulting to false when the request omitted it.
func resolveParallelToolCalls(raw *bool) bool {
	if raw != nil {
		return *raw
	}
	return false
}

// decodeRaw parses a raw JSON field into a generic value. Empty input,
// literal "null", and malformed JSON all yield nil so optional fields
// simply disappear from the response instead of erroring.
func decodeRaw(raw json.RawMessage) interface{} {
	payload := bytes.TrimSpace(raw)
	switch {
	case len(payload) == 0, string(payload) == "null":
		return nil
	}
	var decoded interface{}
	if json.Unmarshal(payload, &decoded) != nil {
		// Malformed JSON is intentionally swallowed: treat it as absent.
		return nil
	}
	return decoded
}

// decodeReasoning parses the request's reasoning field into a typed struct.
// Missing, "null", or malformed JSON yields nil; otherwise only the known
// "effort" (string) and "summary" keys are carried over.
func decodeReasoning(raw json.RawMessage) *models.ResponseReasoning {
	payload := bytes.TrimSpace(raw)
	if len(payload) == 0 || string(payload) == "null" {
		return nil
	}
	var fields map[string]interface{}
	if json.Unmarshal(payload, &fields) != nil {
		// Malformed reasoning JSON is treated as if it were absent.
		return nil
	}
	result := &models.ResponseReasoning{}
	if effort, ok := fields["effort"].(string); ok {
		result.Effort = &effort
	}
	if summary, ok := fields["summary"]; ok {
		result.Summary = summary
	}
	return result
}