From fcb1029b0d756938d336540eb646017190959d32 Mon Sep 17 00:00:00 2001
From: Donnie Adams
Date: Wed, 26 Mar 2025 10:38:28 -0400
Subject: [PATCH 1/8] enhance: forward OpenAI computer-use models to responses
 API

Signed-off-by: Donnie Adams
---
 deepseek-model-provider/go.mod          |   2 -
 deepseek-model-provider/go.sum          |   2 -
 groq-model-provider/go.mod              |   2 -
 groq-model-provider/go.sum              |   2 -
 ollama-model-provider/go.mod            |   2 -
 ollama-model-provider/go.sum            |   2 -
 openai-model-provider/go.mod            |  12 +-
 openai-model-provider/go.sum            |  16 +-
 openai-model-provider/main.go           | 197 +++++++++++++++++++++++-
 openai-model-provider/proxy/validate.go |   3 +-
 vllm-model-provider/go.mod              |   2 +-
 vllm-model-provider/go.sum              |   4 +-
 xai-model-provider/go.mod               |   2 -
 xai-model-provider/go.sum               |   2 -
 14 files changed, 221 insertions(+), 29 deletions(-)

diff --git a/deepseek-model-provider/go.mod b/deepseek-model-provider/go.mod
index cc02eb300..83c5f1a95 100644
--- a/deepseek-model-provider/go.mod
+++ b/deepseek-model-provider/go.mod
@@ -5,5 +5,3 @@ go 1.23.4
 replace github.com/obot-platform/tools/openai-model-provider => ../openai-model-provider
 
 require github.com/obot-platform/tools/openai-model-provider v0.0.0
-
-require github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 // indirect
diff --git a/deepseek-model-provider/go.sum b/deepseek-model-provider/go.sum
index 35cd45d79..e69de29bb 100644
--- a/deepseek-model-provider/go.sum
+++ b/deepseek-model-provider/go.sum
@@ -1,2 +0,0 @@
-github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 h1:rfriXe+FFqZ5fZ+wGzLUivrq7Fyj2xfRdZjDsHf6Ps0=
-github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo=
diff --git a/groq-model-provider/go.mod b/groq-model-provider/go.mod
index ee8b73acf..39b62b727 100644
--- a/groq-model-provider/go.mod
+++ b/groq-model-provider/go.mod
@@ -5,5 +5,3 @@ go 1.23.4
 replace github.com/obot-platform/tools/openai-model-provider => ../openai-model-provider
 
 require github.com/obot-platform/tools/openai-model-provider v0.0.0
-
-require github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 // indirect
diff --git a/groq-model-provider/go.sum b/groq-model-provider/go.sum
index 35cd45d79..e69de29bb 100644
--- a/groq-model-provider/go.sum
+++ b/groq-model-provider/go.sum
@@ -1,2 +0,0 @@
-github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 h1:rfriXe+FFqZ5fZ+wGzLUivrq7Fyj2xfRdZjDsHf6Ps0=
-github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo=
diff --git a/ollama-model-provider/go.mod b/ollama-model-provider/go.mod
index 3c8ee6afc..39d5ddb07 100644
--- a/ollama-model-provider/go.mod
+++ b/ollama-model-provider/go.mod
@@ -5,5 +5,3 @@ go 1.23.4
 replace github.com/obot-platform/tools/openai-model-provider => ../openai-model-provider
 
 require github.com/obot-platform/tools/openai-model-provider v0.0.0
-
-require github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 // indirect
diff --git a/ollama-model-provider/go.sum b/ollama-model-provider/go.sum
index 35cd45d79..e69de29bb 100644
--- a/ollama-model-provider/go.sum
+++ b/ollama-model-provider/go.sum
@@ -1,2 +0,0 @@
-github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 h1:rfriXe+FFqZ5fZ+wGzLUivrq7Fyj2xfRdZjDsHf6Ps0=
-github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo=
diff --git a/openai-model-provider/go.mod b/openai-model-provider/go.mod
index 4435c1827..9df2829f8 100644
--- a/openai-model-provider/go.mod
+++ b/openai-model-provider/go.mod
@@ -2,4 +2,14 @@ module github.com/obot-platform/tools/openai-model-provider
 
 go 1.23.4
 
-require github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789
+require (
+	github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d
+	github.com/openai/openai-go v0.1.0-beta.2
+)
+
+require (
+	github.com/tidwall/gjson v1.14.4 // indirect
+	github.com/tidwall/match v1.1.1 // indirect
+	github.com/tidwall/pretty v1.2.1 // indirect
+	github.com/tidwall/sjson v1.2.5 // indirect
+)
diff --git a/openai-model-provider/go.sum b/openai-model-provider/go.sum
index 35cd45d79..98aab82c7 100644
--- a/openai-model-provider/go.sum
+++ b/openai-model-provider/go.sum
@@ -1,2 +1,14 @@
-github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 h1:rfriXe+FFqZ5fZ+wGzLUivrq7Fyj2xfRdZjDsHf6Ps0=
-github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo=
+github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d h1:p5uqZufDIMQzAALblZFkr8fwbnZbFXbBCR1ZMAFylXk=
+github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo=
+github.com/openai/openai-go v0.1.0-beta.2 h1:Ra5nCFkbEl9w+UJwAciC4kqnIBUCcJazhmMA0/YN894=
+github.com/openai/openai-go v0.1.0-beta.2/go.mod h1:g461MYGXEXBVdV5SaR/5tNzNbSfwTBBefwc+LlDCK0Y=
+github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM=
+github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
+github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
+github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
diff --git a/openai-model-provider/main.go b/openai-model-provider/main.go
index 62073e72b..7456fa0f5 100644
--- a/openai-model-provider/main.go
+++ b/openai-model-provider/main.go
@@ -1,13 +1,22 @@
 package main
 
 import (
+	"bytes"
+	"encoding/json"
 	"fmt"
+	"io"
 	"net/http"
 	"net/http/httputil"
 	"os"
+	"strings"
 
+	gopenai "github.com/gptscript-ai/chat-completion-client"
 	"github.com/obot-platform/tools/openai-model-provider/openaiproxy"
 	"github.com/obot-platform/tools/openai-model-provider/proxy"
+	"github.com/openai/openai-go/packages/param"
+	"github.com/openai/openai-go/responses"
+	"github.com/openai/openai-go/shared"
+	"github.com/openai/openai-go/shared/constant"
 )
 
 func main() {
@@ -23,12 +32,14 @@ func main() {
 	}
 
 	cfg := &proxy.Config{
-		APIKey:                apiKey,
-		ListenPort:            port,
-		BaseURL:               "https://api.openai.com/v1",
-		RewriteModelsFn:       proxy.DefaultRewriteModelsResponse,
-		Name:                  "OpenAI",
-		CustomPathHandleFuncs: map[string]http.HandlerFunc{},
+		APIKey:          apiKey,
+		ListenPort:      port,
+		BaseURL:         "https://api.openai.com/v1",
+		RewriteModelsFn: proxy.DefaultRewriteModelsResponse,
+		Name:            "OpenAI",
+		CustomPathHandleFuncs: map[string]http.HandlerFunc{
+			"/v1/": translateResponsesAPI(apiKey),
+		},
 	}
 
 	openaiProxy := openaiproxy.NewServer(cfg)
@@ -48,3 +59,177 @@ func main() {
 		panic(err)
 	}
 }
+
+type responsesRequestTranslator struct {
+	apiKey        string
+	wasTranslated bool
+}
+
+func translateResponsesAPI(apiKey string) func(rw http.ResponseWriter, req *http.Request) {
+	return func(rw http.ResponseWriter, req *http.Request) {
+		r := &responsesRequestTranslator{apiKey: apiKey}
+		(&httputil.ReverseProxy{
+			Director:       r.openaiProxyWithComputerUse,
+			ModifyResponse: r.modifyResponsesAPIResponse,
+		}).ServeHTTP(rw, req)
+	}
+}
+
+func (r *responsesRequestTranslator) openaiProxyWithComputerUse(req *http.Request) {
+	req.URL.Scheme = "https"
+	req.URL.Host = "api.openai.com"
+	req.Host = req.URL.Host
+	req.Body, req.URL.Path, r.wasTranslated = rewriteBody(req.Body, req.URL.Path)
+
+	req.Header.Set("Authorization", "Bearer "+r.apiKey)
+}
+
+func rewriteBody(body io.ReadCloser, path string) (io.ReadCloser, string, bool) {
+	if body == nil || path != proxy.ChatCompletionsPath {
+		return body, path, false
+	}
+
+	bodyBytes, err := io.ReadAll(body)
+	if err != nil {
+		return body, path, false
+	}
+
+	var chatCompletionRequest gopenai.ChatCompletionRequest
+	if err := json.Unmarshal(bodyBytes, &chatCompletionRequest); err != nil || !strings.HasPrefix(chatCompletionRequest.Model, "computer-use-") {
+		// Best effort, just return the original body and path on error.
+		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, false
+	}
+
+	var (
+		text         responses.ResponseTextConfigParam
+		inputItems   []responses.ResponseInputItemUnionParam
+		tools        []responses.ToolUnionParam
+		instructions string
+	)
+	// Translate the response format
+	if chatCompletionRequest.ResponseFormat != nil {
+		switch chatCompletionRequest.ResponseFormat.Type {
+		case gopenai.ChatCompletionResponseFormatTypeText:
+			text = responses.ResponseTextConfigParam{
+				Format: responses.ResponseFormatTextConfigUnionParam{
+					OfText: &shared.ResponseFormatTextParam{
+						Type: constant.Text(gopenai.ChatCompletionResponseFormatTypeText),
+					},
+				},
+			}
+		case gopenai.ChatCompletionResponseFormatTypeJSONObject:
+			text = responses.ResponseTextConfigParam{
+				Format: responses.ResponseFormatTextConfigUnionParam{
+					OfJSONObject: &shared.ResponseFormatJSONObjectParam{
+						Type: constant.JSONObject(gopenai.ChatCompletionResponseFormatTypeJSONObject),
+					},
+				},
+			}
+		default:
+			// Best effort log and move on.
+			fmt.Fprintln(os.Stderr, "Unsupported response format type:", chatCompletionRequest.ResponseFormat.Type)
+		}
+	}
+	// Translate the initial system message to instructions
+	if len(chatCompletionRequest.Messages) > 0 && (chatCompletionRequest.Messages[0].Role == gopenai.ChatMessageRoleSystem || chatCompletionRequest.Messages[0].Role == "developer") {
+		instructions = chatCompletionRequest.Messages[0].Content
+		chatCompletionRequest.Messages = chatCompletionRequest.Messages[1:]
+	}
+	// Translate the messages to input items
+	inputItems = make([]responses.ResponseInputItemUnionParam, 0, len(chatCompletionRequest.Messages))
+	for _, message := range chatCompletionRequest.Messages {
+		switch {
+		case len(message.ToolCalls) > 0:
+			for _, call := range message.ToolCalls {
+				inputItems = append(inputItems, responses.ResponseInputItemParamOfFunctionCall(
+					call.Function.Arguments,
+					call.ID,
+					call.Function.Name,
+				))
+			}
+		case message.Role == gopenai.ChatMessageRoleFunction:
+			inputItems = append(inputItems, responses.ResponseInputItemParamOfFunctionCallOutput(
+				message.ToolCallID,
+				message.Content,
+			))
+		case message.Role == gopenai.ChatMessageRoleUser || message.Role == gopenai.ChatMessageRoleAssistant:
+			inputItems = append(inputItems, responses.ResponseInputItemParamOfMessage(
+				message.Content,
+				responses.EasyInputMessageRole(message.Role),
+			))
+		default:
+			// Best effort log and move on.
+			fmt.Fprintln(os.Stderr, "Unsupported message role:", message.Role)
+		}
+	}
+	// Translate the tools to tool union params
+	var parameters map[string]any
+	for _, tool := range chatCompletionRequest.Tools {
+		parameters, _ = tool.Function.Parameters.(map[string]any)
+		tools = append(tools, responses.ToolParamOfFunction(
+			tool.Function.Name,
+			parameters,
+			true,
+		))
+	}
+	// Translate the chat completion request to a responses API request
+	responsesRequest := responses.ResponseNewParams{
+		Input: responses.ResponseNewParamsInputUnion{
+			OfInputItemList: inputItems,
+		},
+		Model: shared.ResponsesModel(chatCompletionRequest.Model),
+		Instructions: param.Opt[string]{
+			Value: instructions,
+		},
+		MaxOutputTokens: param.Opt[int64]{
+			Value: int64(chatCompletionRequest.MaxTokens),
+		},
+		ParallelToolCalls: param.Opt[bool]{
+			Value: true,
+		},
+		PreviousResponseID: param.Opt[string]{
+			Value: "",
+		},
+		Store: param.Opt[bool]{
+			Value: false,
+		},
+		Temperature: param.Opt[float64]{
+			Value: float64(*chatCompletionRequest.Temperature),
+		},
+		TopP: param.Opt[float64]{
+			Value: float64(chatCompletionRequest.TopP),
+		},
+		User: param.Opt[string]{
+			Value: chatCompletionRequest.User,
+		},
+		Reasoning:  shared.ReasoningParam{},
+		Include:    nil,
+		Metadata:   nil,
+		Truncation: responses.ResponseNewParamsTruncationDisabled,
+		Text:       text,
+		ToolChoice: responses.ResponseNewParamsToolChoiceUnion{
+			OfToolChoiceMode: param.Opt[responses.ToolChoiceOptions]{
+				Value: responses.ToolChoiceOptionsAuto,
+			},
+		},
+		Tools: tools,
+	}
+
+	// Marshal the responses request to JSON
+	responsesRequestBytes, err := json.Marshal(responsesRequest)
+	if err != nil {
+		// Best effort, just return the original body and path on error.
+		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, false
+	}
+
+	// Return the new body and path
+	return io.NopCloser(bytes.NewBuffer(responsesRequestBytes)), "/v1/responses", true
+}
+
+func (r *responsesRequestTranslator) modifyResponsesAPIResponse(resp *http.Response) error {
+	if r.wasTranslated || resp.StatusCode != http.StatusOK {
+		return nil
+	}
+
+	return nil
+}
diff --git a/openai-model-provider/proxy/validate.go b/openai-model-provider/proxy/validate.go
index 871938581..cfb909ee9 100644
--- a/openai-model-provider/proxy/validate.go
+++ b/openai-model-provider/proxy/validate.go
@@ -2,6 +2,7 @@ package proxy
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"log/slog"
 	"net/http"
@@ -12,7 +13,7 @@ import (
 func handleValidationError(loggerPath, msg string) error {
 	slog.Error(msg, "logger", loggerPath)
 	fmt.Printf("{\"error\": \"%s\"}\n", msg)
-	return fmt.Errorf(msg)
+	return errors.New(msg)
 }
 
 func (cfg *Config) Validate(toolPath string) error {
diff --git a/vllm-model-provider/go.mod b/vllm-model-provider/go.mod
index 0632e4acb..46bd93e71 100644
--- a/vllm-model-provider/go.mod
+++ b/vllm-model-provider/go.mod
@@ -5,6 +5,6 @@ go 1.23.4
 replace github.com/obot-platform/tools/openai-model-provider => ../openai-model-provider
 
 require (
-	github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789
+	github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d
 	github.com/obot-platform/tools/openai-model-provider v0.0.0
 )
diff --git a/vllm-model-provider/go.sum b/vllm-model-provider/go.sum
index 35cd45d79..235ae5bee 100644
--- a/vllm-model-provider/go.sum
+++ b/vllm-model-provider/go.sum
@@ -1,2 +1,2 @@
-github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 h1:rfriXe+FFqZ5fZ+wGzLUivrq7Fyj2xfRdZjDsHf6Ps0=
-github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo=
+github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d h1:p5uqZufDIMQzAALblZFkr8fwbnZbFXbBCR1ZMAFylXk=
+github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo=
diff --git a/xai-model-provider/go.mod b/xai-model-provider/go.mod
index 1787bd0b8..1645373a7 100644
--- a/xai-model-provider/go.mod
+++ b/xai-model-provider/go.mod
@@ -5,5 +5,3 @@ go 1.23.4
 replace github.com/obot-platform/tools/openai-model-provider => ../openai-model-provider
 
 require github.com/obot-platform/tools/openai-model-provider v0.0.0
-
-require github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 // indirect
diff --git a/xai-model-provider/go.sum b/xai-model-provider/go.sum
index 35cd45d79..e69de29bb 100644
--- a/xai-model-provider/go.sum
+++ b/xai-model-provider/go.sum
@@ -1,2 +0,0 @@
-github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789 h1:rfriXe+FFqZ5fZ+wGzLUivrq7Fyj2xfRdZjDsHf6Ps0=
-github.com/gptscript-ai/chat-completion-client v0.0.0-20250123123106-c86554320789/go.mod h1:7P/o6/IWa1KqsntVf68hSnLKuu3+xuqm6lYhch1w4jo=

From dd33b56ce3db6c19421760d99e7b59390bce162f Mon Sep 17 00:00:00 2001
From: Donnie Adams
Date: Wed, 26 Mar 2025 10:56:33 -0400
Subject: [PATCH 2/8] enhance: add stream support

The way the SDK does streaming for the responses API is weird, so we do
the same thing here.
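A rough sketch of the mechanism, for reviewers: openai-go's
responses.ResponseNewParams does not appear to expose a stream field
(the SDK drives streaming out of band), so the flag is spliced into the
already-marshaled request JSON via github.com/tidwall/sjson. The helper
below is illustrative only, not part of this patch:

	// enableStream splices "stream": true into an already-marshaled
	// Responses API request body, leaving the rest of the JSON untouched.
	func enableStream(payload []byte) ([]byte, error) {
		return sjson.SetBytes(payload, "stream", true)
	}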
Signed-off-by: Donnie Adams
---
 openai-model-provider/main.go | 28 +++++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)

diff --git a/openai-model-provider/main.go b/openai-model-provider/main.go
index 7456fa0f5..7e4c5f388 100644
--- a/openai-model-provider/main.go
+++ b/openai-model-provider/main.go
@@ -17,6 +17,7 @@ import (
 	"github.com/openai/openai-go/responses"
 	"github.com/openai/openai-go/shared"
 	"github.com/openai/openai-go/shared/constant"
+	"github.com/tidwall/sjson"
 )
 
 func main() {
@@ -76,28 +77,33 @@ func translateResponsesAPI(apiKey string) func(rw http.ResponseWriter, req *http
 }
 
 func (r *responsesRequestTranslator) openaiProxyWithComputerUse(req *http.Request) {
+	var contentLength int64
 	req.URL.Scheme = "https"
 	req.URL.Host = "api.openai.com"
 	req.Host = req.URL.Host
-	req.Body, req.URL.Path, r.wasTranslated = rewriteBody(req.Body, req.URL.Path)
+	req.Body, req.URL.Path, contentLength, r.wasTranslated = rewriteBody(req.Body, req.URL.Path)
+	if r.wasTranslated {
+		req.ContentLength = contentLength
+		req.Header.Set("Content-Length", fmt.Sprintf("%d", contentLength))
+	}
 
 	req.Header.Set("Authorization", "Bearer "+r.apiKey)
 }
 
-func rewriteBody(body io.ReadCloser, path string) (io.ReadCloser, string, bool) {
+func rewriteBody(body io.ReadCloser, path string) (io.ReadCloser, string, int64, bool) {
 	if body == nil || path != proxy.ChatCompletionsPath {
-		return body, path, false
+		return body, path, 0, false
 	}
 
 	bodyBytes, err := io.ReadAll(body)
 	if err != nil {
-		return body, path, false
+		return body, path, 0, false
 	}
 
 	var chatCompletionRequest gopenai.ChatCompletionRequest
 	if err := json.Unmarshal(bodyBytes, &chatCompletionRequest); err != nil || !strings.HasPrefix(chatCompletionRequest.Model, "computer-use-") {
 		// Best effort, just return the original body and path on error.
-		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, false
+		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0, false
 	}
 
 	var (
@@ -219,11 +225,19 @@ func rewriteBody(body io.ReadCloser, path string) (io.ReadCloser, string, bool)
 	responsesRequestBytes, err := json.Marshal(responsesRequest)
 	if err != nil {
 		// Best effort, just return the original body and path on error.
-		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, false
+		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0, false
+	}
+
+	if chatCompletionRequest.Stream {
+		responsesRequestBytes, err = sjson.SetBytes(responsesRequestBytes, "stream", true)
+		if err != nil {
+			// Best effort, just return the original body and path on error.
+			return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0, false
+		}
 	}
 
 	// Return the new body and path
-	return io.NopCloser(bytes.NewBuffer(responsesRequestBytes)), "/v1/responses", true
+	return io.NopCloser(bytes.NewBuffer(responsesRequestBytes)), "/v1/responses", int64(len(responsesRequestBytes)), true
 }
 
 func (r *responsesRequestTranslator) modifyResponsesAPIResponse(resp *http.Response) error {

From 2e9ff107ae5e06429cea5d96e025049f53971c2c Mon Sep 17 00:00:00 2001
From: Donnie Adams
Date: Wed, 26 Mar 2025 11:00:00 -0400
Subject: [PATCH 3/8] Track streaming for response processing

Signed-off-by: Donnie Adams
---
 openai-model-provider/main.go | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/openai-model-provider/main.go b/openai-model-provider/main.go
index 7e4c5f388..1f018c545 100644
--- a/openai-model-provider/main.go
+++ b/openai-model-provider/main.go
@@ -64,6 +64,7 @@ func main() {
 type responsesRequestTranslator struct {
 	apiKey        string
 	wasTranslated bool
+	streaming     bool
 }
 
 func translateResponsesAPI(apiKey string) func(rw http.ResponseWriter, req *http.Request) {
@@ -81,7 +82,7 @@ func (r *responsesRequestTranslator) openaiProxyWithComputerUse(req *http.Reques
 	req.URL.Scheme = "https"
 	req.URL.Host = "api.openai.com"
 	req.Host = req.URL.Host
-	req.Body, req.URL.Path, contentLength, r.wasTranslated = rewriteBody(req.Body, req.URL.Path)
+	req.Body, req.URL.Path, contentLength = r.rewriteBody(req.Body, req.URL.Path)
 	if r.wasTranslated {
 		req.ContentLength = contentLength
 		req.Header.Set("Content-Length", fmt.Sprintf("%d", contentLength))
@@ -90,20 +91,20 @@ func (r *responsesRequestTranslator) openaiProxyWithComputerUse(req *http.Reques
 	req.Header.Set("Authorization", "Bearer "+r.apiKey)
 }
 
-func rewriteBody(body io.ReadCloser, path string) (io.ReadCloser, string, int64, bool) {
+func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string) (io.ReadCloser, string, int64) {
 	if body == nil || path != proxy.ChatCompletionsPath {
-		return body, path, 0, false
+		return body, path, 0
 	}
 
 	bodyBytes, err := io.ReadAll(body)
 	if err != nil {
-		return body, path, 0, false
+		return body, path, 0
 	}
 
 	var chatCompletionRequest gopenai.ChatCompletionRequest
 	if err := json.Unmarshal(bodyBytes, &chatCompletionRequest); err != nil || !strings.HasPrefix(chatCompletionRequest.Model, "computer-use-") {
 		// Best effort, just return the original body and path on error.
-		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0, false
+		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
 	}
 
 	var (
@@ -225,19 +226,21 @@ func rewriteBody(body io.ReadCloser, path string) (io.ReadCloser, string, int64,
 	responsesRequestBytes, err := json.Marshal(responsesRequest)
 	if err != nil {
 		// Best effort, just return the original body and path on error.
-		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0, false
+		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
 	}
 
 	if chatCompletionRequest.Stream {
+		r.streaming = true
 		responsesRequestBytes, err = sjson.SetBytes(responsesRequestBytes, "stream", true)
 		if err != nil {
 			// Best effort, just return the original body and path on error.
-			return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0, false
+			return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
 		}
 	}
 
+	r.wasTranslated = true
 	// Return the new body and path
-	return io.NopCloser(bytes.NewBuffer(responsesRequestBytes)), "/v1/responses", int64(len(responsesRequestBytes)), true
+	return io.NopCloser(bytes.NewBuffer(responsesRequestBytes)), "/v1/responses", int64(len(responsesRequestBytes))
 }
 
 func (r *responsesRequestTranslator) modifyResponsesAPIResponse(resp *http.Response) error {

From 882434cb5797630c19af3be3a4af76cf50fe588a Mon Sep 17 00:00:00 2001
From: Donnie Adams
Date: Wed, 26 Mar 2025 11:00:32 -0400
Subject: [PATCH 4/8] Go mod tidy

Signed-off-by: Donnie Adams
---
 openai-model-provider/go.mod | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/openai-model-provider/go.mod b/openai-model-provider/go.mod
index 9df2829f8..b869a7c28 100644
--- a/openai-model-provider/go.mod
+++ b/openai-model-provider/go.mod
@@ -5,11 +5,11 @@ go 1.23.4
 require (
 	github.com/gptscript-ai/chat-completion-client v0.0.0-20250224164718-139cb4507b1d
 	github.com/openai/openai-go v0.1.0-beta.2
+	github.com/tidwall/sjson v1.2.5
 )
 
 require (
 	github.com/tidwall/gjson v1.14.4 // indirect
 	github.com/tidwall/match v1.1.1 // indirect
 	github.com/tidwall/pretty v1.2.1 // indirect
-	github.com/tidwall/sjson v1.2.5 // indirect
 )

From b6f8bf1f606bf0c44ac57b798b3975411eb148a8 Mon Sep 17 00:00:00 2001
From: Donnie Adams
Date: Wed, 26 Mar 2025 11:58:52 -0400
Subject: [PATCH 5/8] Non-streaming response translation

Signed-off-by: Donnie Adams
---
 openai-model-provider/main.go | 90 +++++++++++++++++++++++++++++++++--
 1 file changed, 87 insertions(+), 3 deletions(-)

diff --git a/openai-model-provider/main.go b/openai-model-provider/main.go
index 1f018c545..6ffff9583 100644
--- a/openai-model-provider/main.go
+++ b/openai-model-provider/main.go
@@ -93,17 +93,25 @@ func (r *responsesRequestTranslator) openaiProxyWithComputerUse(req *http.Reques
 
 func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string) (io.ReadCloser, string, int64) {
 	if body == nil || path != proxy.ChatCompletionsPath {
+		// Not a chat completion request, just return the original body and path.
 		return body, path, 0
 	}
 
 	bodyBytes, err := io.ReadAll(body)
 	if err != nil {
+		// Best effort, just return the original body and path on error.
+		fmt.Fprintf(os.Stderr, "Failed to read request body: %v\n", err)
 		return body, path, 0
 	}
 
 	var chatCompletionRequest gopenai.ChatCompletionRequest
-	if err := json.Unmarshal(bodyBytes, &chatCompletionRequest); err != nil || !strings.HasPrefix(chatCompletionRequest.Model, "computer-use-") {
+	if err := json.Unmarshal(bodyBytes, &chatCompletionRequest); err != nil {
 		// Best effort, just return the original body and path on error.
+		fmt.Fprintf(os.Stderr, "Failed to unmarshal chat completion request: %v\n", err)
+		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
+	}
+	if !strings.HasPrefix(chatCompletionRequest.Model, "computer-use-") {
+		// Not a computer use model, just return the original body and path.
 		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
 	}
 
@@ -134,7 +142,7 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 			}
 		default:
 			// Best effort log and move on.
-			fmt.Fprintln(os.Stderr, "Unsupported response format type:", chatCompletionRequest.ResponseFormat.Type)
+			fmt.Fprintf(os.Stderr, "Unsupported response format type: %v\n", chatCompletionRequest.ResponseFormat.Type)
 		}
 	}
 	// Translate the initial system message to instructions
@@ -166,7 +174,7 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 			))
 		default:
 			// Best effort log and move on.
-			fmt.Fprintln(os.Stderr, "Unsupported message role:", message.Role)
+			fmt.Fprintf(os.Stderr, "Unsupported message role: %v\n", message.Role)
 		}
 	}
 	// Translate the tools to tool union params
@@ -226,6 +234,7 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 	responsesRequestBytes, err := json.Marshal(responsesRequest)
 	if err != nil {
 		// Best effort, just return the original body and path on error.
+		fmt.Fprintf(os.Stderr, "Failed to marshal responses request: %v\n", err)
 		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
 	}
 
@@ -234,6 +243,7 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 		responsesRequestBytes, err = sjson.SetBytes(responsesRequestBytes, "stream", true)
 		if err != nil {
 			// Best effort, just return the original body and path on error.
+			fmt.Fprintf(os.Stderr, "Failed to set stream in responses request: %v\n", err)
 			return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
 		}
 	}
@@ -248,5 +258,79 @@ func (r *responsesRequestTranslator) modifyResponsesAPIResponse(resp *http.Respo
 		return nil
 	}
 
+	if !r.streaming {
+		return handleNonStreamingResponse(resp)
+	}
+
+	return nil
+}
+
+func handleNonStreamingResponse(resp *http.Response) error {
+	var responsesResponse responses.Response
+	if err := json.NewDecoder(resp.Body).Decode(&responsesResponse); err != nil {
+		return fmt.Errorf("failed to decode chat completion response: %w", err)
+	}
+
+	choices := make([]gopenai.ChatCompletionChoice, 0, len(responsesResponse.Output))
+	for i, choice := range responsesResponse.Output {
+		switch choice.Type {
+		case "message":
+			// Convert all outputs
+			for _, content := range choice.Content {
+				switch content.Type {
+				case "output_text":
+					choices = append(choices, gopenai.ChatCompletionChoice{
+						Index: i,
+						Message: gopenai.ChatCompletionMessage{
+							Role:    string(choice.Role),
+							Content: content.AsOutputText().Text,
+						},
+						FinishReason: gopenai.FinishReasonStop,
+					})
+				case "refusal":
+					choices = append(choices, gopenai.ChatCompletionChoice{
+						Index: i,
+						Message: gopenai.ChatCompletionMessage{
+							Role:    string(choice.Role),
+							Content: content.AsRefusal().Refusal,
+						},
+						FinishReason: gopenai.FinishReasonContentFilter,
+					})
+				}
+			}
+		case "function_call":
+			choices = append(choices, gopenai.ChatCompletionChoice{
+				Index: i,
+				Message: gopenai.ChatCompletionMessage{
+					Role:    string(choice.Role),
+					Content: choice.AsFunctionCall().Name,
+				},
+				FinishReason: gopenai.FinishReasonToolCalls,
+			})
+		}
+	}
+
+	chatCompletionResponse := gopenai.ChatCompletionResponse{
+		ID:      responsesResponse.ID,
+		Object:  "chat.completion",
+		Created: int64(responsesResponse.CreatedAt),
+		Model:   responsesResponse.Model,
+		Choices: choices,
+		Usage: gopenai.Usage{
+			PromptTokens:     int(responsesResponse.Usage.InputTokens),
+			CompletionTokens: int(responsesResponse.Usage.OutputTokens),
+			TotalTokens:      int(responsesResponse.Usage.TotalTokens),
+		},
+		SystemFingerprint: "",
+	}
+
+	b, err := json.Marshal(chatCompletionResponse)
+	if err != nil {
+		return fmt.Errorf("failed to marshal chat completion response: %w", err)
+	}
+
+	resp.Body = io.NopCloser(bytes.NewReader(b))
+	resp.ContentLength = int64(len(b))
+	resp.Header.Set("Content-Length", fmt.Sprintf("%d", len(b)))
 
 	return nil
 }

From dfdda13ab76a198c653761bf569c3316edb5a0f6 Mon Sep 17 00:00:00 2001
From: Donnie Adams
Date: Wed, 26 Mar 2025 12:45:23 -0400
Subject: [PATCH 6/8] Actually use proxy when computer use is set

Signed-off-by: Donnie Adams
---
 openai-model-provider/main.go        | 30 ++++++++++++++--------------
 openai-model-provider/proxy/proxy.go |  1 +
 2 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/openai-model-provider/main.go b/openai-model-provider/main.go
index 6ffff9583..e5ce096f7 100644
--- a/openai-model-provider/main.go
+++ b/openai-model-provider/main.go
@@ -38,16 +38,11 @@ func main() {
 		BaseURL:         "https://api.openai.com/v1",
 		RewriteModelsFn: proxy.DefaultRewriteModelsResponse,
 		Name:            "OpenAI",
-		CustomPathHandleFuncs: map[string]http.HandlerFunc{
-			"/v1/": translateResponsesAPI(apiKey),
-		},
 	}
 
-	openaiProxy := openaiproxy.NewServer(cfg)
-	reverseProxy := &httputil.ReverseProxy{
-		Director: openaiProxy.Openaiv1ProxyRedirect,
+	cfg.CustomPathHandleFuncs = map[string]http.HandlerFunc{
+		"/v1/": translateResponsesAPI(apiKey, openaiproxy.NewServer(cfg)),
 	}
-	cfg.CustomPathHandleFuncs["/v1/"] = reverseProxy.ServeHTTP
 
 	if len(os.Args) > 1 && os.Args[1] == "validate" {
 		if err := cfg.Validate("/tools/openai-model-provider/validate"); err != nil {
@@ -62,14 +57,16 @@ func main() {
 }
 
 type responsesRequestTranslator struct {
+	openAIProxy   *openaiproxy.Server
 	apiKey        string
 	wasTranslated bool
 	streaming     bool
 }
 
-func translateResponsesAPI(apiKey string) func(rw http.ResponseWriter, req *http.Request) {
+func translateResponsesAPI(apiKey string, openAIProxy *openaiproxy.Server) func(rw http.ResponseWriter, req *http.Request) {
+	fmt.Println("Translating responses API request")
 	return func(rw http.ResponseWriter, req *http.Request) {
-		r := &responsesRequestTranslator{apiKey: apiKey}
+		r := &responsesRequestTranslator{apiKey: apiKey, openAIProxy: openAIProxy}
 		(&httputil.ReverseProxy{
 			Director:       r.openaiProxyWithComputerUse,
 			ModifyResponse: r.modifyResponsesAPIResponse,
@@ -86,12 +83,15 @@ func (r *responsesRequestTranslator) openaiProxyWithComputerUse(req *http.Reques
 	if r.wasTranslated {
 		req.ContentLength = contentLength
 		req.Header.Set("Content-Length", fmt.Sprintf("%d", contentLength))
+	} else {
+		r.openAIProxy.Openaiv1ProxyRedirect(req)
 	}
 
 	req.Header.Set("Authorization", "Bearer "+r.apiKey)
 }
 
 func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string) (io.ReadCloser, string, int64) {
+	fmt.Fprintf(os.Stdout, "REWRITING REQUEST BODY: %s\n", path)
 	if body == nil || path != proxy.ChatCompletionsPath {
 		// Not a chat completion request, just return the original body and path.
 		return body, path, 0
@@ -100,14 +100,14 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 	bodyBytes, err := io.ReadAll(body)
 	if err != nil {
 		// Best effort, just return the original body and path on error.
-		fmt.Fprintf(os.Stderr, "Failed to read request body: %v\n", err)
+		fmt.Fprintf(os.Stdout, "Failed to read request body: %v\n", err)
 		return body, path, 0
 	}
 
 	var chatCompletionRequest gopenai.ChatCompletionRequest
 	if err := json.Unmarshal(bodyBytes, &chatCompletionRequest); err != nil {
 		// Best effort, just return the original body and path on error.
-		fmt.Fprintf(os.Stderr, "Failed to unmarshal chat completion request: %v\n", err)
+		fmt.Fprintf(os.Stdout, "Failed to unmarshal chat completion request: %v\n", err)
 		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
 	}
 	if !strings.HasPrefix(chatCompletionRequest.Model, "computer-use-") {
@@ -142,7 +142,7 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 		}
 	default:
 		// Best effort log and move on.
-		fmt.Fprintf(os.Stderr, "Unsupported response format type: %v\n", chatCompletionRequest.ResponseFormat.Type)
+		fmt.Fprintf(os.Stdout, "Unsupported response format type: %v\n", chatCompletionRequest.ResponseFormat.Type)
 	}
 }
 // Translate the initial system message to instructions
@@ -174,7 +174,7 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 		))
 	default:
 		// Best effort log and move on.
-		fmt.Fprintf(os.Stderr, "Unsupported message role: %v\n", message.Role)
+		fmt.Fprintf(os.Stdout, "Unsupported message role: %v\n", message.Role)
 	}
 }
 // Translate the tools to tool union params
@@ -234,7 +234,7 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 	responsesRequestBytes, err := json.Marshal(responsesRequest)
 	if err != nil {
 		// Best effort, just return the original body and path on error.
-		fmt.Fprintf(os.Stderr, "Failed to marshal responses request: %v\n", err)
+		fmt.Fprintf(os.Stdout, "Failed to marshal responses request: %v\n", err)
 		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
 	}
 
@@ -243,7 +243,7 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 		responsesRequestBytes, err = sjson.SetBytes(responsesRequestBytes, "stream", true)
 		if err != nil {
 			// Best effort, just return the original body and path on error.
-			fmt.Fprintf(os.Stderr, "Failed to set stream in responses request: %v\n", err)
+			fmt.Fprintf(os.Stdout, "Failed to set stream in responses request: %v\n", err)
 			return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
 		}
 	}
diff --git a/openai-model-provider/proxy/proxy.go b/openai-model-provider/proxy/proxy.go
index 18a42ba06..b6397c217 100644
--- a/openai-model-provider/proxy/proxy.go
+++ b/openai-model-provider/proxy/proxy.go
@@ -131,6 +131,7 @@ func (s *server) healthz(w http.ResponseWriter, _ *http.Request) {
 }
 
 func (s *server) proxyDirector(req *http.Request) {
+	fmt.Println("Proxying request to OpenAI the lame way")
 	req.URL.Scheme = s.cfg.URL.Scheme
 	req.URL.Host = s.cfg.URL.Host
 	req.URL.Path = s.cfg.URL.JoinPath(strings.TrimPrefix(req.URL.Path, "/v1")).Path // join baseURL with request path - /v1 must be part of baseURL if it's needed

From db621afa973d93bee190d78e6d11bb852f250443 Mon Sep 17 00:00:00 2001
From: Donnie Adams
Date: Thu, 27 Mar 2025 10:48:36 -0400
Subject: [PATCH 7/8] First working non-streaming solution

Signed-off-by: Donnie Adams
---
 openai-model-provider/main.go | 41 +++++++++++++++++++++++------------
 1 file changed, 27 insertions(+), 14 deletions(-)

diff --git a/openai-model-provider/main.go b/openai-model-provider/main.go
index e5ce096f7..1e9c21667 100644
--- a/openai-model-provider/main.go
+++ b/openai-model-provider/main.go
@@ -2,6 +2,7 @@ package main
 
 import (
 	"bytes"
+	"compress/gzip"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -91,8 +92,8 @@ func (r *responsesRequestTranslator) openaiProxyWithComputerUse(req *http.Reques
 }
 
 func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string) (io.ReadCloser, string, int64) {
-	fmt.Fprintf(os.Stdout, "REWRITING REQUEST BODY: %s\n", path)
 	if body == nil || path != proxy.ChatCompletionsPath {
+		fmt.Fprintf(os.Stderr, "Not a chat completion request, just returning original body and path\n")
 		// Not a chat completion request, just return the original body and path.
 		return body, path, 0
 	}
@@ -100,14 +101,15 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 	bodyBytes, err := io.ReadAll(body)
 	if err != nil {
 		// Best effort, just return the original body and path on error.
-		fmt.Fprintf(os.Stdout, "Failed to read request body: %v\n", err)
+		fmt.Fprintf(os.Stderr, "Failed to read request body: %v\n", err)
 		return body, path, 0
 	}
+	_ = body.Close()
 
 	var chatCompletionRequest gopenai.ChatCompletionRequest
 	if err := json.Unmarshal(bodyBytes, &chatCompletionRequest); err != nil {
 		// Best effort, just return the original body and path on error.
-		fmt.Fprintf(os.Stdout, "Failed to unmarshal chat completion request: %v\n", err)
+		fmt.Fprintf(os.Stderr, "Failed to unmarshal chat completion request: %v\n", err)
 		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
 	}
 	if !strings.HasPrefix(chatCompletionRequest.Model, "computer-use-") {
@@ -142,13 +144,13 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 		}
 	default:
 		// Best effort log and move on.
-		fmt.Fprintf(os.Stdout, "Unsupported response format type: %v\n", chatCompletionRequest.ResponseFormat.Type)
+		fmt.Fprintf(os.Stderr, "Unsupported response format type: %v\n", chatCompletionRequest.ResponseFormat.Type)
 	}
 }
 // Translate the initial system message to instructions
 if len(chatCompletionRequest.Messages) > 0 && (chatCompletionRequest.Messages[0].Role == gopenai.ChatMessageRoleSystem || chatCompletionRequest.Messages[0].Role == "developer") {
 	instructions = chatCompletionRequest.Messages[0].Content
-	chatCompletionRequest.Messages = chatCompletionRequest.Messages[1:]
+	//chatCompletionRequest.Messages = chatCompletionRequest.Messages[1:]
 }
 // Translate the messages to input items
 inputItems = make([]responses.ResponseInputItemUnionParam, 0, len(chatCompletionRequest.Messages))
@@ -167,14 +169,14 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 			message.ToolCallID,
 			message.Content,
 		))
-	case message.Role == gopenai.ChatMessageRoleUser || message.Role == gopenai.ChatMessageRoleAssistant:
+	case message.Role == gopenai.ChatMessageRoleUser || message.Role == gopenai.ChatMessageRoleAssistant || message.Role == gopenai.ChatMessageRoleSystem:
 		inputItems = append(inputItems, responses.ResponseInputItemParamOfMessage(
 			message.Content,
 			responses.EasyInputMessageRole(message.Role),
 		))
 	default:
 		// Best effort log and move on.
-		fmt.Fprintf(os.Stdout, "Unsupported message role: %v\n", message.Role)
+		fmt.Fprintf(os.Stderr, "Unsupported message role: %v\n", message.Role)
 	}
 }
 // Translate the tools to tool union params
@@ -184,7 +186,7 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 	tools = append(tools, responses.ToolParamOfFunction(
 		tool.Function.Name,
 		parameters,
-		true,
+		false,
 	))
 }
 // Translate the chat completion request to a responses API request
@@ -231,10 +233,10 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 	}
 
 	// Marshal the responses request to JSON
-	responsesRequestBytes, err := json.Marshal(responsesRequest)
+	responsesRequestBytes, err := json.MarshalIndent(responsesRequest, "", "  ")
 	if err != nil {
 		// Best effort, just return the original body and path on error.
-		fmt.Fprintf(os.Stdout, "Failed to marshal responses request: %v\n", err)
+		fmt.Fprintf(os.Stderr, "Failed to marshal responses request: %v\n", err)
 		return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
 	}
 
@@ -243,7 +245,7 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 		responsesRequestBytes, err = sjson.SetBytes(responsesRequestBytes, "stream", true)
 		if err != nil {
 			// Best effort, just return the original body and path on error.
-			fmt.Fprintf(os.Stdout, "Failed to set stream in responses request: %v\n", err)
+			fmt.Fprintf(os.Stderr, "Failed to set stream in responses request: %v\n", err)
 			return io.NopCloser(bytes.NewBuffer(bodyBytes)), path, 0
 		}
 	}
@@ -254,7 +256,7 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 }
 
 func (r *responsesRequestTranslator) modifyResponsesAPIResponse(resp *http.Response) error {
-	if r.wasTranslated || resp.StatusCode != http.StatusOK {
+	if !r.wasTranslated || resp.StatusCode != http.StatusOK {
 		return nil
 	}
 
@@ -267,8 +269,19 @@ func (r *responsesRequestTranslator) modifyResponsesAPIResponse(resp *http.Respo
 
 func handleNonStreamingResponse(resp *http.Response) error {
 	var responsesResponse responses.Response
-	if err := json.NewDecoder(resp.Body).Decode(&responsesResponse); err != nil {
-		return fmt.Errorf("failed to decode chat completion response: %w", err)
+	var body io.Reader = resp.Body
+	if resp.Header.Get("Content-Encoding") == "gzip" {
+		gzReader, err := gzip.NewReader(resp.Body)
+		if err != nil {
+			return fmt.Errorf("failed to create gzip reader: %w", err)
+		}
+		defer gzReader.Close()
+		resp.Header.Del("Content-Encoding")
+		body = gzReader
+	}
+
+	if err := json.NewDecoder(body).Decode(&responsesResponse); err != nil {
+		return fmt.Errorf("failed to decode responses API response: %w", err)
 	}

From 2e697f73cc11fefb27d2c3c66dbbedb5d73ccfa0 Mon Sep 17 00:00:00 2001
From: Donnie Adams
Date: Thu, 27 Mar 2025 12:34:39 -0400
Subject: [PATCH 8/8] Use some builtins

Signed-off-by: Donnie Adams
---
 openai-model-provider/main.go | 57 +++++++++++------------------------
 1 file changed, 18 insertions(+), 39 deletions(-)

diff --git a/openai-model-provider/main.go b/openai-model-provider/main.go
index 1e9c21667..98b42a4f6 100644
--- a/openai-model-provider/main.go
+++ b/openai-model-provider/main.go
@@ -14,6 +14,7 @@ import (
 	gopenai "github.com/gptscript-ai/chat-completion-client"
 	"github.com/obot-platform/tools/openai-model-provider/openaiproxy"
 	"github.com/obot-platform/tools/openai-model-provider/proxy"
+	"github.com/openai/openai-go"
 	"github.com/openai/openai-go/packages/param"
 	"github.com/openai/openai-go/responses"
 	"github.com/openai/openai-go/shared"
@@ -118,10 +119,9 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 	}
 
 	var (
-		text         responses.ResponseTextConfigParam
-		inputItems   []responses.ResponseInputItemUnionParam
-		tools        []responses.ToolUnionParam
-		instructions string
+		text       responses.ResponseTextConfigParam
+		inputItems []responses.ResponseInputItemUnionParam
+		tools      []responses.ToolUnionParam
 	)
 	// Translate the response format
 	if chatCompletionRequest.ResponseFormat != nil {
@@ -147,11 +147,6 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 			fmt.Fprintf(os.Stderr, "Unsupported response format type: %v\n", chatCompletionRequest.ResponseFormat.Type)
 		}
 	}
-	// Translate the initial system message to instructions
-	if len(chatCompletionRequest.Messages) > 0 && (chatCompletionRequest.Messages[0].Role == gopenai.ChatMessageRoleSystem || chatCompletionRequest.Messages[0].Role == "developer") {
-		instructions = chatCompletionRequest.Messages[0].Content
-		//chatCompletionRequest.Messages = chatCompletionRequest.Messages[1:]
-	}
 	// Translate the messages to input items
 	inputItems = make([]responses.ResponseInputItemUnionParam, 0, len(chatCompletionRequest.Messages))
 	for _, message := range chatCompletionRequest.Messages {
@@ -194,36 +189,20 @@ func (r *responsesRequestTranslator) rewriteBody(body io.ReadCloser, path string
 		Input: responses.ResponseNewParamsInputUnion{
 			OfInputItemList: inputItems,
 		},
-		Model: shared.ResponsesModel(chatCompletionRequest.Model),
-		Instructions: param.Opt[string]{
-			Value: instructions,
-		},
-		MaxOutputTokens: param.Opt[int64]{
-			Value: int64(chatCompletionRequest.MaxTokens),
-		},
-		ParallelToolCalls: param.Opt[bool]{
-			Value: true,
-		},
-		PreviousResponseID: param.Opt[string]{
-			Value: "",
-		},
-		Store: param.Opt[bool]{
-			Value: false,
-		},
-		Temperature: param.Opt[float64]{
-			Value: float64(*chatCompletionRequest.Temperature),
-		},
-		TopP: param.Opt[float64]{
-			Value: float64(chatCompletionRequest.TopP),
-		},
-		User: param.Opt[string]{
-			Value: chatCompletionRequest.User,
-		},
-		Reasoning:  shared.ReasoningParam{},
-		Include:    nil,
-		Metadata:   nil,
-		Truncation: responses.ResponseNewParamsTruncationDisabled,
-		Text:       text,
+		Model:              shared.ResponsesModel(chatCompletionRequest.Model),
+		Instructions:       openai.String(""),
+		MaxOutputTokens:    openai.Int(int64(chatCompletionRequest.MaxTokens)),
+		ParallelToolCalls:  openai.Bool(true),
+		PreviousResponseID: openai.String(""),
+		Store:              openai.Bool(false),
+		Temperature:        openai.Float(float64(*chatCompletionRequest.Temperature)),
+		TopP:               openai.Float(float64(chatCompletionRequest.TopP)),
+		User:               openai.String(chatCompletionRequest.User),
+		Reasoning:          shared.ReasoningParam{},
+		Include:            nil,
+		Metadata:           nil,
+		Truncation:         responses.ResponseNewParamsTruncationDisabled,
+		Text:               text,
 		ToolChoice: responses.ResponseNewParamsToolChoiceUnion{
 			OfToolChoiceMode: param.Opt[responses.ToolChoiceOptions]{
 				Value: responses.ToolChoiceOptionsAuto,