author    Paul Buetow <paul@buetow.org>  2025-10-02 08:41:45 +0300
committer Paul Buetow <paul@buetow.org>  2025-10-02 08:41:45 +0300
commit    e36a5446bc62842ae3b3e165f66fecb7285a8c6a (patch)
tree      d3f9f7a66d8b4e5fdb13903722580a8f90eae5d1 /internal/llm/openai_request_test.go
parent    f14eb9199f4e1aee49594e590c08996244bb77b3 (diff)
feat: add OpenRouter provider (tag: v0.15.0)
Diffstat (limited to 'internal/llm/openai_request_test.go')
-rw-r--r--  internal/llm/openai_request_test.go  |  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/internal/llm/openai_request_test.go b/internal/llm/openai_request_test.go
index 001e3b7..d053031 100644
--- a/internal/llm/openai_request_test.go
+++ b/internal/llm/openai_request_test.go
@@ -9,13 +9,13 @@ func TestBuildOAChatRequest_MaxTokensKeyByModel(t *testing.T) {
msgs := []Message{{Role: "user", Content: "hi"}}
mt := 123
// Legacy model: use max_tokens
- r1 := buildOAChatRequest(Options{Model: "gpt-4.1", MaxTokens: mt}, msgs, nil, false)
+ r1 := buildOAChatRequest(Options{Model: "gpt-4.1", MaxTokens: mt}, msgs, nil, false, "llm/test ")
b1, _ := json.Marshal(r1)
if !contains(string(b1), "max_tokens") || contains(string(b1), "max_completion_tokens") {
t.Fatalf("expected max_tokens only, got %s", string(b1))
}
// gpt-5 family: use max_completion_tokens
- r2 := buildOAChatRequest(Options{Model: "gpt-5.0-preview", MaxTokens: mt}, msgs, nil, false)
+ r2 := buildOAChatRequest(Options{Model: "gpt-5.0-preview", MaxTokens: mt}, msgs, nil, false, "llm/test ")
b2, _ := json.Marshal(r2)
if !contains(string(b2), "max_completion_tokens") || contains(string(b2), "max_tokens\":") {
t.Fatalf("expected max_completion_tokens only, got %s", string(b2))
@@ -25,7 +25,7 @@ func TestBuildOAChatRequest_MaxTokensKeyByModel(t *testing.T) {
func TestBuildOAChatRequest_TemperatureForcedForGpt5(t *testing.T) {
msgs := []Message{{Role: "user", Content: "hi"}}
// Explicit temp 0.2 → should be forced to 1.0 for gpt-5
- r := buildOAChatRequest(Options{Model: "gpt-5.0", Temperature: 0.2, MaxTokens: 50}, msgs, nil, false)
+ r := buildOAChatRequest(Options{Model: "gpt-5.0", Temperature: 0.2, MaxTokens: 50}, msgs, nil, false, "llm/test ")
b, _ := json.Marshal(r)
if !contains(string(b), "\"temperature\":1") {
t.Fatalf("expected forced temperature 1.0 for gpt-5, got %s", string(b))