lint: fix linter warnings reported by golangci-lint (#522)

- Fix #519
Simon Klee authored 2023-11-07 10:23:06 +01:00, committed by GitHub
parent 9e0232f941
commit 0664105387
23 changed files with 425 additions and 431 deletions
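Nearly every change below follows one mechanical pattern: the test file stops dot-importing the module and refers to exported identifiers through the `openai` package qualifier instead. Dot-imports are flagged by linters commonly bundled in golangci-lint (revive's dot-imports rule, stylecheck's ST1001); the commit itself doesn't say which one fired. A minimal before/after sketch of the pattern; the test body is illustrative, while the import path, DefaultConfig, and BaseURL come from the real API:

	// Before: the test lived in package openai and dot-imported the module,
	// so every exported name was bare:
	//
	//     . "github.com/sashabaranov/go-openai"
	//     config := DefaultConfig("whatever")
	//
	// After: an external test package with a plain import (the package is
	// already named openai, so no alias is needed).
	package openai_test

	import (
		"testing"

		"github.com/sashabaranov/go-openai"
	)

	func TestDefaultConfigSketch(t *testing.T) {
		config := openai.DefaultConfig("whatever") // qualified access, no dot-import
		config.BaseURL = "http://localhost/v1"
		if config.BaseURL != "http://localhost/v1" {
			t.Fatal("BaseURL not applied")
		}
	}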


@@ -11,7 +11,7 @@ import (
 	"testing"
 	"time"
 
-	. "github.com/sashabaranov/go-openai"
+	"github.com/sashabaranov/go-openai"
 	"github.com/sashabaranov/go-openai/internal/test/checks"
 	"github.com/sashabaranov/go-openai/jsonschema"
 )
@@ -21,49 +21,47 @@ const (
 	xCustomHeaderValue = "test"
 )
 
-var (
-	rateLimitHeaders = map[string]any{
-		"x-ratelimit-limit-requests":     60,
-		"x-ratelimit-limit-tokens":       150000,
-		"x-ratelimit-remaining-requests": 59,
-		"x-ratelimit-remaining-tokens":   149984,
-		"x-ratelimit-reset-requests":     "1s",
-		"x-ratelimit-reset-tokens":       "6m0s",
-	}
-)
+var rateLimitHeaders = map[string]any{
+	"x-ratelimit-limit-requests":     60,
+	"x-ratelimit-limit-tokens":       150000,
+	"x-ratelimit-remaining-requests": 59,
+	"x-ratelimit-remaining-tokens":   149984,
+	"x-ratelimit-reset-requests":     "1s",
+	"x-ratelimit-reset-tokens":       "6m0s",
+}
 
 func TestChatCompletionsWrongModel(t *testing.T) {
-	config := DefaultConfig("whatever")
+	config := openai.DefaultConfig("whatever")
 	config.BaseURL = "http://localhost/v1"
-	client := NewClientWithConfig(config)
+	client := openai.NewClientWithConfig(config)
 	ctx := context.Background()
 
-	req := ChatCompletionRequest{
+	req := openai.ChatCompletionRequest{
 		MaxTokens: 5,
 		Model:     "ada",
-		Messages: []ChatCompletionMessage{
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
 	}
 	_, err := client.CreateChatCompletion(ctx, req)
 	msg := fmt.Sprintf("CreateChatCompletion should return wrong model error, returned: %s", err)
-	checks.ErrorIs(t, err, ErrChatCompletionInvalidModel, msg)
+	checks.ErrorIs(t, err, openai.ErrChatCompletionInvalidModel, msg)
 }
 
 func TestChatCompletionsWithStream(t *testing.T) {
-	config := DefaultConfig("whatever")
+	config := openai.DefaultConfig("whatever")
 	config.BaseURL = "http://localhost/v1"
-	client := NewClientWithConfig(config)
+	client := openai.NewClientWithConfig(config)
 	ctx := context.Background()
 
-	req := ChatCompletionRequest{
+	req := openai.ChatCompletionRequest{
 		Stream: true,
 	}
 	_, err := client.CreateChatCompletion(ctx, req)
-	checks.ErrorIs(t, err, ErrChatCompletionStreamNotSupported, "unexpected error")
+	checks.ErrorIs(t, err, openai.ErrChatCompletionStreamNotSupported, "unexpected error")
 }
 
 // TestCompletions Tests the completions endpoint of the API using the mocked server.
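Aside from the import rewrite, the hunk above also collapses a single-declaration `var (...)` group into a plain `var` statement; one-element declaration groups are reported as redundant by formatters such as gofumpt (an assumption; the commit doesn't name the specific linter). The shape of the fix, with an illustrative name:

	// Before (flagged): a group wrapping exactly one declaration.
	//
	//     var (
	//         retries = 3
	//     )
	//
	// After: the same declaration, ungrouped.
	var retries = 3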
@@ -71,12 +69,12 @@ func TestChatCompletions(t *testing.T) {
 	client, server, teardown := setupOpenAITestServer()
 	defer teardown()
 	server.RegisterHandler("/v1/chat/completions", handleChatCompletionEndpoint)
-	_, err := client.CreateChatCompletion(context.Background(), ChatCompletionRequest{
+	_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
 		MaxTokens: 5,
-		Model:     GPT3Dot5Turbo,
-		Messages: []ChatCompletionMessage{
+		Model:     openai.GPT3Dot5Turbo,
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
@@ -89,12 +87,12 @@ func TestChatCompletionsWithHeaders(t *testing.T) {
 	client, server, teardown := setupOpenAITestServer()
 	defer teardown()
 	server.RegisterHandler("/v1/chat/completions", handleChatCompletionEndpoint)
-	resp, err := client.CreateChatCompletion(context.Background(), ChatCompletionRequest{
+	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
 		MaxTokens: 5,
-		Model:     GPT3Dot5Turbo,
-		Messages: []ChatCompletionMessage{
+		Model:     openai.GPT3Dot5Turbo,
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
@@ -113,12 +111,12 @@ func TestChatCompletionsWithRateLimitHeaders(t *testing.T) {
 	client, server, teardown := setupOpenAITestServer()
 	defer teardown()
 	server.RegisterHandler("/v1/chat/completions", handleChatCompletionEndpoint)
-	resp, err := client.CreateChatCompletion(context.Background(), ChatCompletionRequest{
+	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
 		MaxTokens: 5,
-		Model:     GPT3Dot5Turbo,
-		Messages: []ChatCompletionMessage{
+		Model:     openai.GPT3Dot5Turbo,
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
@@ -150,16 +148,16 @@ func TestChatCompletionsFunctions(t *testing.T) {
 	t.Run("bytes", func(t *testing.T) {
 		//nolint:lll
 		msg := json.RawMessage(`{"properties":{"count":{"type":"integer","description":"total number of words in sentence"},"words":{"items":{"type":"string"},"type":"array","description":"list of words in sentence"}},"type":"object","required":["count","words"]}`)
-		_, err := client.CreateChatCompletion(context.Background(), ChatCompletionRequest{
+		_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
 			MaxTokens: 5,
-			Model:     GPT3Dot5Turbo0613,
-			Messages: []ChatCompletionMessage{
+			Model:     openai.GPT3Dot5Turbo0613,
+			Messages: []openai.ChatCompletionMessage{
 				{
-					Role:    ChatMessageRoleUser,
+					Role:    openai.ChatMessageRoleUser,
 					Content: "Hello!",
 				},
 			},
-			Functions: []FunctionDefinition{{
+			Functions: []openai.FunctionDefinition{{
 				Name:       "test",
 				Parameters: &msg,
 			}},
@@ -175,16 +173,16 @@ func TestChatCompletionsFunctions(t *testing.T) {
 			Count: 2,
 			Words: []string{"hello", "world"},
 		}
-		_, err := client.CreateChatCompletion(context.Background(), ChatCompletionRequest{
+		_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
 			MaxTokens: 5,
-			Model:     GPT3Dot5Turbo0613,
-			Messages: []ChatCompletionMessage{
+			Model:     openai.GPT3Dot5Turbo0613,
+			Messages: []openai.ChatCompletionMessage{
 				{
-					Role:    ChatMessageRoleUser,
+					Role:    openai.ChatMessageRoleUser,
 					Content: "Hello!",
 				},
 			},
-			Functions: []FunctionDefinition{{
+			Functions: []openai.FunctionDefinition{{
 				Name:       "test",
 				Parameters: &msg,
 			}},
@@ -192,16 +190,16 @@ func TestChatCompletionsFunctions(t *testing.T) {
 		checks.NoError(t, err, "CreateChatCompletion with functions error")
 	})
 	t.Run("JSONSchemaDefinition", func(t *testing.T) {
-		_, err := client.CreateChatCompletion(context.Background(), ChatCompletionRequest{
+		_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
 			MaxTokens: 5,
-			Model:     GPT3Dot5Turbo0613,
-			Messages: []ChatCompletionMessage{
+			Model:     openai.GPT3Dot5Turbo0613,
+			Messages: []openai.ChatCompletionMessage{
 				{
-					Role:    ChatMessageRoleUser,
+					Role:    openai.ChatMessageRoleUser,
 					Content: "Hello!",
 				},
 			},
-			Functions: []FunctionDefinition{{
+			Functions: []openai.FunctionDefinition{{
 				Name: "test",
 				Parameters: &jsonschema.Definition{
 					Type: jsonschema.Object,
@@ -229,16 +227,16 @@ func TestChatCompletionsFunctions(t *testing.T) {
 	})
 	t.Run("JSONSchemaDefinitionWithFunctionDefine", func(t *testing.T) {
 		// this is a compatibility check
-		_, err := client.CreateChatCompletion(context.Background(), ChatCompletionRequest{
+		_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
 			MaxTokens: 5,
-			Model:     GPT3Dot5Turbo0613,
-			Messages: []ChatCompletionMessage{
+			Model:     openai.GPT3Dot5Turbo0613,
+			Messages: []openai.ChatCompletionMessage{
 				{
-					Role:    ChatMessageRoleUser,
+					Role:    openai.ChatMessageRoleUser,
 					Content: "Hello!",
 				},
 			},
-			Functions: []FunctionDefine{{
+			Functions: []openai.FunctionDefine{{
 				Name: "test",
 				Parameters: &jsonschema.Definition{
 					Type: jsonschema.Object,
@@ -271,12 +269,12 @@ func TestAzureChatCompletions(t *testing.T) {
 	defer teardown()
 	server.RegisterHandler("/openai/deployments/*", handleChatCompletionEndpoint)
 
-	_, err := client.CreateChatCompletion(context.Background(), ChatCompletionRequest{
+	_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
 		MaxTokens: 5,
-		Model:     GPT3Dot5Turbo,
-		Messages: []ChatCompletionMessage{
+		Model:     openai.GPT3Dot5Turbo,
+		Messages: []openai.ChatCompletionMessage{
 			{
-				Role:    ChatMessageRoleUser,
+				Role:    openai.ChatMessageRoleUser,
 				Content: "Hello!",
 			},
 		},
@@ -293,12 +291,12 @@ func handleChatCompletionEndpoint(w http.ResponseWriter, r *http.Request) {
 	if r.Method != "POST" {
 		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
 	}
-	var completionReq ChatCompletionRequest
+	var completionReq openai.ChatCompletionRequest
 	if completionReq, err = getChatCompletionBody(r); err != nil {
 		http.Error(w, "could not read request", http.StatusInternalServerError)
 		return
 	}
-	res := ChatCompletionResponse{
+	res := openai.ChatCompletionResponse{
 		ID:      strconv.Itoa(int(time.Now().Unix())),
 		Object:  "test-object",
 		Created: time.Now().Unix(),
@@ -323,11 +321,11 @@ func handleChatCompletionEndpoint(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 
-		res.Choices = append(res.Choices, ChatCompletionChoice{
-			Message: ChatCompletionMessage{
-				Role: ChatMessageRoleFunction,
+		res.Choices = append(res.Choices, openai.ChatCompletionChoice{
+			Message: openai.ChatCompletionMessage{
+				Role: openai.ChatMessageRoleFunction,
 				// this is valid json so it should be fine
-				FunctionCall: &FunctionCall{
+				FunctionCall: &openai.FunctionCall{
 					Name:      completionReq.Functions[0].Name,
 					Arguments: string(fcb),
 				},
@@ -339,9 +337,9 @@ func handleChatCompletionEndpoint(w http.ResponseWriter, r *http.Request) {
 
 		// generate a random string of length completionReq.Length
 		completionStr := strings.Repeat("a", completionReq.MaxTokens)
-		res.Choices = append(res.Choices, ChatCompletionChoice{
-			Message: ChatCompletionMessage{
-				Role:    ChatMessageRoleAssistant,
+		res.Choices = append(res.Choices, openai.ChatCompletionChoice{
+			Message: openai.ChatCompletionMessage{
+				Role:    openai.ChatMessageRoleAssistant,
 				Content: completionStr,
 			},
 			Index: i,
@@ -349,7 +347,7 @@ func handleChatCompletionEndpoint(w http.ResponseWriter, r *http.Request) {
 	}
 	inputTokens := numTokens(completionReq.Messages[0].Content) * n
 	completionTokens := completionReq.MaxTokens * n
-	res.Usage = Usage{
+	res.Usage = openai.Usage{
 		PromptTokens:     inputTokens,
 		CompletionTokens: completionTokens,
 		TotalTokens:      inputTokens + completionTokens,
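The mock's usage arithmetic in the hunk above is simple: each of the n requested choices repeats the prompt cost and consumes exactly MaxTokens completion tokens. A worked example under assumed inputs (not taken from the diff):

	// With n = 2 choices, a 3-token prompt, and MaxTokens = 5:
	//   inputTokens      = 3 * 2 = 6
	//   completionTokens = 5 * 2 = 10
	// so the mock reports Usage{PromptTokens: 6, CompletionTokens: 10, TotalTokens: 16}.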
@@ -368,23 +366,23 @@ func handleChatCompletionEndpoint(w http.ResponseWriter, r *http.Request) {
 }
 
 // getChatCompletionBody Returns the body of the request to create a completion.
-func getChatCompletionBody(r *http.Request) (ChatCompletionRequest, error) {
-	completion := ChatCompletionRequest{}
+func getChatCompletionBody(r *http.Request) (openai.ChatCompletionRequest, error) {
+	completion := openai.ChatCompletionRequest{}
 	// read the request body
 	reqBody, err := io.ReadAll(r.Body)
 	if err != nil {
-		return ChatCompletionRequest{}, err
+		return openai.ChatCompletionRequest{}, err
 	}
 	err = json.Unmarshal(reqBody, &completion)
 	if err != nil {
-		return ChatCompletionRequest{}, err
+		return openai.ChatCompletionRequest{}, err
 	}
 	return completion, nil
 }
 
 func TestFinishReason(t *testing.T) {
-	c := &ChatCompletionChoice{
-		FinishReason: FinishReasonNull,
+	c := &openai.ChatCompletionChoice{
+		FinishReason: openai.FinishReasonNull,
 	}
 	resBytes, _ := json.Marshal(c)
 	if !strings.Contains(string(resBytes), `"finish_reason":null`) {
@@ -398,11 +396,11 @@ func TestFinishReason(t *testing.T) {
 		t.Error("null should not be quoted")
 	}
 
-	otherReasons := []FinishReason{
-		FinishReasonStop,
-		FinishReasonLength,
-		FinishReasonFunctionCall,
-		FinishReasonContentFilter,
+	otherReasons := []openai.FinishReason{
+		openai.FinishReasonStop,
+		openai.FinishReasonLength,
+		openai.FinishReasonFunctionCall,
+		openai.FinishReasonContentFilter,
 	}
 	for _, r := range otherReasons {
 		c.FinishReason = r
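For context on the final hunk: TestFinishReason exercises FinishReason's JSON marshaling, where openai.FinishReasonNull serializes as a bare JSON null (as the `"finish_reason":null` check above confirms) and the other reasons serialize as quoted strings. A standalone sketch of that behavior, separate from the diff and using only the public go-openai API:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/sashabaranov/go-openai"
	)

	func main() {
		c := openai.ChatCompletionChoice{FinishReason: openai.FinishReasonNull}
		b, _ := json.Marshal(c)
		fmt.Println(string(b)) // contains `"finish_reason":null` (unquoted)

		c.FinishReason = openai.FinishReasonStop
		b, _ = json.Marshal(c)
		fmt.Println(string(b)) // contains `"finish_reason":"stop"` (quoted)
	}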