fix: chat stream resp error (#259)

This commit is contained in:
Liu Shuang
2023-04-19 20:05:00 +08:00
committed by GitHub
parent 3b10c032b6
commit d6ab1b3a4f
8 changed files with 146 additions and 33 deletions

View File

@@ -1,16 +1,16 @@
package openai_test
import (
. "github.com/sashabaranov/go-openai"
"github.com/sashabaranov/go-openai/internal/test"
"github.com/sashabaranov/go-openai/internal/test/checks"
"context"
"errors"
"io"
"net/http"
"net/http/httptest"
"testing"
. "github.com/sashabaranov/go-openai"
"github.com/sashabaranov/go-openai/internal/test"
"github.com/sashabaranov/go-openai/internal/test/checks"
)
func TestCompletionsStreamWrongModel(t *testing.T) {
@@ -171,6 +171,52 @@ func TestCreateCompletionStreamError(t *testing.T) {
t.Logf("%+v\n", apiErr)
}
// TestCreateCompletionStreamRateLimitError verifies that when the streaming
// completions endpoint responds with HTTP 429 and a JSON error body,
// CreateCompletionStream returns an error that unwraps to *APIError via
// errors.As, rather than a raw transport/decode error.
func TestCreateCompletionStreamRateLimitError(t *testing.T) {
	server := test.NewTestServer()
	server.RegisterHandler("/v1/completions", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		// Use the named status constant instead of the bare literal 429.
		w.WriteHeader(http.StatusTooManyRequests)
		// Error payload mirroring the shape of a real rate-limit response.
		dataBytes := []byte(`{"error":{` +
			`"message": "You are sending requests too quickly.",` +
			`"type":"rate_limit_reached",` +
			`"param":null,` +
			`"code":"rate_limit_reached"}}`)
		_, err := w.Write(dataBytes)
		checks.NoError(t, err, "Write error")
	})
	ts := server.OpenAITestServer()
	ts.Start()
	defer ts.Close()

	// Client portion of the test: point the client at the local test server
	// and inject the auth token via the custom round tripper.
	config := DefaultConfig(test.GetTestToken())
	config.BaseURL = ts.URL + "/v1"
	config.HTTPClient.Transport = &tokenRoundTripper{
		test.GetTestToken(),
		http.DefaultTransport,
	}

	client := NewClientWithConfig(config)
	ctx := context.Background()

	request := CompletionRequest{
		MaxTokens: 5,
		Model:     GPT3Ada,
		Prompt:    "Hello!",
		Stream:    true,
	}

	var apiErr *APIError
	_, err := client.CreateCompletionStream(ctx, request)
	if !errors.As(err, &apiErr) {
		t.Errorf("TestCreateCompletionStreamRateLimitError did not return APIError")
	}
	t.Logf("%+v\n", apiErr)
}
// A "tokenRoundTripper" is a struct that implements the RoundTripper
// interface, specifically to handle the authentication token by adding a token
// to the request header. We need this because the API requires that each