go-openai/chat_stream.go

package openai

import (
	"bufio"
	"context"
)

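// ChatCompletionStreamChoiceDelta holds the incremental piece of a choice's
// content delivered by a single streamed chunk.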
type ChatCompletionStreamChoiceDelta struct {
	Content string `json:"content"`
}

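// ChatCompletionStreamChoice is a single streamed choice; FinishReason stays
// empty until the final chunk for that choice.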
type ChatCompletionStreamChoice struct {
	Index        int                             `json:"index"`
	Delta        ChatCompletionStreamChoiceDelta `json:"delta"`
	FinishReason string                          `json:"finish_reason"`
}

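// ChatCompletionStreamResponse is one decoded chunk of a streamed chat
// completion.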
type ChatCompletionStreamResponse struct {
	ID      string                       `json:"id"`
	Object  string                       `json:"object"`
	Created int64                        `json:"created"`
	Model   string                       `json:"model"`
	Choices []ChatCompletionStreamChoice `json:"choices"`
	Usage   Usage                        `json:"usage"`
}

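// For reference, each server-sent event decoded into the struct above carries
// a JSON chunk shaped roughly like the following (a sketch of the wire format,
// not taken from this file):
//
//	data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1680000000,"model":"gpt-3.5-turbo","choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}]}
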
// ChatCompletionStream reads streamed chat completion responses via the
// embedded streamReader.
// Note: Perhaps it is more elegant to abstract Stream using generics.
type ChatCompletionStream struct {
	*streamReader[ChatCompletionStreamResponse]
}

// CreateChatCompletionStream is an API call to create a chat completion with
// streaming support. It sets the request to stream back partial progress:
// tokens are sent as data-only server-sent events as they become available,
// and the stream is terminated by a data: [DONE] message.
func (c *Client) CreateChatCompletionStream(
	ctx context.Context,
	request ChatCompletionRequest,
) (stream *ChatCompletionStream, err error) {
	urlSuffix := "/chat/completions"
	if !checkEndpointSupportsModel(urlSuffix, request.Model) {
		err = ErrChatCompletionInvalidModel
		return
	}

	request.Stream = true
	req, err := c.newStreamRequest(ctx, "POST", urlSuffix, request)
	if err != nil {
		return
	}

	resp, err := c.config.HTTPClient.Do(req) //nolint:bodyclose // body is closed in stream.Close()
	if err != nil {
		return
	}

	stream = &ChatCompletionStream{
		streamReader: &streamReader[ChatCompletionStreamResponse]{
			emptyMessagesLimit: c.config.EmptyMessagesLimit,
			reader:             bufio.NewReader(resp.Body),
			response:           resp,
			errAccumulator:     newErrorAccumulator(),
			unmarshaler:        &jsonUnmarshaler{},
		},
	}
	return
}
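
// A minimal usage sketch (illustrative only; it assumes the public go-openai
// client surface, where NewClient, GPT3Dot5Turbo, ChatMessageRoleUser, and
// the Recv and Close methods of the embedded streamReader behave as in the
// published package):
//
//	client := NewClient("your-token")
//	stream, err := client.CreateChatCompletionStream(context.Background(), ChatCompletionRequest{
//		Model:    GPT3Dot5Turbo,
//		Messages: []ChatCompletionMessage{{Role: ChatMessageRoleUser, Content: "Hello"}},
//	})
//	if err != nil {
//		return
//	}
//	defer stream.Close()
//	for {
//		response, err := stream.Recv()
//		if errors.Is(err, io.EOF) {
//			break // server sent data: [DONE]
//		}
//		if err != nil {
//			break
//		}
//		fmt.Print(response.Choices[0].Delta.Content)
//	}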