package openai

import (
	"bufio"
	"context"

	utils "github.com/sashabaranov/go-openai/internal"
)

type ChatCompletionStreamChoiceDelta struct {
	Content string `json:"content,omitempty"`
	Role    string `json:"role,omitempty"`
}

type ChatCompletionStreamChoice struct {
	Index        int                             `json:"index"`
	Delta        ChatCompletionStreamChoiceDelta `json:"delta"`
	FinishReason string                          `json:"finish_reason"`
}

type ChatCompletionStreamResponse struct {
	ID      string                       `json:"id"`
	Object  string                       `json:"object"`
	Created int64                        `json:"created"`
	Model   string                       `json:"model"`
	Choices []ChatCompletionStreamChoice `json:"choices"`
}

// ChatCompletionStream is a stream of ChatCompletionStreamResponse values.
// Note: Perhaps it is more elegant to abstract Stream using generics.
type ChatCompletionStream struct {
	*streamReader[ChatCompletionStreamResponse]
}

// CreateChatCompletionStream is an API call to create a chat completion with
// streaming support. It forces request.Stream to true; tokens are then sent
// back as data-only server-sent events as they become available, with the
// stream terminated by a data: [DONE] message.
func (c *Client) CreateChatCompletionStream(
	ctx context.Context,
	request ChatCompletionRequest,
) (stream *ChatCompletionStream, err error) {
	urlSuffix := chatCompletionsSuffix
	if !checkEndpointSupportsModel(urlSuffix, request.Model) {
		err = ErrChatCompletionInvalidModel
		return
	}

	// Streaming must be enabled regardless of what the caller set on the request.
	request.Stream = true
	req, err := c.newStreamRequest(ctx, "POST", urlSuffix, request, request.Model)
	if err != nil {
		return
	}

	resp, err := c.config.HTTPClient.Do(req) //nolint:bodyclose // body is closed in stream.Close()
	if err != nil {
		return
	}
	if isFailureStatusCode(resp) {
		return nil, c.handleErrorResp(resp)
	}

	// Wrap the response body in a streamReader, which parses the incoming
	// server-sent events into ChatCompletionStreamResponse values.
	stream = &ChatCompletionStream{
		streamReader: &streamReader[ChatCompletionStreamResponse]{
			emptyMessagesLimit: c.config.EmptyMessagesLimit,
			reader:             bufio.NewReader(resp.Body),
			response:           resp,
			errAccumulator:     utils.NewErrorAccumulator(),
			unmarshaler:        &utils.JSONUnmarshaler{},
		},
	}
	return
}
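
// Example usage: a minimal sketch, assuming the exported go-openai surface
// (openai.NewClient, openai.ChatCompletionRequest, openai.ChatCompletionMessage)
// and the Recv/Close methods provided by the embedded streamReader. Recv blocks
// until the next server-sent event arrives and returns io.EOF once the
// data: [DONE] message has been received; Close releases the underlying
// response body. The error handling here is placeholder code for the sketch.
//
//	client := openai.NewClient("your-api-key")
//	stream, err := client.CreateChatCompletionStream(
//		context.Background(),
//		openai.ChatCompletionRequest{
//			Model: openai.GPT3Dot5Turbo,
//			Messages: []openai.ChatCompletionMessage{
//				{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
//			},
//		},
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer stream.Close()
//	for {
//		response, err := stream.Recv()
//		if errors.Is(err, io.EOF) {
//			break // server sent data: [DONE]; the stream is finished
//		}
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Print(response.Choices[0].Delta.Content)
//	}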