openaichat

package
v0.2.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Feb 8, 2026 License: Apache-2.0 Imports: 26 Imported by: 0

Documentation

Overview

Package openaichat implements a client for the OpenAI Chat Completion API.

It is described at https://platform.openai.com/docs/api-reference/

Index

Examples

Constants

This section is empty.

Variables

This section is empty.

Functions

func ProcessStream

func ProcessStream(chunks iter.Seq[ChatStreamChunkResponse]) (iter.Seq[genai.Reply], func() (genai.Usage, [][]genai.Logprob, error))

ProcessStream converts the raw packets from the streaming API into Reply fragments.

func Scoreboard

func Scoreboard() scoreboard.Score

Scoreboard for OpenAI.

Types

type Annotation

// Annotation is a provider-specific annotation. Currently only the
// "url_citation" type is represented (see Type).
type Annotation struct {
	Type        string `json:"type,omitzero"` // "url_citation"
	URLCitation struct {
		StartIndex int64  `json:"start_index,omitzero"`
		EndIndex   int64  `json:"end_index,omitzero"`
		Title      string `json:"title,omitzero"`
		URL        string `json:"url,omitzero"` // Has a ?utm_source=openai suffix.
	} `json:"url_citation,omitzero"`
}

Annotation is a provider-specific annotation.

type Background

type Background string

Background is only supported on gpt-image-1.

// Background mode values.
const (
	BackgroundAuto        Background = "auto"
	BackgroundTransparent Background = "transparent"
	BackgroundOpaque      Background = "opaque"
)

Background mode values.

type Batch

// Batch describes an asynchronous batch job and its lifecycle timestamps.
//
// It is documented at https://platform.openai.com/docs/api-reference/batch/object
type Batch struct {
	CancelledAt      base.Time `json:"cancelled_at"`
	CancellingAt     base.Time `json:"cancelling_at"`
	CompletedAt      base.Time `json:"completed_at"`
	CompletionWindow string    `json:"completion_window"` // "24h"
	CreatedAt        base.Time `json:"created_at"`
	Endpoint         string    `json:"endpoint"`      // Same as BatchRequest.Endpoint
	ErrorFileID      string    `json:"error_file_id"` // File ID containing the outputs of requests with errors.
	Errors           struct {
		Data []struct {
			Code    string `json:"code"`
			Line    int64  `json:"line"`
			Message string `json:"message"`
			Param   string `json:"param"`
		} `json:"data"`
	} `json:"errors"`
	ExpiredAt     base.Time         `json:"expired_at"`
	ExpiresAt     base.Time         `json:"expires_at"`
	FailedAt      base.Time         `json:"failed_at"`
	FinalizingAt  base.Time         `json:"finalizing_at"`
	ID            string            `json:"id"`
	InProgressAt  base.Time         `json:"in_progress_at"`
	InputFileID   string            `json:"input_file_id"` // Input data
	Metadata      map[string]string `json:"metadata"`
	Model         string            `json:"model,omitzero"`
	Object        string            `json:"object"`         // "batch"
	OutputFileID  string            `json:"output_file_id"` // Output data
	RequestCounts struct {
		Completed int64 `json:"completed"`
		Failed    int64 `json:"failed"`
		Total     int64 `json:"total"`
	} `json:"request_counts"`
	Status string     `json:"status"`         // "completed", "in_progress", "validating", "finalizing"
	Usage  BatchUsage `json:"usage,omitzero"` // Token usage for the batch
}

Batch is documented at https://platform.openai.com/docs/api-reference/batch/object

type BatchRequest

// BatchRequest is the payload to create a batch job.
//
// It is documented at https://platform.openai.com/docs/api-reference/batch/create
type BatchRequest struct {
	CompletionWindow string            `json:"completion_window"` // Must be "24h"
	Endpoint         string            `json:"endpoint"`          // One of /v1/responses, /v1/chat/completions, /v1/embeddings, /v1/completions
	InputFileID      string            `json:"input_file_id"`     // File must be JSONL
	Metadata         map[string]string `json:"metadata,omitzero"` // Maximum 16 keys of 64 chars, values max 512 chars
}

BatchRequest is documented at https://platform.openai.com/docs/api-reference/batch/create

type BatchRequestInput

// BatchRequestInput is one line of the JSONL input file for a batch job.
//
// It is documented at https://platform.openai.com/docs/api-reference/batch/request-input
type BatchRequestInput struct {
	CustomID string      `json:"custom_id"`
	Method   string      `json:"method"` // "POST"
	URL      string      `json:"url"`    // "/v1/chat/completions", "/v1/embeddings", "/v1/completions", "/v1/responses"
	Body     ChatRequest `json:"body"`
}

BatchRequestInput is documented at https://platform.openai.com/docs/api-reference/batch/request-input

type BatchRequestOutput

// BatchRequestOutput is one line of the JSONL output file for a batch job.
//
// It is documented at https://platform.openai.com/docs/api-reference/batch/request-output
type BatchRequestOutput struct {
	CustomID string `json:"custom_id"`
	ID       string `json:"id"`
	Error    struct {
		Code    string `json:"code"`
		Message string `json:"message"`
	} `json:"error"`
	Response struct {
		StatusCode int          `json:"status_code"`
		RequestID  string       `json:"request_id"` // To use when contacting support
		Body       ChatResponse `json:"body"`
	} `json:"response"`
}

BatchRequestOutput is documented at https://platform.openai.com/docs/api-reference/batch/request-output

type BatchUsage

// BatchUsage represents token usage information for a batch.
type BatchUsage struct {
	InputTokens        int64 `json:"input_tokens"`
	OutputTokens       int64 `json:"output_tokens"`
	TotalTokens        int64 `json:"total_tokens"`
	InputTokensDetails struct {
		CachedTokens int64 `json:"cached_tokens"`
	} `json:"input_tokens_details"`
	OutputTokensDetails struct {
		ReasoningTokens int64 `json:"reasoning_tokens"`
	} `json:"output_tokens_details"`
}

BatchUsage represents token usage information for a batch.

type ChatRequest

// ChatRequest is the request payload for the Chat Completion API.
//
// It is documented at https://platform.openai.com/docs/api-reference/chat/create
type ChatRequest struct {
	Model            string             `json:"model"`
	MaxTokens        int64              `json:"max_tokens,omitzero"` // Deprecated: use MaxChatTokens (max_completion_tokens) instead.
	MaxChatTokens    int64              `json:"max_completion_tokens,omitzero"`
	Stream           bool               `json:"stream"`
	Messages         []Message          `json:"messages"`
	Seed             int64              `json:"seed,omitzero"`
	Temperature      float64            `json:"temperature,omitzero"` // [0, 2]
	Store            bool               `json:"store,omitzero"`
	ReasoningEffort  ReasoningEffort    `json:"reasoning_effort,omitzero"`
	Metadata         map[string]string  `json:"metadata,omitzero"`
	FrequencyPenalty float64            `json:"frequency_penalty,omitzero"` // [-2.0, 2.0]
	LogitBias        map[string]float64 `json:"logit_bias,omitzero"`
	// See https://cookbook.openai.com/examples/using_logprobs
	Logprobs    bool     `json:"logprobs,omitzero"`
	TopLogprobs int64    `json:"top_logprobs,omitzero"` // [0, 20]
	N           int64    `json:"n,omitzero"`            // Number of choices
	Modalities  []string `json:"modalities,omitzero"`   // text, audio
	Prediction  struct {
		Type    string `json:"type,omitzero"` // "content"
		Content []struct {
			Type string `json:"type,omitzero"` // "text"
			Text string `json:"text,omitzero"`
		} `json:"content,omitzero"`
	} `json:"prediction,omitzero"`
	Audio struct {
		// https://platform.openai.com/docs/guides/text-to-speech#voice-options
		Voice string `json:"voice,omitzero"` // "alloy", "ash", "ballad", "coral", "echo", "fable", "nova", "onyx", "sage", "shimmer"
		// https://platform.openai.com/docs/guides/text-to-speech#supported-output-formats
		Format string `json:"format,omitzero"` // "mp3", "wav", "flac", "opus", "pcm16", "aac"
	} `json:"audio,omitzero"`
	PresencePenalty float64 `json:"presence_penalty,omitzero"` // [-2.0, 2.0]
	ResponseFormat  struct {
		Type       string `json:"type,omitzero"` // "text", "json_object", "json_schema"
		JSONSchema struct {
			Description string             `json:"description,omitzero"`
			Name        string             `json:"name,omitzero"`
			Schema      *jsonschema.Schema `json:"schema,omitzero"`
			Strict      bool               `json:"strict,omitzero"`
		} `json:"json_schema,omitzero"`
	} `json:"response_format,omitzero"`
	ServiceTier   ServiceTier `json:"service_tier,omitzero"`
	Stop          []string    `json:"stop,omitzero"` // keywords to stop completion
	StreamOptions struct {
		IncludeUsage bool `json:"include_usage,omitzero"`
	} `json:"stream_options,omitzero"`
	TopP  float64 `json:"top_p,omitzero"` // [0, 1]
	Tools []Tool  `json:"tools,omitzero"`
	// Alternative when forcing a specific function. This can probably be achieved
	// by providing a single tool and ToolChoice == "required".
	// ToolChoice struct {
	// 	Type     string `json:"type,omitzero"` // "function"
	// 	Function struct {
	// 		Name string `json:"name,omitzero"`
	// 	} `json:"function,omitzero"`
	// } `json:"tool_choice,omitzero"`
	ToolChoice        string            `json:"tool_choice,omitzero"` // "none", "auto", "required"
	ParallelToolCalls bool              `json:"parallel_tool_calls,omitzero"`
	User              string            `json:"user,omitzero"`
	WebSearchOptions  *WebSearchOptions `json:"web_search_options,omitzero"`
}

ChatRequest is documented at https://platform.openai.com/docs/api-reference/chat/create

func (*ChatRequest) Init

func (c *ChatRequest) Init(msgs genai.Messages, model string, opts ...genai.GenOption) error

Init initializes the provider specific completion request with the generic completion request.

func (*ChatRequest) SetStream

func (c *ChatRequest) SetStream(stream bool)

SetStream sets the streaming mode.

type ChatResponse

// ChatResponse is the non-streaming response of the Chat Completion API.
//
// It is documented at https://platform.openai.com/docs/api-reference/chat/object
type ChatResponse struct {
	Choices []struct {
		FinishReason FinishReason `json:"finish_reason"`
		Index        int64        `json:"index"`
		Message      Message      `json:"message"`
		Logprobs     Logprobs     `json:"logprobs"`
	} `json:"choices"`
	Created           base.Time `json:"created"`
	ID                string    `json:"id"`
	Model             string    `json:"model"`
	Object            string    `json:"object"`
	Usage             Usage     `json:"usage"`
	ServiceTier       string    `json:"service_tier"`
	SystemFingerprint string    `json:"system_fingerprint"`
}

ChatResponse is documented at https://platform.openai.com/docs/api-reference/chat/object

func (*ChatResponse) ToResult

func (c *ChatResponse) ToResult() (genai.Result, error)

ToResult converts the response to a genai.Result.

type ChatStreamChunkResponse

// ChatStreamChunkResponse is one server-sent chunk of a streaming chat
// completion. Deltas accumulate across chunks for the same choice index.
type ChatStreamChunkResponse struct {
	Choices []struct {
		Delta struct {
			Content     string       `json:"content"`
			Role        string       `json:"role"`
			Refusal     string       `json:"refusal"`
			ToolCalls   []ToolCall   `json:"tool_calls"`
			Annotations []Annotation `json:"annotations"`
		} `json:"delta"`
		FinishReason FinishReason `json:"finish_reason"`
		Index        int64        `json:"index"`
		Logprobs     Logprobs     `json:"logprobs"`
	} `json:"choices"`
	Created           base.Time `json:"created"`
	ID                string    `json:"id"`
	Model             string    `json:"model"`
	Object            string    `json:"object"` // "chat.completion.chunk"
	ServiceTier       string    `json:"service_tier"`
	SystemFingerprint string    `json:"system_fingerprint"`
	Usage             Usage     `json:"usage"`
	Obfuscation       string    `json:"obfuscation"`
}

ChatStreamChunkResponse does not appear to be formally documented in the OpenAI API reference.

type Client

// Client implements genai.Provider for the OpenAI Chat Completion API.
type Client struct {
	base.NotImplemented
	// contains filtered or unexported fields
}

Client implements genai.Provider.

func New

func New(ctx context.Context, opts ...genai.ProviderOption) (*Client, error)

New creates a new client to talk to the OpenAI platform API.

If ProviderOptionAPIKey is not provided, it tries to load it from the OPENAI_API_KEY environment variable. If none is found, it will still return a client coupled with a base.ErrAPIKeyRequired error. Get your API key at https://platform.openai.com/settings/organization/api-keys

To use multiple models, create multiple clients. Use one of the models from https://platform.openai.com/docs/models

Documents

OpenAI supports many types of documents, listed at https://platform.openai.com/docs/assistants/tools/file-search#supported-files

Example (HTTP_record)
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"os"

	"github.com/maruel/genai"
	"github.com/maruel/genai/httprecord"
	"github.com/maruel/genai/providers/openaichat"
	"gopkg.in/dnaeon/go-vcr.v4/pkg/recorder"
)

func main() {
	// Example to do HTTP recording and playback for smoke testing.
	// The example recording is in testdata/example.yaml.
	var rr *recorder.Recorder
	defer func() {
		// In a smoke test, use t.Cleanup().
		// Stop flushes new recordings to disk; skipping it would lose them.
		if rr != nil {
			if err := rr.Stop(); err != nil {
				log.Printf("Failed saving recordings: %v", err)
			}
		}
	}()

	// Simple trick to force recording via an environment variable.
	mode := recorder.ModeRecordOnce
	if os.Getenv("RECORD") == "1" {
		mode = recorder.ModeRecordOnly
	}
	// wrapper installs the recorder around the client's HTTP transport so
	// requests are served from (or saved to) the cassette.
	wrapper := func(h http.RoundTripper) http.RoundTripper {
		var err error
		rr, err = httprecord.New("testdata/example", h, recorder.WithMode(mode))
		if err != nil {
			log.Fatal(err)
		}
		return rr
	}
	// When playing back the smoke test, no API key is needed. Insert a fake API key.
	var opts []genai.ProviderOption
	if os.Getenv("OPENAI_API_KEY") == "" {
		opts = append(opts, genai.ProviderOptionAPIKey("<insert_api_key_here>"))
	}
	ctx := context.Background()
	c, err := openaichat.New(ctx, append([]genai.ProviderOption{genai.ProviderOptionTransportWrapper(wrapper)}, opts...)...)
	if err != nil {
		log.Fatal(err)
	}
	models, err := c.ListModels(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if len(models) > 1 {
		fmt.Println("Found multiple models")
	}
}
Output:

Found multiple models

func (*Client) CacheAddRequest

func (c *Client) CacheAddRequest(ctx context.Context, msgs genai.Messages, name, displayName string, ttl time.Duration, opts ...genai.GenOption) (string, error)

CacheAddRequest adds a cache entry.

func (*Client) CacheDelete

func (c *Client) CacheDelete(ctx context.Context, name string) error

CacheDelete deletes a cache entry.

func (*Client) CacheList

func (c *Client) CacheList(ctx context.Context) ([]genai.CacheEntry, error)

CacheList lists cache entries.

func (*Client) Cancel

func (c *Client) Cancel(ctx context.Context, id genai.Job) error

Cancel cancels an in-progress batch. The batch will be in status cancelling for up to 10 minutes, before changing to cancelled, where it will have partial results (if any) available in the output file.

func (*Client) CancelRaw

func (c *Client) CancelRaw(ctx context.Context, id genai.Job) (Batch, error)

CancelRaw cancels a batch request.

func (*Client) Capabilities

func (c *Client) Capabilities() genai.ProviderCapabilities

Capabilities implements genai.Provider.

func (*Client) FileAdd

func (c *Client) FileAdd(ctx context.Context, filename string, r io.ReadSeeker) (string, error)

FileAdd uploads a file. The TTL is one month.

func (*Client) FileDel

func (c *Client) FileDel(ctx context.Context, id string) error

FileDel deletes a file.

func (*Client) FileGet

func (c *Client) FileGet(ctx context.Context, id string) (io.ReadCloser, error)

FileGet retrieves a file.

func (*Client) FilesListRaw

func (c *Client) FilesListRaw(ctx context.Context) ([]File, error)

FilesListRaw lists files.

func (*Client) GenAsync

func (c *Client) GenAsync(ctx context.Context, msgs genai.Messages, opts ...genai.GenOption) (genai.Job, error)

GenAsync implements genai.ProviderGenAsync.

It requests the providers' batch API and returns the job ID. It can take up to 24 hours to complete.

func (*Client) GenAsyncRaw

func (c *Client) GenAsyncRaw(ctx context.Context, b BatchRequest) (Batch, error)

GenAsyncRaw runs an asynchronous generation request.

func (*Client) GenStream

func (c *Client) GenStream(ctx context.Context, msgs genai.Messages, opts ...genai.GenOption) (iter.Seq[genai.Reply], func() (genai.Result, error))

GenStream implements genai.Provider.

func (*Client) GenStreamRaw

func (c *Client) GenStreamRaw(ctx context.Context, in *ChatRequest) (iter.Seq[ChatStreamChunkResponse], func() error)

GenStreamRaw provides access to the raw API.

func (*Client) GenSync

func (c *Client) GenSync(ctx context.Context, msgs genai.Messages, opts ...genai.GenOption) (genai.Result, error)

GenSync implements genai.Provider.

func (*Client) GenSyncRaw

func (c *Client) GenSyncRaw(ctx context.Context, in *ChatRequest, out *ChatResponse) error

GenSyncRaw provides access to the raw API.

func (*Client) HTTPClient

func (c *Client) HTTPClient() *http.Client

HTTPClient returns the HTTP client to fetch results (e.g. videos) generated by the provider.

func (*Client) ListModels

func (c *Client) ListModels(ctx context.Context) ([]genai.Model, error)

ListModels implements genai.Provider.

func (*Client) ModelID

func (c *Client) ModelID() string

ModelID implements genai.Provider.

It returns the selected model ID.

func (*Client) Name

func (c *Client) Name() string

Name implements genai.Provider.

It returns the name of the provider.

func (*Client) OutputModalities

func (c *Client) OutputModalities() genai.Modalities

OutputModalities implements genai.Provider.

It returns the output modalities, i.e. what kind of output the model will generate (text, audio, image, video, etc).

func (*Client) PokeResult

func (c *Client) PokeResult(ctx context.Context, id genai.Job) (genai.Result, error)

PokeResult implements genai.ProviderGenAsync.

It retrieves the result for a job ID.

func (*Client) PokeResultRaw

func (c *Client) PokeResultRaw(ctx context.Context, id genai.Job) (Batch, error)

PokeResultRaw polls an asynchronous generation request.

func (*Client) Scoreboard

func (c *Client) Scoreboard() scoreboard.Score

Scoreboard implements genai.Provider.

type Content

// Content is a provider-specific content block. Exactly one of the
// type-specific groups below is populated, selected by Type.
type Content struct {
	Type ContentType `json:"type,omitzero"`

	// Type == "text"
	Text string `json:"text,omitzero"`

	// Type == "image_url"
	ImageURL struct {
		URL    string `json:"url,omitzero"`
		Detail string `json:"detail,omitzero"` // "auto", "low", "high"
	} `json:"image_url,omitzero"`

	// Type == "input_audio"
	InputAudio struct {
		Data []byte `json:"data,omitzero"`
		// https://platform.openai.com/docs/guides/speech-to-text
		Format string `json:"format,omitzero"` // "mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"
	} `json:"input_audio,omitzero"`

	// Type == "file"
	File struct {
		// Either FileID or both Filename and FileData.
		FileID   string `json:"file_id,omitzero"` // Use https://platform.openai.com/docs/api-reference/files
		Filename string `json:"filename,omitzero"`
		FileData string `json:"file_data,omitzero"`
	} `json:"file,omitzero"`
}

Content is a provider-specific content block.

func (*Content) FromReply

func (c *Content) FromReply(in *genai.Reply) error

FromReply converts from a genai reply.

func (*Content) FromRequest

func (c *Content) FromRequest(in *genai.Request) error

FromRequest converts from a genai request.

func (*Content) To

func (c *Content) To(out *genai.Reply) error

To converts to the genai equivalent.

type ContentType

type ContentType string

ContentType is a provider-specific content type.

// Content type values.
const (
	ContentText       ContentType = "text"
	ContentImageURL   ContentType = "image_url"
	ContentInputAudio ContentType = "input_audio"
	ContentRefusal    ContentType = "refusal"
	ContentAudio      ContentType = "audio"
	ContentFile       ContentType = "file"
)

Content type values.

type Contents

type Contents []Content

Contents is a collection of content blocks.

func (*Contents) UnmarshalJSON

func (c *Contents) UnmarshalJSON(b []byte) error

UnmarshalJSON implements json.Unmarshaler.

OpenAI replies with content as a string.

type ErrorResponse

// ErrorResponse is the provider-specific error response envelope.
type ErrorResponse struct {
	ErrorVal ErrorResponseError `json:"error"`
}

ErrorResponse is the provider-specific error response.

func (*ErrorResponse) Error

func (er *ErrorResponse) Error() string

func (*ErrorResponse) IsAPIError

func (er *ErrorResponse) IsAPIError() bool

IsAPIError implements base.ErrorResponseI.

type ErrorResponseError

// ErrorResponseError is the nested error object in an ErrorResponse.
type ErrorResponseError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
	Status  string `json:"status"`
	Type    string `json:"type"`
	Param   string `json:"param"`
}

ErrorResponseError is the nested error in an error response.

type File

// File describes an uploaded file.
//
// It is documented at https://platform.openai.com/docs/api-reference/files/object
type File struct {
	Bytes         int64     `json:"bytes"` // File size
	CreatedAt     base.Time `json:"created_at"`
	ExpiresAt     base.Time `json:"expires_at"`
	Filename      string    `json:"filename"`
	ID            string    `json:"id"`
	Object        string    `json:"object"`         // "file"
	Purpose       string    `json:"purpose"`        // One of: assistants, assistants_output, batch, batch_output, fine-tune, fine-tune-results and vision
	Status        string    `json:"status"`         // Deprecated
	StatusDetails string    `json:"status_details"` // Deprecated
}

File is documented at https://platform.openai.com/docs/api-reference/files/object

func (*File) GetDisplayName

func (f *File) GetDisplayName() string

GetDisplayName implements genai.CacheItem.

func (*File) GetExpiry

func (f *File) GetExpiry() time.Time

GetExpiry implements genai.CacheItem.

func (*File) GetID

func (f *File) GetID() string

GetID implements genai.Model.

type FileDeleteResponse

// FileDeleteResponse is the reply to a file deletion request.
//
// It is documented at https://platform.openai.com/docs/api-reference/files/delete
type FileDeleteResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"` // "file"
	Deleted bool   `json:"deleted"`
}

FileDeleteResponse is documented at https://platform.openai.com/docs/api-reference/files/delete

type FileListResponse

// FileListResponse is the reply to a file listing request.
//
// It is documented at https://platform.openai.com/docs/api-reference/files/list
type FileListResponse struct {
	Data   []File `json:"data"`
	Object string `json:"object"` // "list"
}

FileListResponse is documented at https://platform.openai.com/docs/api-reference/files/list

type FinishReason

type FinishReason string

FinishReason is a provider-specific finish reason.

// Finish reason values.
const (
	FinishStop          FinishReason = "stop"
	FinishLength        FinishReason = "length"
	FinishToolCalls     FinishReason = "tool_calls"
	FinishContentFilter FinishReason = "content_filter"
)

Finish reason values.

func (FinishReason) ToFinishReason

func (f FinishReason) ToFinishReason() genai.FinishReason

ToFinishReason converts to a genai.FinishReason.

type GenOptionImage added in v0.2.0

// GenOptionImage defines OpenAI specific image generation options.
type GenOptionImage struct {
	// Background is only supported on gpt-image-1.
	Background Background
}

GenOptionImage defines OpenAI specific options.

func (*GenOptionImage) Validate added in v0.2.0

func (o *GenOptionImage) Validate() error

Validate implements genai.Validatable.

type GenOptionText added in v0.2.0

// GenOptionText defines OpenAI specific text generation options.
type GenOptionText struct {
	// ReasoningEffort is the amount of effort (number of tokens) the LLM can use to think about the answer.
	//
	// When unspecified, defaults to medium.
	ReasoningEffort ReasoningEffort
	// ServiceTier specify the priority.
	ServiceTier ServiceTier
}

GenOptionText defines OpenAI specific options.

func (*GenOptionText) Validate added in v0.2.0

func (o *GenOptionText) Validate() error

Validate implements genai.Validatable.

type ImageChoiceData

// ImageChoiceData is the data for one image generation choice.
type ImageChoiceData struct {
	B64JSON       []byte `json:"b64_json"`
	RevisedPrompt string `json:"revised_prompt"` // dall-e-3 only
	URL           string `json:"url"`            // Unsupported for gpt-image-1
}

ImageChoiceData is the data for one image generation choice.

type ImageRequest

// ImageRequest is the request payload for image generation.
//
// It is documented at https://platform.openai.com/docs/api-reference/images
type ImageRequest struct {
	Prompt            string     `json:"prompt"`
	Model             string     `json:"model,omitzero"`              // Default to dall-e-2, unless a gpt-image-1 specific parameter is used.
	Background        Background `json:"background,omitzero"`         // Default "auto"
	Moderation        string     `json:"moderation,omitzero"`         // gpt-image-1: "low" or "auto"
	N                 int64      `json:"n,omitzero"`                  // Number of images to return
	OutputCompression float64    `json:"output_compression,omitzero"` // Defaults to 100. Only supported on gpt-image-1 with webp or jpeg
	OutputFormat      string     `json:"output_format,omitzero"`      // "png", "jpeg" or "webp". Defaults to png. Only supported on gpt-image-1.
	Quality           string     `json:"quality,omitzero"`            // "auto", gpt-image-1: "high", "medium", "low". dall-e-3: "hd", "standard". dall-e-2: "standard".
	ResponseFormat    string     `json:"response_format,omitzero"`    // "url" or "b64_json"; url is valid for 60 minutes; gpt-image-1 only returns b64_json
	Size              string     `json:"size,omitzero"`               // "auto", gpt-image-1: "1024x1024", "1536x1024", "1024x1536". dall-e-3: "1024x1024", "1792x1024", "1024x1792". dall-e-2: "256x256", "512x512", "1024x1024".
	Style             string     `json:"style,omitzero"`              // dall-e-3: "vivid", "natural"
	User              string     `json:"user,omitzero"`               // End-user to help monitor and detect abuse
}

ImageRequest is documented at https://platform.openai.com/docs/api-reference/images

func (*ImageRequest) Init

func (i *ImageRequest) Init(msg *genai.Message, model string, opts ...genai.GenOption) error

Init initializes the request from the given parameters.

type ImageResponse

// ImageResponse is the provider-specific image generation response.
type ImageResponse struct {
	Created base.Time         `json:"created"`
	Data    []ImageChoiceData `json:"data"`
	Usage   struct {
		InputTokens        int64 `json:"input_tokens"`
		OutputTokens       int64 `json:"output_tokens"`
		TotalTokens        int64 `json:"total_tokens"`
		InputTokensDetails struct {
			TextTokens  int64 `json:"text_tokens"`
			ImageTokens int64 `json:"image_tokens"`
		} `json:"input_tokens_details"`
	} `json:"usage"`
	Background   string `json:"background"`    // "opaque"
	Size         string `json:"size"`          // e.g. "1024x1024"
	Quality      string `json:"quality"`       // e.g. "medium"
	OutputFormat string `json:"output_format"` // e.g. "png"
}

ImageResponse is the provider-specific image generation response.

type Logprobs

// Logprobs is the provider-specific per-token log probabilities.
type Logprobs struct {
	Content []struct {
		Token       string  `json:"token"`
		Bytes       []byte  `json:"bytes"`
		Logprob     float64 `json:"logprob"`
		TopLogprobs []struct {
			Token   string  `json:"token"`
			Bytes   []byte  `json:"bytes"`
			Logprob float64 `json:"logprob"`
		} `json:"top_logprobs"`
	} `json:"content"`
	Refusal string `json:"refusal"`
}

Logprobs is the provider-specific log probabilities.

func (*Logprobs) To

func (l *Logprobs) To() [][]genai.Logprob

To converts to the genai equivalent.

type Message

// Message is one conversation turn sent to or received from the API.
//
// It is documented at https://platform.openai.com/docs/api-reference/chat/create
type Message struct {
	Role    string   `json:"role,omitzero"` // "developer", "assistant", "user"
	Name    string   `json:"name,omitzero"` // An optional name for the participant. Provides the model information to differentiate between participants of the same role.
	Content Contents `json:"content,omitzero"`
	Refusal string   `json:"refusal,omitzero"` // The refusal message by the assistant.
	Audio   struct {
		ID string `json:"id,omitzero"`
	} `json:"audio,omitzero"`
	ToolCalls   []ToolCall   `json:"tool_calls,omitzero"`
	ToolCallID  string       `json:"tool_call_id,omitzero"` // TODO: Document the role of this field.
	Annotations []Annotation `json:"annotations,omitzero"`
}

Message is documented at https://platform.openai.com/docs/api-reference/chat/create

func (*Message) From

func (m *Message) From(in *genai.Message) error

From must be called with at most one ToolCallResults.

func (*Message) To

func (m *Message) To(out *genai.Message) error

To converts to the genai equivalent.

type Model

// Model describes one available model.
//
// It is documented at https://platform.openai.com/docs/api-reference/models/object
type Model struct {
	ID      string    `json:"id"`
	Object  string    `json:"object"`
	Created base.Time `json:"created"`
	OwnedBy string    `json:"owned_by"`
}

Model is documented at https://platform.openai.com/docs/api-reference/models/object

Sadly the modalities aren't reported. The only way I can think of to find it at run time is to fetch https://platform.openai.com/docs/models/gpt-4o-mini-realtime-preview, find the div containing "Modalities:", then extract the modalities from the text.

func (*Model) Context

func (m *Model) Context() int64

Context implements genai.Model.

func (*Model) GetID

func (m *Model) GetID() string

GetID implements genai.Model.

func (*Model) String

func (m *Model) String() string

type ModelsResponse

// ModelsResponse represents the response structure for OpenAI models listing.
type ModelsResponse struct {
	Object string  `json:"object"` // list
	Data   []Model `json:"data"`
}

ModelsResponse represents the response structure for OpenAI models listing.

func (*ModelsResponse) ToModels

func (r *ModelsResponse) ToModels() []genai.Model

ToModels converts OpenAI models to genai.Model interfaces.

type ReasoningEffort

type ReasoningEffort string

ReasoningEffort is the effort the model should put into reasoning. Default is Medium.

https://platform.openai.com/docs/api-reference/assistants/createAssistant#assistants-createassistant-reasoning_effort https://platform.openai.com/docs/guides/reasoning

// Reasoning effort values.
const (
	ReasoningEffortNone    ReasoningEffort = "none"
	ReasoningEffortMinimal ReasoningEffort = "minimal"
	ReasoningEffortLow     ReasoningEffort = "low"
	ReasoningEffortMedium  ReasoningEffort = "medium"
	ReasoningEffortHigh    ReasoningEffort = "high"
	ReasoningEffortXHigh   ReasoningEffort = "xhigh"
)

Reasoning effort values.

func (ReasoningEffort) Validate added in v0.2.0

func (r ReasoningEffort) Validate() error

Validate implements genai.Validatable.

type ServiceTier

type ServiceTier string

ServiceTier is the quality of service to determine the request's priority.

// Service tier values.
const (
	// ServiceTierAuto will utilize scale tier credits until they are exhausted if the Project is Scale tier
	// enabled, else the request will be processed using the default service tier with a lower uptime SLA and no
	// latency guarantee.
	//
	// https://openai.com/api-scale-tier/
	ServiceTierAuto ServiceTier = "auto"
	// ServiceTierDefault has the request be processed using the default service tier with a lower uptime SLA
	// and no latency guarantee.
	ServiceTierDefault ServiceTier = "default"
	// ServiceTierFlex has the request be processed with the Flex Processing service tier.
	//
	// Flex processing is in beta, and currently only available for GPT-5, o3 and o4-mini models.
	//
	// https://platform.openai.com/docs/guides/flex-processing
	ServiceTierFlex ServiceTier = "flex"
)

func (ServiceTier) Validate added in v0.2.0

func (s ServiceTier) Validate() error

Validate implements genai.Validatable.

type Tool

// Tool is a provider-specific tool (function) definition advertised to the model.
type Tool struct {
	Type     string `json:"type,omitzero"` // "function"
	Function struct {
		Description string             `json:"description,omitzero"`
		Name        string             `json:"name,omitzero"`
		Parameters  *jsonschema.Schema `json:"parameters,omitzero"`
		Strict      bool               `json:"strict,omitzero"`
	} `json:"function,omitzero"`
}

Tool is a provider-specific tool definition.

type ToolCall

// ToolCall is a provider-specific tool call emitted by the model.
// Function.Arguments is a JSON-encoded string, not a nested object.
type ToolCall struct {
	Index    int64  `json:"index,omitzero"`
	ID       string `json:"id,omitzero"`
	Type     string `json:"type,omitzero"` // "function"
	Function struct {
		Name      string `json:"name,omitzero"`
		Arguments string `json:"arguments,omitzero"`
	} `json:"function,omitzero"`
}

ToolCall is a provider-specific tool call.

func (*ToolCall) From

func (t *ToolCall) From(in *genai.ToolCall) error

From converts from the genai equivalent.

func (*ToolCall) To

func (t *ToolCall) To(out *genai.ToolCall)

To converts to the genai equivalent.

type Usage

// Usage is the provider-specific token usage accounting.
type Usage struct {
	PromptTokens        int64 `json:"prompt_tokens"`
	CompletionTokens    int64 `json:"completion_tokens"`
	TotalTokens         int64 `json:"total_tokens"`
	PromptTokensDetails struct {
		CachedTokens int64 `json:"cached_tokens"`
		AudioTokens  int64 `json:"audio_tokens"`
		TextTokens   int64 `json:"text_tokens"`
		ImageTokens  int64 `json:"image_tokens"`
	} `json:"prompt_tokens_details"`
	CompletionTokensDetails struct {
		ReasoningTokens          int64 `json:"reasoning_tokens"`
		AudioTokens              int64 `json:"audio_tokens"`
		AcceptedPredictionTokens int64 `json:"accepted_prediction_tokens"`
		RejectedPredictionTokens int64 `json:"rejected_prediction_tokens"`
		TextTokens               int64 `json:"text_tokens"`
	} `json:"completion_tokens_details"`
}

Usage is the provider-specific token usage.

type WebSearchOptions

type WebSearchOptions struct {
	SearchContextSize string `json:"search_context_size,omitzero"` // "low", "medium", "high"
	UserLocation      struct {
		Type        string `json:"type,omitzero"` // "approximate"
		Approximate struct {
			Country string `json:"country,omitzero"` // "GB"
			City    string `json:"city,omitzero"`    // "London"
			Region  string `json:"region,omitzero"`  // "London"
		} `json:"approximate,omitzero"`
	} `json:"user_location,omitzero"`
}

WebSearchOptions is "documented" at https://platform.openai.com/docs/guides/tools-web-search

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL