types

package
v0.0.5
Published: Oct 26, 2025 License: MPL-2.0 Imports: 6 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type Audio

type Audio struct {
	Base64 string      `json:"base64,omitempty"`
	Format AudioFormat `json:"format,omitempty"`
}

type AudioFormat

type AudioFormat string
const (
	AudioFormatWAV   AudioFormat = "wav"
	AudioFormatMP3   AudioFormat = "mp3"
	AudioFormatAAC   AudioFormat = "aac"
	AudioFormatFLAC  AudioFormat = "flac"
	AudioFormatOpus  AudioFormat = "opus"
	AudioFormatPcm16 AudioFormat = "pcm16"
)

type AudioOutput

type AudioOutput struct {
	Voice  string      `json:"voice"`
	Format AudioFormat `json:"format"`
}

type Content

type Content struct {
	Text  string `json:"text,omitempty"`
	Image *Image `json:"image,omitempty"`
	Audio *Audio `json:"audio,omitempty"`
}

func NewAudioContent

func NewAudioContent(data []byte, format AudioFormat) Content

NewAudioContent creates a Content containing audio data. It takes the raw audio as a byte slice and its format as an AudioFormat; the returned Content has its Audio field populated with the provided data and format.
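
Example

A minimal usage sketch; the import path is a placeholder for this module's real path, and clip.wav is an arbitrary input file:

package main

import (
	"fmt"
	"os"

	types "example.com/module/types" // placeholder: substitute this module's import path
)

func main() {
	// Read raw audio bytes; NewAudioContent populates the Content's
	// Audio field with the data and format.
	data, err := os.ReadFile("clip.wav")
	if err != nil {
		panic(err)
	}
	content := types.NewAudioContent(data, types.AudioFormatWAV)
	fmt.Println(content.Audio.Format) // wav
}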

func NewImageContent

func NewImageContent(imageContent []byte, format ImageFormat) Content

func NewImageUrlContent

func NewImageUrlContent(url string, detail ...ImageDetailLevel) Content

func NewTextContent

func NewTextContent(text string) Content

func (Content) AsSlice

func (c Content) AsSlice() []Content
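
Example

A sketch of assembling multimodal parts with the constructors above; buildParts is a hypothetical helper, `types` refers to this package, and AsSlice is assumed to wrap its receiver in a one-element slice, as its signature suggests:

func buildParts() []types.Content {
	text := types.NewTextContent("What is in this picture?")
	image := types.NewImageUrlContent("https://example.com/cat.png")

	// AsSlice wraps a single Content in a one-element slice,
	// convenient when appending further parts for a multimodal message.
	return append(text.AsSlice(), image)
}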

type ContentType

type ContentType string

type ErrUnknownRole

type ErrUnknownRole struct {
	Role string
}

func (ErrUnknownRole) Error

func (e ErrUnknownRole) Error() string
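
Example

Since ErrUnknownRole implements error with a value receiver, it can be matched with errors.As. A sketch, where describeRoleErr is a hypothetical helper inspecting an error returned elsewhere:

func describeRoleErr(err error) {
	var unknown types.ErrUnknownRole
	if errors.As(err, &unknown) {
		// The offending role string is carried on the error value.
		log.Printf("unsupported role %q", unknown.Role)
	}
}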

type Image

type Image struct {
	ImageData *ImageData `json:"image_data,omitempty"`
	Url       string     `json:"url,omitempty"`
	// OpenAI: The level of detail for the image description. Options are "auto", "low", "medium", and "high". Default is "auto".
	Detail ImageDetailLevel `json:"detail,omitempty"`
}

type ImageData

type ImageData struct {
	Base64 string      `json:"base64,omitempty"`
	Format ImageFormat `json:"format,omitempty"`
}

type ImageDetailLevel

type ImageDetailLevel string
const (
	ImageDetailAuto   ImageDetailLevel = "auto"
	ImageDetailLow    ImageDetailLevel = "low"
	ImageDetailMedium ImageDetailLevel = "medium"
	ImageDetailHigh   ImageDetailLevel = "high"
)
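
Example

A sketch passing an explicit detail level through NewImageUrlContent's variadic parameter; the URL and the lowDetailImage helper are illustrative:

func lowDetailImage() types.Content {
	// Low detail trades image fidelity for fewer tokens (OpenAI).
	return types.NewImageUrlContent("https://example.com/diagram.png", types.ImageDetailLow)
}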

type ImageFormat

type ImageFormat string
const (
	ImageFormatPNG  ImageFormat = "png"
	ImageFormatJPG  ImageFormat = "jpg"
	ImageFormatWEBP ImageFormat = "webp"
)

type Message

type Message struct {
	Role      Role    `json:"role"`
	Content   Content `json:"content"`
	Reasoning string  `json:"reasoning"`
	// Assistant tool calls
	ToolCalls []ToolCall `json:"tool_calls"`
	// OpenAI: An optional name for the participant. Provides the model information to differentiate between participants of the same role.
	Name string `json:"name"`
	// OpenAI: Assistant refusal message
	Refusal string `json:"refusal"`
	// Anthropic: User boolean indicating whether function call resulted in an error.
	IsErr bool `json:"is_err"`
}
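
Example

A sketch of a short conversation, including a tool result flagged as failed via the Anthropic-specific IsErr field; the message texts and the buildConversation helper are illustrative:

func buildConversation() []types.Message {
	return []types.Message{
		{Role: types.RoleSystem, Content: types.NewTextContent("You are a helpful assistant.")},
		{Role: types.RoleUser, Content: types.NewTextContent("What is the weather in Oslo?")},
		{
			Role:    types.RoleTool,
			Content: types.NewTextContent("upstream weather API timed out"),
			IsErr:   true, // Anthropic: the tool call failed
		},
	}
}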

type Modality

type Modality string
const (
	ModalityText  Modality = "text"
	ModalityAudio Modality = "audio"
)

type Options

type Options struct {
	// OpenAI: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
	FrequencyPenalty *float64
	// OpenAI: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.
	Logprobs bool
	// OpenAI: An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
	MaxCompletionTokens uint
	// The maximum number of tokens to generate before stopping.
	MaxTokens uint
	// OpenAI: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
	// N *uint
	// OpenAI: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
	PresencePenalty float64
	// OpenAI: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
	Seed *int64
	// OpenAI: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
	// Anthropic: Amount of randomness injected into the response. Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0` for analytical / multiple choice, and closer to `1.0` for creative and generative tasks. Note that even with `temperature` of `0.0`, the results will not be fully deterministic.
	Temperature *float64
	// OpenAI: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.
	TopLogprobs *int32
	TopP        *float64
	TopK        *float32
	// OpenAI: Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
	ParallelToolCalls *bool
	// OpenAI: Cache responses for similar requests to optimize your cache hit rates. Replaces the `user` field.
	// Google: Resource name of a context cache that can be used in subsequent requests.
	PromptCacheKey string
	// OpenAI: A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies. The IDs should be a string that uniquely identifies each user. We recommend hashing their username or email address, in order to avoid sending us any identifying information. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
	SafetyIdentifier string
	// OpenAI: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use `prompt_cache_key` instead to maintain caching optimizations. A stable identifier for your end-users. Used to boost cache hit rates by better bucketing similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
	User string
	// OpenAI: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
	LogitBias map[string]int64
	// OpenAI: **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). (Accepts [ReasoningEffortUnion.ofString])
	// Ollama: Think controls whether thinking/reasoning models will think before responding. (Accepts both [ReasoningEffortUnion.ofString] and [ReasoningEffortUnion.ofBool])
	ReasoningEffort *ReasoningEffortUnion
	// OpenAI: Specifies the processing type used for serving the request. Any of "auto", "default", "flex", "scale", "priority".
	// Anthropic: Any of "auto", "standard_only"
	ServiceTier string
	// OpenAI: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
	// Anthropic: Custom text sequences that will cause the model to stop generating.
	Stop []string
	// Schema specifying the format that the model must output.
	//
	// *Supported providers*: OpenAI, Ollama
	ResponseFormat *jsonschema.Schema
	// Anthropic: Enable extended thinking with this token budget. Must be ≥ 1024 and less than `max_tokens`.
	Thinking uint64
	// Ollama: How long the model will stay loaded into memory following the request.
	KeepAlive *time.Duration
	// Google:
	//     - `text/plain` (default)
	//     - `application/json`
	ResponseMIMEType string
	// OpenAI: Include usage statistics in streaming mode.
	IncludeStreamMetrics bool
	// l337: Controls channel buffering for streaming responses. Defaults to `0` (unbuffered).
	StreamingBufferSize int
	// OpenAI: Output types that you would like the model to generate. Defaults to `["text"]`.
	//     - `text`: Plain text output.
	//     - `audio`: Audio output (if supported by model).
	Modalities []Modality
	Audio      AudioOutput
}
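
Example

The pointer-typed fields distinguish "unset" from an explicit zero. A sketch of populating Options; ptr and buildOptions are hypothetical helpers, not part of this package:

// ptr is a small hypothetical helper for the pointer-typed fields.
func ptr[T any](v T) *T { return &v }

func buildOptions() types.Options {
	return types.Options{
		MaxTokens:       1024,
		Temperature:     ptr(0.2),       // low temperature: focused, near-deterministic output
		Seed:            ptr(int64(42)), // OpenAI: best-effort determinism
		Stop:            []string{"\n\n"},
		ReasoningEffort: types.NewReasoningEffortLevel(types.ReasoningEffortLow),
		Modalities:      []types.Modality{types.ModalityText},
	}
}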

type Parameter

type Parameter interface {
	Apply(*Run) error
}

func WithAudioContentMessage

func WithAudioContentMessage(role Role, audioContent []byte, format AudioFormat) Parameter

func WithImageContentMessage

func WithImageContentMessage(role Role, imageContent []byte, format ImageFormat) Parameter

func WithImageUrlMessage

func WithImageUrlMessage(role Role, imageURL string) Parameter

func WithSessionID

func WithSessionID(sessionID uuid.UUID) Parameter

func WithTextMessage

func WithTextMessage(role Role, content string) Parameter

type ParameterFunc

type ParameterFunc func(*Run) error

func (ParameterFunc) Apply

func (s ParameterFunc) Apply(r *Run) error
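
Example

ParameterFunc adapts an ordinary function to the Parameter interface, so ad-hoc parameters can be written inline next to the built-in constructors. A sketch; withCapMessages and applyAll are hypothetical:

// withCapMessages is a hypothetical custom parameter that truncates history.
func withCapMessages(n int) types.Parameter {
	return types.ParameterFunc(func(r *types.Run) error {
		if len(r.Messages) > n {
			r.Messages = r.Messages[len(r.Messages)-n:]
		}
		return nil
	})
}

func applyAll(r *types.Run) error {
	params := []types.Parameter{
		types.WithTextMessage(types.RoleUser, "hello"),
		withCapMessages(20),
	}
	for _, p := range params {
		if err := p.Apply(r); err != nil {
			return err
		}
	}
	return nil
}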

type ReasoningEffortLevel

type ReasoningEffortLevel string
const (
	ReasoningEffortLow    ReasoningEffortLevel = "low"
	ReasoningEffortMedium ReasoningEffortLevel = "medium"
	ReasoningEffortHigh   ReasoningEffortLevel = "high"
)

type ReasoningEffortUnion

type ReasoningEffortUnion struct {
	// contains filtered or unexported fields
}

func NewReasoningEffortBool

func NewReasoningEffortBool(enabled bool) *ReasoningEffortUnion

func NewReasoningEffortLevel

func NewReasoningEffortLevel(level ReasoningEffortLevel) *ReasoningEffortUnion

func (*ReasoningEffortUnion) AsAny

func (r *ReasoningEffortUnion) AsAny() any

func (*ReasoningEffortUnion) AsBool

func (r *ReasoningEffortUnion) AsBool() (bool, bool)

func (*ReasoningEffortUnion) AsLevel

func (r *ReasoningEffortUnion) AsLevel() (ReasoningEffortLevel, bool)
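
Example

The union holds either an effort level or a boolean, and the As* accessors use the comma-ok form. A sketch; it assumes AsLevel mirrors AsBool's shape, and inspectEffort is a hypothetical helper:

func inspectEffort() {
	effort := types.NewReasoningEffortLevel(types.ReasoningEffortHigh)

	if level, ok := effort.AsLevel(); ok {
		fmt.Println("effort level:", level) // effort level: high
	}
	if _, ok := effort.AsBool(); !ok {
		fmt.Println("not a boolean: this union was built from a level")
	}
}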

type Response

type Response struct {
	ID           string          `json:"id"`
	Created      time.Time       `json:"created"`
	Content      Content         `json:"content"`
	Refusal      string          `json:"refusal"`
	Reasoning    string          `json:"reasoning"`
	ToolCalls    []ToolCall      `json:"tool_calls"`
	FinishReason string          `json:"finish_reason"`
	Metrics      metrics.Metrics `json:"metrics"`
}

type Role

type Role string
const (
	RoleAssistant Role = "assistant"
	RoleDeveloper Role = "developer"
	RoleSystem    Role = "system"
	RoleTool      Role = "tool"
	RoleUser      Role = "user"
	RoleModel     Role = "model"
)

func (Role) String

func (r Role) String() string

type Run

type Run struct {
	SessionID uuid.UUID                       `json:"session_id"`
	Messages  []Message                       `json:"messages,omitempty"`
	Metrics   map[uuid.UUID][]metrics.Metrics `json:"metrics,omitempty"`
}

func (*Run) Content

func (r *Run) Content() Content

Content returns the content of the last message in the run.

type ToolCall

type ToolCall struct {
	// Unique identifier for the tool call.
	ID string `json:"id"`
	// Raw LLM arguments.
	Arguments string `json:"arguments"`
	// Tool name
	Name string `json:"name"`
}
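
Example

Arguments carries the model's raw JSON, so handling a tool call typically means unmarshalling it into a request struct with encoding/json. A sketch; dispatch, weatherArgs, and the get_weather tool are hypothetical:

// weatherArgs is a hypothetical argument shape for a "get_weather" tool.
type weatherArgs struct {
	City string `json:"city"`
}

func dispatch(tc types.ToolCall) error {
	switch tc.Name {
	case "get_weather":
		var args weatherArgs
		if err := json.Unmarshal([]byte(tc.Arguments), &args); err != nil {
			return fmt.Errorf("tool %s (%s): bad arguments: %w", tc.Name, tc.ID, err)
		}
		fmt.Println("fetching weather for", args.City)
	}
	return nil
}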
