gai

package module
v0.0.0-...-293a3f4 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Feb 10, 2026 License: MIT Imports: 6 Imported by: 4

README

Go Artificial Intelligence (GAI)

Logo

GoDoc CI

Go Artificial Intelligence (GAI) helps you work with foundational models, large language models, and other AI models.

Pronounced like "guy".

⚠️ This library is in development. Things will probably break, but existing functionality is usable. ⚠️

go get maragu.dev/gai

Made with ✨sparkles✨ by maragu: independent software consulting for cloud-native Go apps & AI engineering.

Contact me at markus@maragu.dk for consulting work, or perhaps an invoice to support this project?

Usage

Clients

These client implementations are available:

Examples

Click to expand each section, or see all examples under internal/examples.

Tools
package main

import (
	"context"
	"fmt"
	"log/slog"
	"os"
	"time"

	"maragu.dev/gai"
	"maragu.dev/gai/clients/openai"
	"maragu.dev/gai/tools"
)

// main asks the model for the time, lets it call the bundled get-time tool,
// feeds the tool result back in a second request, and prints the final answer.
func main() {
	ctx := context.Background()
	log := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// The client reads its API key from the environment.
	c := openai.NewClient(openai.NewClientOptions{
		Key: os.Getenv("OPENAI_API_KEY"),
		Log: log,
	})

	cc := c.NewChatCompleter(openai.NewChatCompleterOptions{
		Model: openai.ChatCompleteModelGPT4o,
	})

	req := gai.ChatCompleteRequest{
		Messages: []gai.Message{
			gai.NewUserTextMessage("What time is it?"),
		},
		System: gai.Ptr("You are a British seagull. Speak like it."),
		Tools: []gai.Tool{
			tools.NewGetTime(time.Now), // Note that some tools that only require the stdlib are included in GAI
		},
	}

	res, err := cc.ChatComplete(ctx, req)
	if err != nil {
		log.Error("Error chat-completing", "error", err)
		return
	}

	// Collect every streamed part so the model's tool call can be echoed back
	// in the follow-up request, and keep the result of executing the tool.
	var parts []gai.MessagePart
	var result gai.ToolResult

	for part, err := range res.Parts() {
		if err != nil {
			log.Error("Error processing part", "error", err)
			return
		}

		parts = append(parts, part)

		switch part.Type {
		case gai.MessagePartTypeText:
			fmt.Print(part.Text())

		case gai.MessagePartTypeToolCall:
			// Look up the requested tool by name and execute it ourselves.
			toolCall := part.ToolCall()
			for _, tool := range req.Tools {
				if tool.Name != toolCall.Name {
					continue
				}

				content, err := tool.Execute(ctx, toolCall.Args) // Tools aren't called automatically, so you can decide if, how, and when
				// NOTE(review): result is overwritten on every tool call, so only the
				// last call is answered — fine for this single-tool example.
				result = gai.ToolResult{
					ID:      toolCall.ID,
					Name:    toolCall.Name,
					Content: content,
					Err:     err, // execution errors are handed back to the model, not treated as fatal here
				}
				break
			}
		}
	}

	// A zero ID means the model never requested a tool call.
	if result.ID == "" {
		log.Error("No tool result found")
		return
	}

	// Add both the tool call (in the parts) and the tool result to the messages, and make another request
	req.Messages = append(req.Messages,
		gai.Message{Role: gai.MessageRoleModel, Parts: parts},
		gai.NewUserToolResultMessage(result),
	)

	res, err = cc.ChatComplete(ctx, req)
	if err != nil {
		log.Error("Error chat-completing", "error", err)
		return
	}

	// Stream and print the model's final, natural-language answer.
	for part, err := range res.Parts() {
		if err != nil {
			log.Error("Error processing part", "error", err)
			return
		}

		switch part.Type {
		case gai.MessagePartTypeText:
			fmt.Print(part.Text())
		}
	}
}
$ go run main.go
Ahoy, mate! The time be 15:20, it be!
Tools (custom)
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log/slog"
	"math/rand/v2"
	"os"

	"maragu.dev/gai"
	"maragu.dev/gai/clients/openai"
)

// EatArgs are the arguments for the "eat" tool.
type EatArgs struct {
	What string `json:"what" jsonschema_description:"What you'd like to eat."`
}

// NewEat returns a tool that "eats" whatever is named in the arguments and
// reports back one of a few canned outcomes, chosen at random.
func NewEat() gai.Tool {
	execute := func(ctx context.Context, args json.RawMessage) (string, error) {
		var a EatArgs
		if err := json.Unmarshal(args, &a); err != nil {
			return "", fmt.Errorf("error unmarshaling eat args from JSON: %w", err)
		}

		outcomes := []string{
			"it was okay.",
			"it was absolutely excellent!",
			"it was awful.",
			"it gave you diarrhea.",
		}
		outcome := outcomes[rand.IntN(len(outcomes))]

		return "You ate " + a.What + " and " + outcome, nil
	}

	return gai.Tool{
		Name:        "eat",
		Description: "Eat something, supplying what you eat as an argument. The result will be a string describing how it was.",
		Schema:      gai.GenerateToolSchema[EatArgs](),
		Execute:     execute,
	}
}

// main asks the model to use the custom "eat" tool, executes the resulting
// tool call, sends the result back in a second request, and prints the
// model's final answer.
func main() {
	ctx := context.Background()
	log := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// The client reads its API key from the environment.
	c := openai.NewClient(openai.NewClientOptions{
		Key: os.Getenv("OPENAI_API_KEY"),
		Log: log,
	})

	cc := c.NewChatCompleter(openai.NewChatCompleterOptions{
		Model: openai.ChatCompleteModelGPT4o,
	})

	req := gai.ChatCompleteRequest{
		Messages: []gai.Message{
			gai.NewUserTextMessage("Eat something, and tell me how it was. Elaborate."),
		},
		System: gai.Ptr("You are a British seagull. Speak like it. You must use the \"eat\" tool."),
		Tools: []gai.Tool{
			NewEat(),
		},
	}

	res, err := cc.ChatComplete(ctx, req)
	if err != nil {
		log.Error("Error chat-completing", "error", err)
		return
	}

	// Collect every streamed part so the model's tool call can be echoed back
	// in the follow-up request, and keep the result of executing the tool.
	var parts []gai.MessagePart
	var result gai.ToolResult

	for part, err := range res.Parts() {
		if err != nil {
			log.Error("Error processing part", "error", err)
			return
		}

		parts = append(parts, part)

		switch part.Type {
		case gai.MessagePartTypeText:
			fmt.Print(part.Text())

		case gai.MessagePartTypeToolCall:
			// Look up the requested tool by name and execute it ourselves.
			toolCall := part.ToolCall()
			for _, tool := range req.Tools {
				if tool.Name != toolCall.Name {
					continue
				}

				content, err := tool.Execute(ctx, toolCall.Args) // Tools aren't called automatically, so you can decide if, how, and when
				result = gai.ToolResult{
					ID:      toolCall.ID,
					Name:    toolCall.Name,
					Content: content,
					Err:     err, // execution errors are handed back to the model, not treated as fatal here
				}
				break
			}
		}
	}

	// A zero ID means the model never requested a tool call.
	if result.ID == "" {
		log.Error("No tool result found")
		return
	}

	// Add both the tool call (in the parts) and the tool result to the messages, and make another request
	req.Messages = append(req.Messages,
		gai.Message{Role: gai.MessageRoleModel, Parts: parts},
		gai.NewUserToolResultMessage(result),
	)
	// Drop the system prompt for the follow-up — presumably so the model isn't
	// instructed to call the "eat" tool again; TODO confirm intent.
	req.System = nil

	res, err = cc.ChatComplete(ctx, req)
	if err != nil {
		log.Error("Error chat-completing", "error", err)
		return
	}

	// Stream and print the model's final, natural-language answer.
	for part, err := range res.Parts() {
		if err != nil {
			log.Error("Error processing part", "error", err)
			return
		}

		switch part.Type {
		case gai.MessagePartTypeText:
			fmt.Print(part.Text())
		}
	}
}
$ go run main.go
I had some fish and chips leftover from a tourist's lunch. It wasn't the freshest, but it had that classic blend of crispy batter and tender fish, with a side of golden fries. The flavors were enjoyable, albeit a bit cold. Unfortunately, not everything went smoothly afterward, as it gave me an upset stomach. Eating leftovers can sometimes be a gamble, and this time, it didn't pay off as I had hoped!
Evals

Evals will only run with go test -run TestEval ./... and otherwise be skipped.

Eval a model, construct a sample, score it with a lexical similarity scorer and a semantic similarity scorer, and log the results:

package evals_test

import (
	"os"
	"testing"

	"maragu.dev/gai"
	"maragu.dev/gai/clients/openai"
	"maragu.dev/gai/eval"
)

// TestEvalSeagull evaluates how a seagull's day is going.
// All evals must be prefixed with "TestEval".
// TestEvalSeagull evaluates how a seagull's day is going.
// All evals must be prefixed with "TestEval".
func TestEvalSeagull(t *testing.T) {
	client := openai.NewClient(openai.NewClientOptions{
		Key: os.Getenv("OPENAI_API_KEY"),
	})

	completer := client.NewChatCompleter(openai.NewChatCompleterOptions{
		Model: openai.ChatCompleteModelGPT4o,
	})

	embedder := client.NewEmbedder(openai.NewEmbedderOptions{
		Dimensions: 1536,
		Model:      openai.EmbedModelTextEmbedding3Small,
	})

	// Skipped unless "go test" runs with "-test.run=TestEval", e.g.: "go test -test.run=TestEval ./..."
	eval.Run(t, "answers about the day", func(t *testing.T, e *eval.E) {
		input := "What are you doing today?"

		res, err := completer.ChatComplete(t.Context(), gai.ChatCompleteRequest{
			Messages: []gai.Message{
				gai.NewUserTextMessage(input),
			},
			System: gai.Ptr("You are a British seagull. Speak like it."),
		})
		if err != nil {
			t.Fatal(err)
		}

		// Drain the streamed response (via the Parts() iterator) into one string.
		var output string
		for part, err := range res.Parts() {
			if err != nil {
				t.Fatal(err)
			}
			output += part.Text()
		}

		// Build the sample that the scorers will judge.
		sample := eval.Sample{
			Input:    input,
			Output:   output,
			Expected: "Oh, splendid day it is! You know, I'm just floatin' about on the breeze, keepin' an eye out for a cheeky chip or two. Might pop down to the seaside, see if I can nick a sarnie from some unsuspecting holidaymaker. It's a gull's life, innit? How about you, what are you up to?",
		}

		// Score lexically (Levenshtein distance) and semantically
		// (embedding vectors compared with cosine similarity).
		lexical := e.Score(sample, eval.LexicalSimilarityScorer(eval.LevenshteinDistance))
		semantic := e.Score(sample, eval.SemanticSimilarityScorer(t, embedder, eval.CosineSimilarity))

		// Persist the sample, both scores, and timing information.
		e.Log(sample, lexical, semantic)
	})
}

Output in the file evals.jsonl:

{
	"Name":"TestEvalSeagull/answers_about_the_day",
	"Group":"Seagull",
	"Sample":{
		"Input":"What are you doing today?",
		"Expected":"Oh, splendid day it is! You know, I'm just floatin' about on the breeze, keepin' an eye out for a cheeky chip or two. Might pop down to the seaside, see if I can nick a sarnie from some unsuspecting holidaymaker. It's a gull's life, innit? How about you, what are you up to?",
		"Output":"Ah, 'ello there! Well, today's a splendid day for a bit of mischief and scavenging, innit? Got me eye on the local chippy down by the pier. Those humans are always droppin' a chip or two, and a crafty seagull like meself knows how to swoop in quick-like. Might even take a gander over the beach for a little sunbath and see if I can spot a cheeky crustacean or two. All in a day's work for a proper British seagull like me! What's keepin' you busy, then?"
	},
	"Results":[
		{"Score":0.28634361233480177,"Type":"LexicalSimilarity"},
		{"Score":0.9064784491110223,"Type":"SemanticSimilarity"}
	],
	"Duration":6316444292
}

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func Ptr

func Ptr[T any](v T) *T

func ReadAllString

func ReadAllString(r io.Reader) string

ReadAllString is like io.ReadAll, but returns a string, and panics on errors. Useful for situations where the read cannot error.

Types

type ChatCompleteFinishReason

type ChatCompleteFinishReason string

ChatCompleteFinishReason describes why the model stopped generating tokens.

const (
	// ChatCompleteFinishReasonUnknown indicates that the provider did not supply a recognised termination code.
	ChatCompleteFinishReasonUnknown ChatCompleteFinishReason = "unknown"
	// ChatCompleteFinishReasonStop indicates that generation stopped naturally or due to a configured stop sequence.
	ChatCompleteFinishReasonStop ChatCompleteFinishReason = "stop"
	// ChatCompleteFinishReasonLength indicates that generation hit the configured token limit.
	ChatCompleteFinishReasonLength ChatCompleteFinishReason = "length"
	// ChatCompleteFinishReasonContentFilter indicates that a platform-level moderation filter blocked the content.
	ChatCompleteFinishReasonContentFilter ChatCompleteFinishReason = "content_filter"
	// ChatCompleteFinishReasonToolCalls indicates that the model requested a tool invocation mid-response.
	ChatCompleteFinishReasonToolCalls ChatCompleteFinishReason = "tool_calls"
	// ChatCompleteFinishReasonRefusal indicates that the model produced a refusal message of its own accord.
	ChatCompleteFinishReasonRefusal ChatCompleteFinishReason = "refusal"
)

type ChatCompleteRequest

type ChatCompleteRequest struct {
	MaxCompletionTokens *int
	Messages            []Message
	ResponseSchema      *Schema
	System              *string
	Temperature         *Temperature
	ThinkingLevel       *ThinkingLevel
	Tools               []Tool
}

ChatCompleteRequest for a chat model.

type ChatCompleteResponse

type ChatCompleteResponse struct {
	Meta *ChatCompleteResponseMetadata
	// contains filtered or unexported fields
}

ChatCompleteResponse for ChatCompleter. Construct with NewChatCompleteResponse. Note that the [ChatCompleteResponse.Meta] field is a pointer, because it's updated continuously until the streaming response with ChatCompleteResponse.Parts is complete.

func NewChatCompleteResponse

func NewChatCompleteResponse(partsFunc iter.Seq2[MessagePart, error]) ChatCompleteResponse

func (ChatCompleteResponse) Parts

type ChatCompleteResponseMetadata

type ChatCompleteResponseMetadata struct {
	Usage ChatCompleteResponseUsage
	// FinishReason is optional; nil indicates the provider omitted a finish signal entirely.
	FinishReason *ChatCompleteFinishReason
}

ChatCompleteResponseMetadata contains metadata about the request and response, for example, token usage.

type ChatCompleteResponseUsage

type ChatCompleteResponseUsage struct {
	PromptTokens     int
	ThoughtsTokens   int
	CompletionTokens int
}

type ChatCompleter

type ChatCompleter interface {
	ChatComplete(ctx context.Context, req ChatCompleteRequest) (ChatCompleteResponse, error)
}

ChatCompleter is satisfied by models supporting chat completion. Streaming chat completion is preferred where possible, so that methods on ChatCompleteResponse, like ChatCompleteResponse.Parts, can be used to stream the response.

type EmbedRequest

type EmbedRequest struct {
	Input io.Reader
}

EmbedRequest for Embedder.

type EmbedResponse

type EmbedResponse[T VectorComponent] struct {
	Embedding []T
}

EmbedResponse for Embedder.

type Embedder

type Embedder[T VectorComponent] interface {
	Embed(ctx context.Context, p EmbedRequest) (EmbedResponse[T], error)
}

Embedder is satisfied by models supporting embedding.

type Message

type Message struct {
	Role  MessageRole
	Parts []MessagePart
}

func NewModelTextMessage

func NewModelTextMessage(text string) Message

NewModelTextMessage is a convenience function to create a new model text message.

func NewUserDataMessage

func NewUserDataMessage(mimeType string, data io.Reader) Message

NewUserDataMessage is a convenience function to create a new user data message.

func NewUserTextMessage

func NewUserTextMessage(text string) Message

NewUserTextMessage is a convenience function to create a new user text message.

func NewUserToolResultMessage

func NewUserToolResultMessage(result ToolResult) Message

type MessagePart

type MessagePart struct {
	Type     MessagePartType
	Data     io.Reader
	MIMEType string
	// contains filtered or unexported fields
}

func DataMessagePart

func DataMessagePart(mimeType string, data io.Reader) MessagePart

func TextMessagePart

func TextMessagePart(text string) MessagePart

func ToolCallPart

func ToolCallPart(id, name string, args json.RawMessage) MessagePart

func (MessagePart) Text

func (m MessagePart) Text() string

func (MessagePart) ToolCall

func (m MessagePart) ToolCall() ToolCall

func (MessagePart) ToolResult

func (m MessagePart) ToolResult() ToolResult

type MessagePartType

type MessagePartType string

MessagePartType for MessagePart.

const (
	MessagePartTypeData       MessagePartType = "data"
	MessagePartTypeText       MessagePartType = "text"
	MessagePartTypeToolCall   MessagePartType = "tool_call"
	MessagePartTypeToolResult MessagePartType = "tool_result"
)

type MessageRole

type MessageRole string

MessageRole for Message.

const (
	MessageRoleUser  MessageRole = "user"
	MessageRoleModel MessageRole = "model"
)

type Schema

type Schema struct {
	// Optional. The value should be validated against any (one or more) of the subschemas
	// in the list.
	AnyOf []*Schema `json:"anyOf,omitempty"`

	// Optional. Default value of the data.
	Default any `json:"default,omitempty"`

	// Optional. The description of the data.
	Description string `json:"description,omitempty"`

	// Optional. Possible values of the element of primitive type with enum format. Examples:
	// 1. We can define direction as : {type:STRING, format:enum, enum:["EAST", "NORTH",
	// "SOUTH", "WEST"]} 2. We can define apartment number as : {type:INTEGER, format:enum,
	// enum:["101", "201", "301"]}
	Enum []string `json:"enum,omitempty"`

	// Optional. Example of the object. Will only be populated when the object is the root.
	Example any `json:"example,omitempty"`

	// Optional. The format of the data. Supported formats: for NUMBER type: "float", "double"
	// for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc
	Format string `json:"format,omitempty"`

	// Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY.
	Items *Schema `json:"items,omitempty"`

	// Optional. Maximum number of the elements for Type.ARRAY.
	MaxItems *int64 `json:"maxItems,omitempty,string"`

	// Optional. Maximum value of the Type.INTEGER and Type.NUMBER
	Maximum *float64 `json:"maximum,omitempty"`

	// Optional. Minimum number of the elements for Type.ARRAY.
	MinItems *int64 `json:"minItems,omitempty,string"`

	// Optional. Minimum value of the Type.INTEGER and Type.NUMBER.
	Minimum *float64 `json:"minimum,omitempty"`

	// Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT.
	Properties map[string]*Schema `json:"properties,omitempty"`

	// Optional. The order of the properties. Not a standard field in open API spec. Only
	// used to support the order of the properties.
	PropertyOrdering []string `json:"propertyOrdering,omitempty"`

	// Optional. Required properties of Type.OBJECT.
	Required []string `json:"required,omitempty"`

	// Optional. The title of the Schema.
	Title string `json:"title,omitempty"`

	// Optional. The type of the data.
	Type SchemaType `json:"type,omitempty"`
}

func GenerateSchema

func GenerateSchema[T any]() Schema

GenerateSchema from any type. See github.com/invopop/jsonschema for struct tags etc.

type SchemaType

type SchemaType string
const (
	// OpenAPI string type
	SchemaTypeString SchemaType = "string"
	// OpenAPI number type
	SchemaTypeNumber SchemaType = "number"
	// OpenAPI integer type
	SchemaTypeInteger SchemaType = "integer"
	// OpenAPI boolean type
	SchemaTypeBoolean SchemaType = "boolean"
	// OpenAPI array type
	SchemaTypeArray SchemaType = "array"
	// OpenAPI object type
	SchemaTypeObject SchemaType = "object"
)

type Temperature

type Temperature float64

func (Temperature) Float64

func (t Temperature) Float64() float64

func (Temperature) String

func (t Temperature) String() string

String satisfies fmt.Stringer.

type ThinkingLevel

type ThinkingLevel string

ThinkingLevel controls how much reasoning effort the model applies. Not all levels are supported by all providers; unsupported levels will panic.

const (
	// ThinkingLevelNone disables thinking entirely.
	ThinkingLevelNone ThinkingLevel = "none"
	// ThinkingLevelMinimal applies minimal thinking.
	ThinkingLevelMinimal ThinkingLevel = "minimal"
	// ThinkingLevelLow applies low thinking effort.
	ThinkingLevelLow ThinkingLevel = "low"
	// ThinkingLevelMedium applies medium thinking effort.
	ThinkingLevelMedium ThinkingLevel = "medium"
	// ThinkingLevelHigh applies high thinking effort.
	ThinkingLevelHigh ThinkingLevel = "high"
	// ThinkingLevelXHigh applies extra-high thinking effort.
	ThinkingLevelXHigh ThinkingLevel = "xhigh"
	// ThinkingLevelMax applies maximum thinking effort.
	ThinkingLevelMax ThinkingLevel = "max"
)

type Tool

type Tool struct {
	Name        string
	Description string
	Schema      ToolSchema
	Execute     ToolFunction
	Summarize   ToolFunction
}

Tool definition.

type ToolCall

type ToolCall struct {
	ID   string
	Name string
	Args json.RawMessage
}

type ToolFunction

type ToolFunction func(ctx context.Context, rawArgs json.RawMessage) (string, error)

type ToolResult

type ToolResult struct {
	ID      string
	Name    string
	Content string
	Err     error
}

TODO tool result can be string but also other types, such as image!

type ToolSchema

type ToolSchema struct {
	Properties map[string]*Schema
}

ToolSchema in JSON Schema format of the arguments the tool accepts.

func GenerateToolSchema

func GenerateToolSchema[T any]() ToolSchema

type VectorComponent

type VectorComponent interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64
}

VectorComponent is a single component of a vector.

Directories

Path Synopsis
clients
Package eval lets you evaluate models with various Scorer functions.
Package eval lets you evaluate models with various Scorer functions.
internal
examples/tools command

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL