Documentation ¶
Index ¶
- Constants
- Variables
- type AIProperty
- type Analytics
- type BaseModel
- type F
- type FuncParams
- type GoFunctionTool
- type ImageGenOptions
- func WithImgAspectRatio(aspectRatio string) ImageGenOptions
- func WithImgDisabledSafetyFilters() ImageGenOptions
- func WithImgImageSize(imageSize string) ImageGenOptions
- func WithImgMimeType(mimeType string) ImageGenOptions
- func WithImgNegativePrompt(prompt string) ImageGenOptions
- func WithImgPersonGeneration(policy string) ImageGenOptions
- func WithImgSpecialConfig(cfg map[SpecialConfig]any) ImageGenOptions
- func WithImgTemperature(temperature float32) ImageGenOptions
- func WithImgUserPrePrompt(prompt string) ImageGenOptions
- func WithNImages(n int) ImageGenOptions
- func WithOutputDirectory(dir string) ImageGenOptions
- type ImageModels
- type KarmaAI
- func (kai *KarmaAI) AddGoFunctionTool(tool internalopenai.GoFunctionTool) error
- func (kai *KarmaAI) ChatCompletion(messages models.AIChatHistory) (*models.AIChatResponse, error)
- func (kai *KarmaAI) ChatCompletionManaged(history *models.AIChatHistory) (*models.AIChatResponse, error)
- func (kai *KarmaAI) ChatCompletionStream(messages models.AIChatHistory, ...) (*models.AIChatResponse, error)
- func (kai *KarmaAI) ChatCompletionStreamManaged(history *models.AIChatHistory, ...) (*models.AIChatResponse, error)
- func (kai *KarmaAI) ClearGoFunctionTools()
- func (kai *KarmaAI) DeleteAnalyticProperty(property AIProperty)
- func (kai *KarmaAI) EnableTools()
- func (kai *KarmaAI) GenerateFromSinglePrompt(prompt string) (*models.AIChatResponse, error)
- func (kai *KarmaAI) GetEmbeddings(text string) (*models.AIEmbeddingResponse, error)
- func (kai *KarmaAI) GetSpecialConfig(c SpecialConfig) (any, error)
- func (kai *KarmaAI) SendErrorEvent(err error)
- func (kai *KarmaAI) SendEvent()
- func (kai *KarmaAI) SetAnalyticProperty(property AIProperty, val any)
- type KarmaImageGen
- type MCPServer
- type MCPTool
- type ModelConfig
- type Option
- func AddGoFunctionTool(tool internalopenai.GoFunctionTool) Option
- func AddMCPServer(server MCPServer) Option
- func ConfigureAnalytics(distinctID, traceID string) Option
- func SetCustomModelVariant(m string) Option
- func SetGoFunctionTools(tools []internalopenai.GoFunctionTool) Option
- func SetMCPAuthToken(token string) Option
- func SetMCPServers(servers []MCPServer) Option
- func SetMCPTools(tools []MCPTool) Option
- func SetMCPUrl(url string) Option
- func WithContext(context string) Option
- func WithDirectToolCalls() Option
- func WithMaxTokens(tokens int) Option
- func WithMaxToolPasses(max int) Option
- func WithReasoningEffort(effort shared.ReasoningEffort) Option
- func WithResponseType(responseType string) Option
- func WithSpecialConfig(config map[SpecialConfig]any) Option
- func WithSystemMessage(message string) Option
- func WithTemperature(temp float32) Option
- func WithToolsEnabled() Option
- func WithTopK(topK int) Option
- func WithTopP(topP float32) Option
- func WithUserPrePrompt(prompt string) Option
- type Provider
- type SpecialConfig
Constants ¶
const ( XAI_API = "https://api.x.ai/v1" GROQ_API = "https://api.groq.com/openai/v1" SARVAM_API = "https://api.sarvam.ai/v1" FIREWORKS_API = "https://api.fireworks.ai/inference/v1" OPENROUTER_API = "https://openrouter.ai/api/v1" )
API URLs for different providers
const AIGenerationEvent string = "$ai_generation"
Variables ¶
var ( ProviderModelMapping providerMap = map[Provider]map[BaseModel]string{ OpenAI: { GPT4: "gpt-4", GPT4o: "gpt-4o", GPT4oMini: "gpt-4o-mini", GPT4Turbo: "gpt-4-turbo", GPT35Turbo: "gpt-3.5-turbo", GPT5: "gpt-5", GPT5Nano: "gpt-5-nano", GPT5Mini: "gpt-5-mini", O1: "o1", O1Mini: "o1-mini", O1Preview: "o1-preview", TextEmbeddingAda002: "text-embedding-ada-002", TextEmbedding3Large: "text-embedding-3-large", TextEmbedding3Small: "text-embedding-3-small", }, Anthropic: { ClaudeInstant: "claude-instant-v1.2", ClaudeV2: "claude-v2.1", Claude3Sonnet: "claude-3-sonnet-20240229", Claude3Haiku: "claude-3-haiku-20240307", Claude3Opus: "claude-3-opus-20240229", Claude35Sonnet: "claude-3.5-sonnet-20241022", Claude35Haiku: "claude-3.5-haiku-20241022", Claude37Sonnet: "claude-3.7-sonnet-20250219", Claude4Sonnet: "claude-4-sonnet-20250514", Claude4Opus: "claude-4-opus-20250514", }, Bedrock: { ClaudeInstant: "anthropic.claude-instant-v1", ClaudeV2: "anthropic.claude-v2:1", Claude3Sonnet: "anthropic.claude-3-sonnet-20240229-v1:0", Claude3Haiku: "anthropic.claude-3-haiku-20240307-v1:0", Claude3Opus: "anthropic.claude-3-opus-20240229-v1:0", Claude35Sonnet: "anthropic.claude-3-5-sonnet-20241022-v2:0", Claude35Haiku: "anthropic.claude-3-5-haiku-20241022-v1:0", Claude37Sonnet: "us.anthropic.claude-3-7-sonnet-20250219-v1:0", Llama3_8B: "meta.llama3-8b-instruct-v1:0", Llama3_70B: "meta.llama3-70b-instruct-v1:0", Llama31_8B: "meta.llama3-1-8b-instruct-v1:0", Llama31_70B: "meta.llama3-1-70b-instruct-v1:0", Llama32_1B: "meta.llama3-2-1b-instruct-v1:0", Llama32_3B: "meta.llama3-2-3b-instruct-v1:0", Llama32_11B: "meta.llama3-2-11b-instruct-v1:0", Llama32_90B: "meta.llama3-2-90b-instruct-v1:0", Llama33_70B: "meta.llama3-3-70b-instruct-v1:0", Mistral7B: "mistral.mistral-7b-instruct-v0:2", Mixtral8x7B: "mistral.mixtral-8x7b-instruct-v0:1", MistralLarge: "mistral.mistral-large-2402-v1:0", MistralSmall: "mistral.mistral-small-2402-v1:0", TitanTextG1Large: "amazon.titan-tg1-large", TitanTextPremier: 
"amazon.titan-text-premier-v1:0", TitanTextLite: "amazon.titan-text-lite-v1:0", TitanTextExpress: "amazon.titan-text-express-v1:0", TitanEmbedText: "amazon.titan-embed-text-v1:2", TitanEmbedImage: "amazon.titan-embed-image-v1:0", NovaPro: "amazon.nova-pro-v1:0", NovaLite: "amazon.nova-lite-v1:0", NovaCanvas: "amazon.nova-canvas-v1:0", NovaReel: "amazon.nova-reel-v1:0", NovaMicro: "amazon.nova-micro-v1:0", }, Google: { Gemini3FlashPreview: "gemini-3-flash-preview", Gemini3ProPreview: "gemini-3-pro-preview", Gemini25Flash: "gemini-2.5-flash", Gemini25Pro: "gemini-2.5-pro", Gemini20Flash: "gemini-2.0-flash", Gemini20FlashLite: "gemini-2.0-flash-lite", Gemini15Flash: "gemini-1.5-flash", Gemini15Flash8B: "gemini-1.5-flash-8b", Gemini15Pro: "gemini-1.5-pro", GeminiEmbedding: "text-embedding-004", PaLM2: "palm-2", Llama4_Scout_17B: "meta/llama-4-maverick-17b-128e-instruct-maas", Llama33_70B: "meta/llama-3.3-70b-instruct-maas", Llama32_90B: "meta/llama-3.2-90b-vision-instruct-maas", Llama31_405B: "meta/llama-3.1-405b-instruct-maas", KimiK2Thinking: "moonshotai/kimi-k2-thinking-maas", MiniMaxM2: "minimaxai/minimax-m2-maas", GPTOSS_120B: "openai/gpt-oss-120b-maas", GPTOSS_20B: "openai/gpt-oss-20b-maas", }, XAI: { Grok4: "grok-4", Grok4Fast: "grok-4-fast-non-reasoning", Grok4ReasoningFast: "grok-4-fast-reasoning", GrokCodeFast: "grok-code-fast-1", Grok3: "grok-3", Grok3Mini: "grok-3-mini", }, Groq: { Llama31_8B: "llama-3.1-8b-instant", Llama33_70B: "llama-3.3-70b-versatile", Llama4_Guard_12B: "meta-llama/llama-guard-4-12b", Llama4_Scout_17B: "meta-llama/llama-4-scout-17b-16e-instruct", GPTOSS_120B: "openai/gpt-oss-120b", GPTOSS_20B: "openai/gpt-oss-20b", Quew3_32B: "qwen/qwen3-32b", }, Sarvam: { SarvamM: "sarvam-m", }, FireworksAI: { MiniMaxM2P1: "accounts/fireworks/models/minimax-m2p1", MiniMaxM2: "accounts/fireworks/models/minimax-m2", GLM4_7: "accounts/fireworks/models/glm-4p7", DeepSeekV3P2: "accounts/fireworks/models/deepseek-v3p2", KimiK2Thinking: 
"accounts/fireworks/models/kimi-k2-thinking", GLM4_6: "accounts/fireworks/models/glm-4p6", Quew3_235B_VL_Thinking: "accounts/fireworks/models/qwen3-vl-235b-a22b-thinking", Quew3_235B_VL_Instruct: "accounts/fireworks/models/qwen3-vl-235b-a22b-instruct", Qwen3_Coder_480B_Instruct: "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct", Quew3_235B_Thinking: "accounts/fireworks/models/qwen3-235b-a22b-thinking-2507", Quew3_235B_Instruct: "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct", GPTOSS_120B: "accounts/fireworks/models/gpt-oss-120b", GPTOSS_20B: "accounts/fireworks/models/gpt-oss-20b", Llama33_70B: "accounts/fireworks/models/llama-v3p3-70b-instruct", }, OpenRouter: { GPTOSS_120B: "openai/gpt-oss-120b", GPTOSS_20B: "openai/gpt-oss-20b", GPT5Mini: "openai/gpt-5-mini", GPT5Nano: "openai/gpt-5-nano", GPT5_1: "openai/gpt-5.1", GPT5_2: "openai/gpt-5.2", GPT5_2_Pro: "openai/gpt-5.2-pro", GPT5_1Codex: "openai/gpt-5.1-codex", GPT5_1CodexMax: "openai/gpt-5.1-codex-max", GPT5_2Codex: "openai/gpt-5.2-codex", GPT5_2CodexMax: "openai/gpt-5.2-codex-max", Claude4_5Opus: "anthropic/claude-sonnet-4.5", Claude4_5Sonnet: "anthropic/claude-sonnet-4.5", MiniMaxM2P1: "minimax/minimax-m2.1", MiniMaxM2: "minimax/minimax-m2", Gemini3FlashPreview: "google/gemini-3-flash-preview", Gemini3ProPreview: "google/gemini-3-pro-preview", Gemini25Pro: "google/gemini-2.5-pro", Gemini25Flash: "google/gemini-2.5-flash", Gemini20FlashLite: "google/gemini-2.5-flash-lite", KimiK2Thinking: "moonshotai/kimi-k2-thinking", Llama33_70B: "meta-llama/llama-3.3-70b-instruct", Llama4_Scout_17B: "meta-llama/llama-4-scout", Qwen3_Coder_480B_Instruct: "qwen/qwen3-coder", Quew3_235B_Instruct: "qwen/qwen3-235b-a22b-2507", Quew3_235B_VL_Instruct: "qwen/qwen3-vl-235b-a22b-instruct", Quew3_235B_Thinking: "qwen/qwen3-vl-235b-a22b-thinking", }, } )
Functions ¶
This section is empty.
Types ¶
type AIProperty ¶ added in v1.15.4
type AIProperty string
AIProperty represents the keys used for AI event tracking
const ( // Posthog LLM observability properties AITraceID AIProperty = "$ai_trace_id" // The trace ID (UUID to group AI events), similar to conversation_id. Must contain only letters, numbers, and special characters: -, _, ~, ., @, (, ), !, ', :, | AIModel AIProperty = "$ai_model" // The model used (e.g., gpt-3.5-turbo) AIProvider AIProperty = "$ai_provider" // The LLM provider name AIInput AIProperty = "$ai_input" // List of messages sent to the LLM (e.g., [{"role": "user", "content": "..."}]) AIInputTokens AIProperty = "$ai_input_tokens" // The number of tokens in the input (from response.usage) AIOutputChoices AIProperty = "$ai_output_choices" // List of choices returned by the LLM (e.g., [{"role": "assistant", "content": "..."}]) AIOutputTokens AIProperty = "$ai_output_tokens" // The number of tokens in the output (from response.usage) AILatency AIProperty = "$ai_latency" // The latency of the LLM call in seconds AIHTTPStatus AIProperty = "$ai_http_status" // The HTTP status code of the LLM response AIBaseURL AIProperty = "$ai_base_url" // The base URL of the LLM provider AIIsError AIProperty = "$ai_is_error" // Boolean indicating whether the request resulted in an error AIError AIProperty = "$ai_error" // The error message or object if the request failed // Custom properties SystemPrompt AIProperty = "$kai_system_prompt" ToolCallEnabled AIProperty = "$kai_tool_call_enabled" McpServerUrls AIProperty = "$kai_mcp_server_urls" Temperature AIProperty = "$kai_temperature" TopP AIProperty = "$kai_top_p" TopK AIProperty = "$kai_top_k" MaxTokens AIProperty = "$kai_max_tokens" )
type Analytics ¶ added in v1.15.4
type Analytics struct {
DistinctID string `json:"distinct_id"`
TraceId string `json:"trace_id"`
CaptureUserPrompts bool `json:"capture_user_prompts"`
CaptureAIResponses bool `json:"capture_ai_responses"`
CaptureToolCalls bool `json:"capture_tool_calls"`
// contains filtered or unexported fields
}
Analytics represents analytics configuration
type BaseModel ¶ added in v1.15.16
type BaseModel string
BaseModel represents the core model without provider-specific naming
const ( // OpenAI Models GPT4 BaseModel = "gpt-4" GPT4o BaseModel = "gpt-4o" GPT4oMini BaseModel = "gpt-4o-mini" GPT4Turbo BaseModel = "gpt-4-turbo" GPT35Turbo BaseModel = "gpt-3.5-turbo" GPT5 BaseModel = "gpt-5" GPT5Nano BaseModel = "gpt-5-nano" GPT5Mini BaseModel = "gpt-5-mini" GPT5_1 BaseModel = "gpt-5.1" GPT5_2 BaseModel = "gpt-5.2" GPT5_2_Pro BaseModel = "gpt-5.2-pro" GPT5_1Codex BaseModel = "gpt-5.1-codex" GPT5_1CodexMax BaseModel = "gpt-5.1-codex-max" GPT5_2Codex BaseModel = "gpt-5.2-codex" GPT5_2CodexMax BaseModel = "gpt-5.2-codex-max" O1 BaseModel = "o1" O1Mini BaseModel = "o1-mini" O1Preview BaseModel = "o1-preview" GPTOSS_20B BaseModel = "gpt-oss-20b" GPTOSS_120B BaseModel = "gpt-oss-120b" // Text Embedding Models TextEmbeddingAda002 BaseModel = "text-embedding-ada-002" TextEmbedding3Large BaseModel = "text-embedding-3-large" TextEmbedding3Small BaseModel = "text-embedding-3-small" // Claude Models Claude35Sonnet BaseModel = "claude-3.5-sonnet" Claude35Haiku BaseModel = "claude-3.5-haiku" Claude3Sonnet BaseModel = "claude-3-sonnet" Claude3Haiku BaseModel = "claude-3-haiku" Claude3Opus BaseModel = "claude-3-opus" Claude37Sonnet BaseModel = "claude-3.7-sonnet" Claude4Sonnet BaseModel = "claude-4-sonnet" Claude4_5Sonnet BaseModel = "claude-4.5-sonnet" Claude4Opus BaseModel = "claude-4-opus" Claude4_5Opus BaseModel = "claude-4.5-opus" ClaudeInstant BaseModel = "claude-instant" ClaudeV2 BaseModel = "claude-v2" // Llama Models Llama3_8B BaseModel = "llama-3-8b" Llama3_70B BaseModel = "llama-3-70b" Llama31_8B BaseModel = "llama-3.1-8b" Llama31_70B BaseModel = "llama-3.1-70b" Llama31_405B BaseModel = "llama-3.1-405b" Llama32_1B BaseModel = "llama-3.2-1b" Llama32_3B BaseModel = "llama-3.2-3b" Llama32_11B BaseModel = "llama-3.2-11b" Llama32_90B BaseModel = "llama-3.2-90b" Llama33_70B BaseModel = "llama-3.3-70b" Llama4_Guard_12B BaseModel = "llama-4-guard-12b" Llama4_Scout_17B BaseModel = "llama-4-scout-17b" // Mistral Models Mistral7B BaseModel = "mistral-7b" 
Mixtral8x7B BaseModel = "mixtral-8x7b" MistralLarge BaseModel = "mistral-large" MistralSmall BaseModel = "mistral-small" // Quew Models Quew3_32B BaseModel = "quew3-32b" Quew3_235B_VL_Thinking BaseModel = "quew3-235b-vl" Quew3_235B_VL_Instruct BaseModel = "quew3-235b-vl-instruct" Quew3_235B_Thinking BaseModel = "quew3-235b-thinking" Quew3_235B_Instruct BaseModel = "quew3-235b-instruct" Qwen3_Coder_480B_Instruct BaseModel = "qwen3-coder-480b-instruct" // Moonshot Models KimiK2Thinking BaseModel = "kimi-k2-thinking" MiniMaxM2 BaseModel = "minimax-m2" MiniMaxM2P1 BaseModel = "minimax-m2p1" GLM4_7 BaseModel = "glm4-7" GLM4_6 BaseModel = "glm4-6" // Deepseek DeepSeekV3P2 BaseModel = "deepseek-v3p2" // Amazon Titan Models TitanTextG1Large BaseModel = "titan-text-g1-large" TitanTextPremier BaseModel = "titan-text-premier" TitanTextLite BaseModel = "titan-text-lite" TitanTextExpress BaseModel = "titan-text-express" TitanEmbedText BaseModel = "titan-embed-text" TitanEmbedImage BaseModel = "titan-embed-image" // Amazon Nova Models NovaPro BaseModel = "nova-pro" NovaLite BaseModel = "nova-lite" NovaCanvas BaseModel = "nova-canvas" NovaReel BaseModel = "nova-reel" NovaMicro BaseModel = "nova-micro" // Google Models Gemini3FlashPreview BaseModel = "gemini-3-flash-preview" Gemini3ProPreview BaseModel = "gemini-3-pro-preview" Gemini25Flash BaseModel = "gemini-2.5-flash" Gemini25Pro BaseModel = "gemini-2.5-pro" Gemini20Flash BaseModel = "gemini-2.0-flash" Gemini20FlashLite BaseModel = "gemini-2.0-flash-lite" Gemini15Flash BaseModel = "gemini-1.5-flash" Gemini15Flash8B BaseModel = "gemini-1.5-flash-8b" Gemini15Pro BaseModel = "gemini-1.5-pro" GeminiEmbedding BaseModel = "gemini-embedding" PaLM2 BaseModel = "palm-2" // xAI Models Grok4 BaseModel = "grok-4" GrokCodeFast BaseModel = "grok-code-fast-1" Grok4ReasoningFast BaseModel = "grok-4-fast-reasoning" Grok4Fast BaseModel = "grok-4-fast-non-reasoning" Grok3 BaseModel = "grok-3" Grok3Mini BaseModel = "grok-3-mini" // Sarvam AI 
Models SarvamM BaseModel = "sarvam-m" )
Base Models - Core models without provider prefixes
type FuncParams ¶ added in v1.16.50
type FuncParams = internalopenai.FuncParams
FuncParams is a helper type for building OpenAI function parameters. It provides a fluent API for defining tool parameter schemas and extracting typed values from tool call arguments.
Schema Building Methods:
- SetString, SetStringEnum - Add string parameters
- SetInt, SetIntRange - Add integer parameters
- SetNumber, SetNumberRange - Add number (float) parameters
- SetBool - Add boolean parameters
- SetArray, SetArrayWithItems - Add array parameters
- SetObject - Add nested object parameters
- SetRequired, AddRequired - Set required fields
Value Extraction Methods (for use in handlers):
- GetString, GetStringDefault
- GetInt, GetIntDefault
- GetFloat, GetFloatDefault
- GetBool, GetBoolDefault
- GetStringArray
- GetMap
func NewFuncParams ¶ added in v1.16.50
func NewFuncParams(history ...*models.AIChatHistory) FuncParams
NewFuncParams creates a new FuncParams with default object type.
Example:
params := ai.NewFuncParams().
SetString("name", "User's full name").
SetInt("age", "User's age in years").
SetStringEnum("status", "Account status", []string{"active", "inactive", "pending"}).
SetRequired("name", "age")
type GoFunctionTool ¶ added in v1.16.50
type GoFunctionTool = internalopenai.GoFunctionTool
GoFunctionTool represents a Go function that can be called by the AI model. The Handler receives a FuncParams which provides helper methods to extract typed values from the tool call arguments.
Example:
tool := ai.NewGoFunctionTool(
"get_weather",
"Get the current weather",
ai.NewFuncParams().
SetString("location", "The city name").
SetRequired("location"),
func(ctx context.Context, args ai.FuncParams) (string, error) {
location := args.GetStringDefault("location", "Unknown")
return fmt.Sprintf(`{"weather": "sunny", "location": "%s"}`, location), nil
},
)
func NewGoFunctionTool ¶ added in v1.16.50
func NewGoFunctionTool( name string, description string, params FuncParams, handler func(context.Context, FuncParams) (string, error), ) GoFunctionTool
NewGoFunctionTool creates a new GoFunctionTool with the given parameters. The handler receives FuncParams which provides helper methods like GetString, GetInt, GetFloat, etc. for extracting typed values.
Example:
tool := ai.NewGoFunctionTool(
"calculate",
"Perform arithmetic operations",
ai.NewFuncParams().
SetNumber("a", "First operand").
SetNumber("b", "Second operand").
SetStringEnum("op", "Operation", []string{"add", "subtract", "multiply", "divide"}).
SetRequired("a", "b", "op"),
func(ctx context.Context, args ai.FuncParams) (string, error) {
a := args.GetFloatDefault("a", 0)
b := args.GetFloatDefault("b", 0)
op := args.GetStringDefault("op", "add")
var result float64
switch op {
case "add":
result = a + b
case "subtract":
result = a - b
case "multiply":
result = a * b
case "divide":
result = a / b
}
return fmt.Sprintf(`{"result": %f}`, result), nil
},
)
func NewStrictGoFunctionTool ¶ added in v1.16.50
func NewStrictGoFunctionTool( name string, description string, params FuncParams, handler func(context.Context, FuncParams) (string, error), ) GoFunctionTool
NewStrictGoFunctionTool creates a new GoFunctionTool with strict mode enabled. Strict mode ensures the model follows the parameter schema exactly. The handler receives FuncParams which provides helper methods for extracting typed values.
type ImageGenOptions ¶ added in v1.15.2
type ImageGenOptions func(*KarmaImageGen)
func WithImgAspectRatio ¶ added in v1.16.59
func WithImgAspectRatio(aspectRatio string) ImageGenOptions
WithImgAspectRatio sets the aspect ratio for generated images
func WithImgDisabledSafetyFilters ¶ added in v1.16.59
func WithImgDisabledSafetyFilters() ImageGenOptions
WithImgDisabledSafetyFilters disables all safety filters
func WithImgImageSize ¶ added in v1.16.59
func WithImgImageSize(imageSize string) ImageGenOptions
WithImgImageSize sets the image size (e.g., "1K", "2K")
func WithImgMimeType ¶ added in v1.16.59
func WithImgMimeType(mimeType string) ImageGenOptions
WithImgMimeType sets the output MIME type (e.g., "image/png", "image/jpeg")
func WithImgNegativePrompt ¶ added in v1.15.2
func WithImgNegativePrompt(prompt string) ImageGenOptions
func WithImgPersonGeneration ¶ added in v1.16.59
func WithImgPersonGeneration(policy string) ImageGenOptions
WithImgPersonGeneration sets the person generation policy
func WithImgSpecialConfig ¶ added in v1.16.59
func WithImgSpecialConfig(cfg map[SpecialConfig]any) ImageGenOptions
WithImgSpecialConfig sets provider-specific configuration
func WithImgTemperature ¶ added in v1.16.59
func WithImgTemperature(temperature float32) ImageGenOptions
WithImgTemperature sets the temperature for image generation
func WithImgUserPrePrompt ¶ added in v1.15.2
func WithImgUserPrePrompt(prompt string) ImageGenOptions
func WithNImages ¶ added in v1.15.2
func WithNImages(n int) ImageGenOptions
func WithOutputDirectory ¶ added in v1.15.2
func WithOutputDirectory(dir string) ImageGenOptions
type ImageModels ¶ added in v1.15.2
type ImageModels string
const ( GROK_2_IMAGE ImageModels = "grok-2-image" GPT_1_IMAGE ImageModels = "gpt-image-1" DALL_E_3 ImageModels = "dall-e-3" DALL_E_2 ImageModels = "dall-e-2" GEMINI_NANO_BANANA ImageModels = "gemini-2.5-flash-image-preview" // Google/Gemini Image Models GEMINI_3_PRO_IMAGE ImageModels = "gemini-3-pro-image-preview" // Segmind Models SEGMIND_SD ImageModels = "segmind-sd-3.5-large" SEGMIND_PROTOVIS ImageModels = "segmind-protovis-lightning" SEGMIND_SAMARITAN ImageModels = "segmind-samaritan-3d" SEGMIND_DREAMSHAPER ImageModels = "segmind-dreamshaper-lightning" SEGMIND_NANO_BANANA ImageModels = "segmind-nano-banana" SEGMIND_FLUX ImageModels = "segmind-flux-schnell" SEGMIND_MIDJOURNEY ImageModels = "segmind-midjourney" SEGMIND_SDXL ImageModels = "segmind-sdxl-txt2img" SEGMIND_SD15 ImageModels = "segmind-sd15-txt2img" )
type KarmaAI ¶
type KarmaAI struct {
Model ModelConfig `json:"model"`
SystemMessage string `json:"system_message"`
Context string `json:"context"`
UserPrePrompt string `json:"user_pre_prompt"`
Temperature float32 `json:"temperature"`
TopP float32 `json:"top_p"`
TopK int `json:"top_k"`
MaxTokens int `json:"max_tokens"`
ReasoningEffort *shared.ReasoningEffort `json:"reasoning_effort"`
ResponseType string `json:"response_type"`
MCPConfig map[string]MCPTool `json:"mcp_config"`
MCPUrl string `json:"mcp_url"`
AuthToken string `json:"auth_token"`
MCPTools []MCPTool `json:"mcp_tools"`
GoFunctionTools []internalopenai.GoFunctionTool `json:"go_function_tools"`
ToolsEnabled bool `json:"tools_enabled"`
UseMCPExecution bool `json:"use_mcp_execution"`
Analytics *Analytics `json:"analytics"`
Features *F `json:"features"`
MaxToolPasses int `json:"max_tool_passes"`
// Deprecated: Use MCPServers instead
MCPServers []MCPServer `json:"mcp_servers"`
// Provider-specific configuration
SpecialConfig map[SpecialConfig]any `json:"special_config"`
}
KarmaAI represents the main AI configuration
func NewKarmaAI ¶
NewKarmaAI creates a new KarmaAI instance with the specified model and options
func (*KarmaAI) AddGoFunctionTool ¶ added in v1.16.50
func (kai *KarmaAI) AddGoFunctionTool(tool internalopenai.GoFunctionTool) error
AddGoFunctionTool adds a Go function tool to the KarmaAI instance after construction. Returns an error if the tool name or handler is missing.
func (*KarmaAI) ChatCompletion ¶
func (kai *KarmaAI) ChatCompletion(messages models.AIChatHistory) (*models.AIChatResponse, error)
func (*KarmaAI) ChatCompletionManaged ¶ added in v1.16.55
func (kai *KarmaAI) ChatCompletionManaged(history *models.AIChatHistory) (*models.AIChatResponse, error)
func (*KarmaAI) ChatCompletionStream ¶ added in v1.5.39
func (kai *KarmaAI) ChatCompletionStream(messages models.AIChatHistory, callback func(chunk models.StreamedResponse) error) (*models.AIChatResponse, error)
func (*KarmaAI) ChatCompletionStreamManaged ¶ added in v1.16.55
func (kai *KarmaAI) ChatCompletionStreamManaged(history *models.AIChatHistory, callback func(chunk models.StreamedResponse) error) (*models.AIChatResponse, error)
func (*KarmaAI) ClearGoFunctionTools ¶ added in v1.16.50
func (kai *KarmaAI) ClearGoFunctionTools()
ClearGoFunctionTools removes all Go function tools from the KarmaAI instance.
func (*KarmaAI) DeleteAnalyticProperty ¶ added in v1.15.4
func (kai *KarmaAI) DeleteAnalyticProperty(property AIProperty)
func (*KarmaAI) EnableTools ¶ added in v1.15.2
func (kai *KarmaAI) EnableTools()
EnableTools enables tool usage
func (*KarmaAI) GenerateFromSinglePrompt ¶ added in v1.5.39
func (kai *KarmaAI) GenerateFromSinglePrompt(prompt string) (*models.AIChatResponse, error)
func (*KarmaAI) GetEmbeddings ¶ added in v1.11.93
func (kai *KarmaAI) GetEmbeddings(text string) (*models.AIEmbeddingResponse, error)
func (*KarmaAI) GetSpecialConfig ¶ added in v1.16.59
func (kai *KarmaAI) GetSpecialConfig(c SpecialConfig) (any, error)
func (*KarmaAI) SendErrorEvent ¶ added in v1.15.4
func (kai *KarmaAI) SendErrorEvent(err error)
func (*KarmaAI) SendEvent ¶
func (kai *KarmaAI) SendEvent()
func (*KarmaAI) SetAnalyticProperty ¶ added in v1.15.4
func (kai *KarmaAI) SetAnalyticProperty(property AIProperty, val any)
type KarmaImageGen ¶ added in v1.15.2
type KarmaImageGen struct {
UserPrePrompt string // User's pre-prompt for image generation
NegativePrompt string // User's negative prompt for image generation
N int // Number of output images
Model ImageModels
OutputDirectory string
// Provider-specific configuration (same as KarmaAI)
SpecialConfig map[SpecialConfig]any
// Image generation settings
AspectRatio string // e.g., "1:1", "16:9", "9:16", "4:3", "3:4"
ImageSize string // e.g., "1K", "2K"
MimeType string // e.g., "image/png", "image/jpeg"
PersonGeneration string // e.g., "ALLOW_ALL", "BLOCK_ALL", "BLOCK_ONLY_ADULTS"
Temperature float32 // Temperature for generation
DisableSafetyFilters bool // Disable safety filters
}
func NewKarmaImageGen ¶ added in v1.15.2
func NewKarmaImageGen(model ImageModels, opts ...ImageGenOptions) *KarmaImageGen
func (*KarmaImageGen) GenerateImages ¶ added in v1.15.2
func (ki *KarmaImageGen) GenerateImages(prompt string) (*models.AIImageResponse, error)
func (*KarmaImageGen) GenerateImagesWithInputImages ¶ added in v1.16.22
func (ki *KarmaImageGen) GenerateImagesWithInputImages(prompt string, imageUrls []string) (*models.AIImageResponse, error)
GenerateImagesWithInputImages generates images using input images (useful for models like Nano Banana)
type MCPServer ¶ added in v1.15.3
type MCPServer struct {
URL string `json:"url"`
AuthToken string `json:"auth_token,omitempty"`
Tools []MCPTool `json:"tools"`
}
MCPServer represents an MCP server configuration
func NewMCPServer ¶ added in v1.15.3
NewMCPServer creates a new MCP server configuration
type MCPTool ¶ added in v1.15.0
type MCPTool struct {
FriendlyName string `json:"friendly_name"`
ToolName string `json:"tool_name"`
Description string `json:"description"`
InputSchema any `json:"input_schema"`
}
MCPTool represents an MCP tool configuration
type ModelConfig ¶ added in v1.15.16
type ModelConfig struct {
BaseModel BaseModel
Provider Provider
CustomModelString string // Optional: override the provider-specific model string
}
ModelConfig represents a model with its provider configuration
func (ModelConfig) GetModelProvider ¶ added in v1.15.16
func (mc ModelConfig) GetModelProvider() Provider
GetModelProvider returns the provider for a given model config
func (ModelConfig) GetModelString ¶ added in v1.15.16
func (mc ModelConfig) GetModelString() string
GetModelString returns the provider-specific model string for API calls
func (ModelConfig) GetProvider ¶ added in v1.15.16
func (mc ModelConfig) GetProvider() Provider
GetProvider returns the provider for this model config
func (ModelConfig) IsOpenAICompatibleModel ¶ added in v1.15.16
func (mc ModelConfig) IsOpenAICompatibleModel() bool
IsOpenAICompatibleModel checks if the model is OpenAI API compatible
func (ModelConfig) SupportsMCP ¶ added in v1.15.16
func (mc ModelConfig) SupportsMCP() bool
SupportsMCP checks if the model supports MCP
type Option ¶
type Option func(*KarmaAI)
Option represents a configuration option for KarmaAI
func AddGoFunctionTool ¶ added in v1.16.48
func AddGoFunctionTool(tool internalopenai.GoFunctionTool) Option
func AddMCPServer ¶ added in v1.15.3
AddMCPServer adds an MCP server to the configuration
func ConfigureAnalytics ¶ added in v1.15.4
ConfigureAnalytics configures analytics settings
func SetCustomModelVariant ¶ added in v1.15.16
Use a custom model variant
func SetGoFunctionTools ¶ added in v1.16.48
func SetGoFunctionTools(tools []internalopenai.GoFunctionTool) Option
func SetMCPAuthToken ¶ added in v1.15.0
SetMCPAuthToken sets the MCP auth token
func SetMCPServers ¶ added in v1.15.3
SetMCPServers sets the MCP servers
func SetMCPTools ¶ added in v1.15.0
SetMCPTools sets the MCP tools
func WithDirectToolCalls ¶ added in v1.16.35
func WithDirectToolCalls() Option
WithDirectToolCalls enables tools without MCP execution (for LangChain/n8n)
func WithMaxTokens ¶ added in v1.5.39
WithMaxTokens sets the maximum tokens
func WithMaxToolPasses ¶ added in v1.16.48
func WithReasoningEffort ¶ added in v1.16.48
func WithReasoningEffort(effort shared.ReasoningEffort) Option
WithReasoningEffort sets the reasoning effort for supported models
func WithResponseType ¶ added in v1.12.101
WithResponseType sets the response type
func WithSpecialConfig ¶ added in v1.16.59
func WithSpecialConfig(config map[SpecialConfig]any) Option
func WithSystemMessage ¶
WithSystemMessage sets the system message
func WithTemperature ¶ added in v1.5.39
WithTemperature sets the temperature
func WithToolsEnabled ¶ added in v1.15.20
func WithToolsEnabled() Option
WithToolsEnabled enables MCP tools
func WithUserPrePrompt ¶
WithUserPrePrompt sets the user pre-prompt
type SpecialConfig ¶ added in v1.16.59
type SpecialConfig string
const ( GoogleProjectID SpecialConfig = "google_project_id" GoogleLocation SpecialConfig = "google_location" GoogleAPIKey SpecialConfig = "google_api_key" )
Source Files ¶
Directories ¶
| Path | Synopsis |
|---|---|
| mcp | Package mcp provides a client implementation for the Model Context Protocol (MCP). |
| | This file adds extenders to KarmaAI package, adding managed memory functionality. |