Documentation
¶
Overview ¶
* This file extends the KarmaAI package with managed memory functionality.
Index ¶
- type CacheConfig
- type CacheMethod
- type CachedMemories
- type EntityRelationship
- type KarmaMemory
- func (km *KarmaMemory) ChatCompletion(prompt string) (*models.AIChatResponse, error)
- func (km *KarmaMemory) ChatCompletionStream(prompt string, callback func(chunk models.StreamedResponse) error) (*models.AIChatResponse, error)
- func (k *KarmaMemory) ClearHistory()
- func (k *KarmaMemory) DisableCache()
- func (k *KarmaMemory) EnableCache(cfg CacheConfig)
- func (k *KarmaMemory) EnableMemoryCache(cfg ...CacheConfig)
- func (k *KarmaMemory) EnableRedisCache(cfg ...CacheConfig)
- func (k *KarmaMemory) GetCacheMethod() CacheMethod
- func (k *KarmaMemory) GetCacheStats() (map[string]any, error)
- func (k *KarmaMemory) GetContext(userPrompt string) (string, error)
- func (k *KarmaMemory) GetHistory() models.AIChatHistory
- func (k *KarmaMemory) InvalidateCache() error
- func (k *KarmaMemory) IsCacheEnabled() bool
- func (k *KarmaMemory) NumberOfMessages() int
- func (k *KarmaMemory) UpdateMessageHistory(messages []models.AIMessage)
- func (k *KarmaMemory) UseEmbeddingLLM(llm ai.BaseModel, provider ai.Provider)
- func (k *KarmaMemory) UseLogger(logger *zap.Logger)
- func (k *KarmaMemory) UseMemoryLLM(llm ai.BaseModel, provider ai.Provider, extraConfig ...MemoryLlmConfig)
- func (k *KarmaMemory) UseRetrievalMode(mode RetrievalMode)
- func (k *KarmaMemory) UseScope(scope string) bool
- func (k *KarmaMemory) UseService(service VectorServices) error
- func (k *KarmaMemory) UseUser(userId string) bool
- func (k *KarmaMemory) WarmupCache() error
- type Memory
- type MemoryCache
- type MemoryCategory
- type MemoryFilter
- type MemoryLifespan
- type MemoryLlmConfig
- type MemoryMutability
- type MemoryStatus
- type RetrievalMode
- type VectorServices
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type CacheConfig ¶ added in v1.16.43
type CacheMethod ¶ added in v1.16.43
type CacheMethod string
const ( CacheMethodMemory CacheMethod = "memory" CacheMethodRedis CacheMethod = "redis" )
type CachedMemories ¶ added in v1.16.43
type EntityRelationship ¶
type KarmaMemory ¶
type KarmaMemory struct {
// contains filtered or unexported fields
}
func NewKarmaMemory ¶
func NewKarmaMemory(kai *ai.KarmaAI, userId string, sc ...string) *KarmaMemory
func (*KarmaMemory) ChatCompletion ¶
func (km *KarmaMemory) ChatCompletion(prompt string) (*models.AIChatResponse, error)
func (*KarmaMemory) ChatCompletionStream ¶
func (km *KarmaMemory) ChatCompletionStream(prompt string, callback func(chunk models.StreamedResponse) error) (*models.AIChatResponse, error)
func (*KarmaMemory) ClearHistory ¶
func (k *KarmaMemory) ClearHistory()
func (*KarmaMemory) DisableCache ¶ added in v1.16.43
func (k *KarmaMemory) DisableCache()
func (*KarmaMemory) EnableCache ¶ added in v1.16.43
func (k *KarmaMemory) EnableCache(cfg CacheConfig)
func (*KarmaMemory) EnableMemoryCache ¶ added in v1.16.43
func (k *KarmaMemory) EnableMemoryCache(cfg ...CacheConfig)
func (*KarmaMemory) EnableRedisCache ¶ added in v1.16.43
func (k *KarmaMemory) EnableRedisCache(cfg ...CacheConfig)
func (*KarmaMemory) GetCacheMethod ¶ added in v1.16.43
func (k *KarmaMemory) GetCacheMethod() CacheMethod
func (*KarmaMemory) GetCacheStats ¶ added in v1.16.43
func (k *KarmaMemory) GetCacheStats() (map[string]any, error)
func (*KarmaMemory) GetContext ¶
func (k *KarmaMemory) GetContext(userPrompt string) (string, error)
func (*KarmaMemory) GetHistory ¶
func (k *KarmaMemory) GetHistory() models.AIChatHistory
func (*KarmaMemory) InvalidateCache ¶ added in v1.16.43
func (k *KarmaMemory) InvalidateCache() error
func (*KarmaMemory) IsCacheEnabled ¶ added in v1.16.43
func (k *KarmaMemory) IsCacheEnabled() bool
func (*KarmaMemory) NumberOfMessages ¶
func (k *KarmaMemory) NumberOfMessages() int
func (*KarmaMemory) UpdateMessageHistory ¶
func (k *KarmaMemory) UpdateMessageHistory(messages []models.AIMessage)
Advanced implementations may require custom logic to manage message history; in such cases, the function below can be used to update the message history.
func (*KarmaMemory) UseEmbeddingLLM ¶
func (k *KarmaMemory) UseEmbeddingLLM(llm ai.BaseModel, provider ai.Provider)
func (*KarmaMemory) UseLogger ¶ added in v1.16.42
func (k *KarmaMemory) UseLogger(logger *zap.Logger)
func (*KarmaMemory) UseMemoryLLM ¶
func (k *KarmaMemory) UseMemoryLLM(llm ai.BaseModel, provider ai.Provider, extraConfig ...MemoryLlmConfig)
func (*KarmaMemory) UseRetrievalMode ¶
func (k *KarmaMemory) UseRetrievalMode(mode RetrievalMode)
func (*KarmaMemory) UseScope ¶
func (k *KarmaMemory) UseScope(scope string) bool
func (*KarmaMemory) UseService ¶ added in v1.16.42
func (k *KarmaMemory) UseService(service VectorServices) error
func (*KarmaMemory) UseUser ¶
func (k *KarmaMemory) UseUser(userId string) bool
func (*KarmaMemory) WarmupCache ¶ added in v1.16.43
func (k *KarmaMemory) WarmupCache() error
type Memory ¶
type Memory struct {
Id string `json:"id"` // Vector Id
SubjectKey string `json:"subject_key"`
Namespace string `json:"namespace"`
Category MemoryCategory `json:"category"`
Summary string `json:"summary"`
RawText string `json:"raw_text"`
Importance int `json:"importance"`
Mutability MemoryMutability `json:"mutability"`
Lifespan MemoryLifespan `json:"lifespan"`
ForgetScore float64 `json:"forget_score"`
Status MemoryStatus `json:"status"`
SupersedesCanonicalKeys []string `json:"supersedes_canonical_keys"`
Metadata json.RawMessage `json:"metadata"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
ExpiresAt *time.Time `json:"expires_at,omitempty"`
EntityRelationships []EntityRelationship `json:"entity_relationships"`
}
type MemoryCache ¶ added in v1.16.43
type MemoryCache interface {
IsEnabled() bool
GetBackend() CacheMethod
CacheMemoriesByCategory(ctx context.Context, userID, scope string, category MemoryCategory, memories []Memory) error
GetCachedMemoriesByCategory(ctx context.Context, userID, scope string, category MemoryCategory) ([]Memory, bool)
CacheRules(ctx context.Context, userID, scope string, rules []Memory) error
GetCachedRules(ctx context.Context, userID, scope string) ([]Memory, bool)
CacheFacts(ctx context.Context, userID, scope string, facts []Memory) error
GetCachedFacts(ctx context.Context, userID, scope string) ([]Memory, bool)
CacheSkills(ctx context.Context, userID, scope string, skills []Memory) error
GetCachedSkills(ctx context.Context, userID, scope string) ([]Memory, bool)
CacheContext(ctx context.Context, userID, scope string, contextMemories []Memory) error
GetCachedContext(ctx context.Context, userID, scope string) ([]Memory, bool)
// Dynamic filtering for conscious mode
CacheAllMemories(ctx context.Context, userID, scope string, memories []Memory) error
GetCachedMemoriesWithFilter(ctx context.Context, userID, scope string, filter MemoryFilter) ([]Memory, bool)
InvalidateCategoryCache(ctx context.Context, userID, scope string, category MemoryCategory) error
InvalidateUserCache(ctx context.Context, userID string) error
GetCacheStats(ctx context.Context, userID string) (map[string]any, error)
WarmupCache(ctx context.Context, userID, scope string, vectorClient vectorService) error
Close() error
}
func NewCache ¶ added in v1.16.43
func NewCache(logger *zap.Logger, cfg ...CacheConfig) MemoryCache
func NewMemoryCache ¶ added in v1.16.43
func NewMemoryCache(logger *zap.Logger, cfg ...CacheConfig) MemoryCache
func NewRedisCache ¶ added in v1.16.43
func NewRedisCache(logger *zap.Logger, cfg ...CacheConfig) MemoryCache
type MemoryCategory ¶
type MemoryCategory string
const ( CategoryFact MemoryCategory = "fact" CategoryPreference MemoryCategory = "preference" CategorySkill MemoryCategory = "skill" CategoryContext MemoryCategory = "context" CategoryRule MemoryCategory = "rule" CategoryEntity MemoryCategory = "entity" CategoryEpisodic MemoryCategory = "episodic" )
type MemoryFilter ¶ added in v1.16.43
type MemoryFilter struct {
Categories []MemoryCategory // Filter by categories (e.g., fact, rule, skill)
Lifespans []MemoryLifespan // Filter by lifespans (e.g., short_term, lifelong)
Status *MemoryStatus // Filter by status (e.g., active)
MinImportance *int // Filter by minimum importance
NotExpired bool // Only include non-expired memories
}
MemoryFilter represents dynamic filter criteria for conscious mode retrieval.
type MemoryLifespan ¶
type MemoryLifespan string
const ( LifespanShortTerm MemoryLifespan = "short_term" LifespanMidTerm MemoryLifespan = "mid_term" LifespanLongTerm MemoryLifespan = "long_term" LifespanLifelong MemoryLifespan = "lifelong" )
type MemoryLlmConfig ¶ added in v1.16.43
type MemoryMutability ¶
type MemoryMutability string
const ( MutabilityMutable MemoryMutability = "mutable" MutabilityImmutable MemoryMutability = "immutable" )
type MemoryStatus ¶
type MemoryStatus string
const ( StatusActive MemoryStatus = "active" StatusSuperseded MemoryStatus = "superseded" StatusDeleted MemoryStatus = "deleted" )
type RetrievalMode ¶
type RetrievalMode string
RetrievalMode defines how memory context is retrieved.
const ( // RetrievalModeConscious uses AI to generate dynamic search queries based on user prompt. // It analyzes the prompt to determine relevant categories, lifespan, and search terms. // Best for: Complex queries where context matters, conversational AI. // Tradeoff: Higher latency due to LLM call, more tokens used. RetrievalModeConscious RetrievalMode = "conscious" // RetrievalModeAuto uses a fixed query strategy with the user prompt as the search text. // Always includes non-expired facts, preferences, rules, entities, and context. // Best for: Fast retrieval, predictable behavior, lower cost. // Tradeoff: Less intelligent filtering, may retrieve less relevant memories. RetrievalModeAuto RetrievalMode = "auto" )
type VectorServices ¶ added in v1.16.42
type VectorServices string
const ( VectorServiceUpstash VectorServices = "upstash" VectorServicePinecone VectorServices = "pinecone" )
Click to show internal directories.
Click to hide internal directories.