init commit

2025-11-30 13:01:24 -05:00
parent f4596a372d
commit 29355260ed
607 changed files with 136371 additions and 234 deletions

store/cache/cache.go (vendored, new file, 327 lines)

@@ -0,0 +1,327 @@
package cache

import (
	"context"
	"sync"
	"sync/atomic"
	"time"
)

// Interface defines the operations a cache must support.
type Interface interface {
	// Set adds a value to the cache with the default TTL.
	Set(ctx context.Context, key string, value any)
	// SetWithTTL adds a value to the cache with a custom TTL.
	SetWithTTL(ctx context.Context, key string, value any, ttl time.Duration)
	// Get retrieves a value from the cache.
	Get(ctx context.Context, key string) (any, bool)
	// Delete removes a value from the cache.
	Delete(ctx context.Context, key string)
	// Clear removes all values from the cache.
	Clear(ctx context.Context)
	// Size returns the number of items in the cache.
	Size() int64
	// Close stops all background tasks and releases resources.
	Close() error
}

// item represents a cached value with metadata.
type item struct {
	value      any
	expiration time.Time
	size       int // Approximate size in bytes
}

// Config contains options for configuring a cache.
type Config struct {
	// DefaultTTL is the default time-to-live for cache entries.
	DefaultTTL time.Duration
	// CleanupInterval is how often the cache runs cleanup.
	CleanupInterval time.Duration
	// MaxItems is the maximum number of items allowed in the cache.
	MaxItems int
	// OnEviction is called when an item is evicted from the cache.
	OnEviction func(key string, value any)
}

// DefaultConfig returns a default configuration for the cache.
func DefaultConfig() Config {
	return Config{
		DefaultTTL:      10 * time.Minute,
		CleanupInterval: 5 * time.Minute,
		MaxItems:        1000,
		OnEviction:      nil,
	}
}

// Cache is a thread-safe in-memory cache with TTL and memory management.
type Cache struct {
	data       sync.Map
	config     Config
	itemCount  int64 // Use atomic operations to track item count
	stopChan   chan struct{}
	closedChan chan struct{}
	closeOnce  sync.Once // Makes Close safe to call more than once
}

// New creates a new memory cache with the given configuration.
func New(config Config) *Cache {
	// A non-positive interval would make time.NewTicker panic in cleanupLoop,
	// so fall back to the default.
	if config.CleanupInterval <= 0 {
		config.CleanupInterval = DefaultConfig().CleanupInterval
	}
	c := &Cache{
		config:     config,
		stopChan:   make(chan struct{}),
		closedChan: make(chan struct{}),
	}
	go c.cleanupLoop()
	return c
}

// NewDefault creates a new memory cache with default configuration.
func NewDefault() *Cache {
	return New(DefaultConfig())
}

// Set adds a value to the cache with the default TTL.
func (c *Cache) Set(ctx context.Context, key string, value any) {
	c.SetWithTTL(ctx, key, value, c.config.DefaultTTL)
}

// SetWithTTL adds a value to the cache with a custom TTL.
func (c *Cache) SetWithTTL(_ context.Context, key string, value any, ttl time.Duration) {
	// Estimate size of the item (very rough approximation).
	size := estimateSize(value)
	// Check if item already exists to avoid double counting.
	if _, exists := c.data.Load(key); exists {
		c.data.Delete(key)
	} else {
		// Only increment if this is a new key.
		atomic.AddInt64(&c.itemCount, 1)
	}
	c.data.Store(key, item{
		value:      value,
		expiration: time.Now().Add(ttl),
		size:       size,
	})
	// If we're over the max items, clean up old items.
	if c.config.MaxItems > 0 && atomic.LoadInt64(&c.itemCount) > int64(c.config.MaxItems) {
		c.cleanupOldest()
	}
}

// Get retrieves a value from the cache.
func (c *Cache) Get(_ context.Context, key string) (any, bool) {
	value, ok := c.data.Load(key)
	if !ok {
		return nil, false
	}
	itm, ok := value.(item)
	if !ok {
		// If the value is not of type item, it means it was corrupted or not set correctly.
		c.data.Delete(key)
		return nil, false
	}
	if time.Now().After(itm.expiration) {
		c.data.Delete(key)
		atomic.AddInt64(&c.itemCount, -1)
		if c.config.OnEviction != nil {
			c.config.OnEviction(key, itm.value)
		}
		return nil, false
	}
	return itm.value, true
}

// Delete removes a value from the cache.
func (c *Cache) Delete(_ context.Context, key string) {
	if value, loaded := c.data.LoadAndDelete(key); loaded {
		atomic.AddInt64(&c.itemCount, -1)
		if c.config.OnEviction != nil {
			if itm, ok := value.(item); ok {
				c.config.OnEviction(key, itm.value)
			}
		}
	}
}

// Clear removes all values from the cache.
func (c *Cache) Clear(_ context.Context) {
	// Delete entries in place rather than swapping in a fresh sync.Map;
	// reassigning c.data is not safe while other goroutines may be using the cache.
	c.data.Range(func(key, value any) bool {
		c.data.Delete(key)
		if c.config.OnEviction != nil {
			if itm, ok := value.(item); ok {
				if keyStr, ok := key.(string); ok {
					c.config.OnEviction(keyStr, itm.value)
				}
			}
		}
		return true
	})
	atomic.StoreInt64(&c.itemCount, 0)
}

// Size returns the number of items in the cache.
func (c *Cache) Size() int64 {
	return atomic.LoadInt64(&c.itemCount)
}

// Close stops the cache cleanup goroutine. It is safe to call multiple times,
// including concurrently.
func (c *Cache) Close() error {
	c.closeOnce.Do(func() {
		close(c.stopChan)
		<-c.closedChan // Wait for cleanup goroutine to exit
	})
	return nil
}

// cleanupLoop periodically cleans up expired items.
func (c *Cache) cleanupLoop() {
	ticker := time.NewTicker(c.config.CleanupInterval)
	defer func() {
		ticker.Stop()
		close(c.closedChan)
	}()
	for {
		select {
		case <-ticker.C:
			c.cleanup()
		case <-c.stopChan:
			return
		}
	}
}

// cleanup removes expired items.
func (c *Cache) cleanup() {
	evicted := make(map[string]any)
	count := 0
	c.data.Range(func(key, value any) bool {
		itm, ok := value.(item)
		if !ok {
			return true
		}
		if time.Now().After(itm.expiration) {
			c.data.Delete(key)
			count++
			if c.config.OnEviction != nil {
				if keyStr, ok := key.(string); ok {
					evicted[keyStr] = itm.value
				}
			}
		}
		return true
	})
	if count > 0 {
		atomic.AddInt64(&c.itemCount, -int64(count))
		// Call eviction callbacks outside the loop to avoid blocking the range
		if c.config.OnEviction != nil {
			for k, v := range evicted {
				c.config.OnEviction(k, v)
			}
		}
	}
}

// cleanupOldest removes the oldest items if we're over the max items.
func (c *Cache) cleanupOldest() {
	// Remove up to 20% of max items at once.
	threshold := max(c.config.MaxItems/5, 1)
	currentCount := atomic.LoadInt64(&c.itemCount)
	// If we're not over the limit, don't do anything.
	if currentCount <= int64(c.config.MaxItems) {
		return
	}
	// Track the items with the earliest expirations as eviction candidates.
	type keyExpPair struct {
		key        string
		value      any
		expiration time.Time
	}
	candidates := make([]keyExpPair, 0, threshold)
	c.data.Range(func(key, value any) bool {
		itm, ok := value.(item)
		if !ok {
			return true
		}
		keyStr, ok := key.(string)
		if !ok {
			return true
		}
		// Fill the candidate list first.
		if len(candidates) < threshold {
			candidates = append(candidates, keyExpPair{keyStr, itm.value, itm.expiration})
			return true
		}
		// Find the newest item in candidates.
		newestIdx := 0
		for i := 1; i < len(candidates); i++ {
			if candidates[i].expiration.After(candidates[newestIdx].expiration) {
				newestIdx = i
			}
		}
		// Replace it if this item is older.
		if itm.expiration.Before(candidates[newestIdx].expiration) {
			candidates[newestIdx] = keyExpPair{keyStr, itm.value, itm.expiration}
		}
		return true
	})
	// Delete the oldest items.
	deletedCount := 0
	for _, candidate := range candidates {
		c.data.Delete(candidate.key)
		deletedCount++
		if c.config.OnEviction != nil {
			c.config.OnEviction(candidate.key, candidate.value)
		}
	}
	// Update count.
	if deletedCount > 0 {
		atomic.AddInt64(&c.itemCount, -int64(deletedCount))
	}
}

// estimateSize attempts to estimate the memory footprint of a value.
func estimateSize(value any) int {
	switch v := value.(type) {
	case string:
		return len(v) + 24 // base size + string overhead
	case []byte:
		return len(v) + 24 // base size + slice overhead
	case map[string]any:
		return len(v) * 64 // rough estimate
	default:
		return 64 // default conservative estimate
	}
}
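
For orientation, here is a minimal usage sketch of the exported API above. It is not part of this commit, and the import path is a hypothetical placeholder for wherever the module actually lives; everything else follows the functions defined in cache.go.

package main

import (
	"context"
	"fmt"
	"time"

	"example.invalid/store/cache" // hypothetical module path; substitute the real one
)

func main() {
	cfg := cache.DefaultConfig()
	cfg.DefaultTTL = time.Minute
	cfg.OnEviction = func(key string, value any) {
		fmt.Printf("evicted: %s\n", key)
	}

	c := cache.New(cfg)
	defer c.Close()

	ctx := context.Background()
	c.Set(ctx, "greeting", "hello")
	if v, ok := c.Get(ctx, "greeting"); ok {
		fmt.Println(v) // prints "hello"
	}

	// Entries can also be stored with a per-key TTL.
	c.SetWithTTL(ctx, "token", "abc123", 30*time.Second)
}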

store/cache/cache_test.go (vendored, new file, 209 lines)

@@ -0,0 +1,209 @@
package cache

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"
)

func TestCacheBasicOperations(t *testing.T) {
	ctx := context.Background()
	config := DefaultConfig()
	config.DefaultTTL = 100 * time.Millisecond
	config.CleanupInterval = 50 * time.Millisecond
	cache := New(config)
	defer cache.Close()

	// Test Set and Get
	cache.Set(ctx, "key1", "value1")
	if val, ok := cache.Get(ctx, "key1"); !ok || val != "value1" {
		t.Errorf("Expected 'value1', got %v, exists: %v", val, ok)
	}

	// Test SetWithTTL
	cache.SetWithTTL(ctx, "key2", "value2", 200*time.Millisecond)
	if val, ok := cache.Get(ctx, "key2"); !ok || val != "value2" {
		t.Errorf("Expected 'value2', got %v, exists: %v", val, ok)
	}

	// Test Delete
	cache.Delete(ctx, "key1")
	if _, ok := cache.Get(ctx, "key1"); ok {
		t.Errorf("Key 'key1' should have been deleted")
	}

	// Test automatic expiration
	time.Sleep(150 * time.Millisecond)
	if _, ok := cache.Get(ctx, "key1"); ok {
		t.Errorf("Key 'key1' should have expired")
	}
	// key2 should still be valid (200ms TTL)
	if _, ok := cache.Get(ctx, "key2"); !ok {
		t.Errorf("Key 'key2' should still be valid")
	}
	// Wait for key2 to expire
	time.Sleep(100 * time.Millisecond)
	if _, ok := cache.Get(ctx, "key2"); ok {
		t.Errorf("Key 'key2' should have expired")
	}

	// Test Clear
	cache.Set(ctx, "key3", "value3")
	cache.Clear(ctx)
	if _, ok := cache.Get(ctx, "key3"); ok {
		t.Errorf("Cache should be empty after Clear()")
	}
}

func TestCacheEviction(t *testing.T) {
	ctx := context.Background()
	config := DefaultConfig()
	config.MaxItems = 5
	cache := New(config)
	defer cache.Close()

	// Add 5 items (max capacity)
	for i := 0; i < 5; i++ {
		key := fmt.Sprintf("key%d", i)
		cache.Set(ctx, key, i)
	}
	// Verify all 5 items are in the cache
	for i := 0; i < 5; i++ {
		key := fmt.Sprintf("key%d", i)
		if _, ok := cache.Get(ctx, key); !ok {
			t.Errorf("Key '%s' should be in the cache", key)
		}
	}

	// Add 2 more items to trigger eviction
	cache.Set(ctx, "keyA", "valueA")
	cache.Set(ctx, "keyB", "valueB")

	// Verify size is still within limits
	if cache.Size() > int64(config.MaxItems) {
		t.Errorf("Cache size %d exceeds limit %d", cache.Size(), config.MaxItems)
	}

	// Some of the original keys should have been evicted
	evictedCount := 0
	for i := 0; i < 5; i++ {
		key := fmt.Sprintf("key%d", i)
		if _, ok := cache.Get(ctx, key); !ok {
			evictedCount++
		}
	}
	if evictedCount == 0 {
		t.Errorf("No keys were evicted despite exceeding max items")
	}

	// The newer keys should still be present
	if _, ok := cache.Get(ctx, "keyA"); !ok {
		t.Errorf("Key 'keyA' should be in the cache")
	}
	if _, ok := cache.Get(ctx, "keyB"); !ok {
		t.Errorf("Key 'keyB' should be in the cache")
	}
}

func TestCacheConcurrency(t *testing.T) {
	ctx := context.Background()
	cache := NewDefault()
	defer cache.Close()

	const goroutines = 10
	const operationsPerGoroutine = 100

	var wg sync.WaitGroup
	wg.Add(goroutines)
	for i := 0; i < goroutines; i++ {
		go func(id int) {
			defer wg.Done()
			baseKey := fmt.Sprintf("worker%d-", id)
			// Set operations
			for j := 0; j < operationsPerGoroutine; j++ {
				key := fmt.Sprintf("%skey%d", baseKey, j)
				value := fmt.Sprintf("value%d-%d", id, j)
				cache.Set(ctx, key, value)
			}
			// Get operations
			for j := 0; j < operationsPerGoroutine; j++ {
				key := fmt.Sprintf("%skey%d", baseKey, j)
				val, ok := cache.Get(ctx, key)
				if !ok {
					t.Errorf("Key '%s' should exist in cache", key)
					continue
				}
				expected := fmt.Sprintf("value%d-%d", id, j)
				if val != expected {
					t.Errorf("For key '%s', expected '%s', got '%s'", key, expected, val)
				}
			}
			// Delete half the keys
			for j := 0; j < operationsPerGoroutine/2; j++ {
				key := fmt.Sprintf("%skey%d", baseKey, j)
				cache.Delete(ctx, key)
			}
		}(i)
	}
	wg.Wait()

	// Verify size and deletion
	var totalKeysExpected int64 = goroutines * operationsPerGoroutine / 2
	if cache.Size() != totalKeysExpected {
		t.Errorf("Expected cache size to be %d, got %d", totalKeysExpected, cache.Size())
	}
}

func TestEvictionCallback(t *testing.T) {
	ctx := context.Background()
	evicted := make(map[string]any)
	evictedMu := sync.Mutex{}

	config := DefaultConfig()
	config.DefaultTTL = 50 * time.Millisecond
	config.CleanupInterval = 25 * time.Millisecond
	config.OnEviction = func(key string, value any) {
		evictedMu.Lock()
		evicted[key] = value
		evictedMu.Unlock()
	}
	cache := New(config)
	defer cache.Close()

	// Add items
	cache.Set(ctx, "key1", "value1")
	cache.Set(ctx, "key2", "value2")

	// Manually delete
	cache.Delete(ctx, "key1")

	// Verify manual deletion triggered callback
	time.Sleep(10 * time.Millisecond) // Small delay to ensure callback processed
	evictedMu.Lock()
	if evicted["key1"] != "value1" {
		t.Errorf("Eviction callback not triggered for manual deletion")
	}
	evictedMu.Unlock()

	// Wait for automatic expiration
	time.Sleep(60 * time.Millisecond)

	// Verify TTL expiration triggered callback
	evictedMu.Lock()
	if evicted["key2"] != "value2" {
		t.Errorf("Eviction callback not triggered for TTL expiration")
	}
	evictedMu.Unlock()
}
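
As a possible follow-up, not included in this commit: a small parallel benchmark sketch for the Set/Get path, written against the same package (context, fmt, and testing are already imported by cache_test.go). The key space of 512 stays under the default MaxItems of 1000, so it measures steady-state access rather than eviction churn.

// BenchmarkCacheSetGet is an illustrative sketch, not part of the original test file.
func BenchmarkCacheSetGet(b *testing.B) {
	ctx := context.Background()
	c := NewDefault()
	defer c.Close()

	b.RunParallel(func(pb *testing.PB) {
		i := 0
		for pb.Next() {
			key := fmt.Sprintf("key%d", i%512)
			c.Set(ctx, key, i)
			c.Get(ctx, key)
			i++
		}
	})
}

Given the concurrent access patterns exercised by the tests above, running the package with the race detector (go test -race) is also worthwhile.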