溫馨提示×

溫馨提示×

您好,登錄后才能下訂單哦!

密碼登錄×
登錄注冊×
其他方式登錄
點擊 登錄注冊 即表示同意《億速云用戶服務條款》

分布式緩存實踐:Go HashMap的緩存數據訪問緩存淘汰算法

發布時間:2024-11-09 01:13:39 來源:億速云 閱讀:79 作者:小樊 欄目:編程語言

在分布式系統中,緩存是一種提高數據訪問速度的有效手段。Go語言中的sync.Map是一個內置的并發安全的map,可以用來實現分布式緩存。為了實現緩存淘汰算法,我們可以使用以下幾種策略:

  1. LRU(Least Recently Used):最近最少使用的數據將被淘汰。這種策略可以通過維護一個雙向鏈表來實現,鏈表中存儲了緩存的鍵值對。當緩存滿時,從鏈表頭部移除元素。訪問緩存時,將對應的鍵值對移動到鏈表尾部。
// LRUCache is a fixed-capacity cache that evicts the least recently
// used entry when full. A single mutex guards both the index map and
// the recency list: they must be updated together atomically, which a
// lock-free sync.Map cannot provide (the original also called the
// nonexistent list.List.Find and the builtin delete on a sync.Map).
type LRUCache struct {
	mu       sync.Mutex
	capacity int                      // maximum number of entries; <= 0 disables the cache
	items    map[string]*list.Element // key -> its element in lruList
	lruList  *list.List               // front = most recently used, back = least
}

// entry is the payload stored in each list element, carrying the key
// so that evicting the back element can also remove it from items.
type entry struct {
	key   string
	value interface{}
}

// NewLRUCache returns an LRUCache holding at most capacity entries.
func NewLRUCache(capacity int) *LRUCache {
	return &LRUCache{
		capacity: capacity,
		items:    make(map[string]*list.Element),
		lruList:  list.New(),
	}
}

// Get returns the cached value for key and marks the key as most
// recently used. The second result reports whether key was present.
func (c *LRUCache) Get(key string) (interface{}, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	elem, ok := c.items[key]
	if !ok {
		return nil, false
	}
	c.lruList.MoveToFront(elem)
	return elem.Value.(*entry).value, true
}

// Put inserts or updates key. Inserting a new key into a full cache
// evicts the least recently used entry (the list's back element).
func (c *LRUCache) Put(key string, value interface{}) {
	if c.capacity <= 0 {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()

	// Existing key: update the value in place and refresh recency.
	if elem, ok := c.items[key]; ok {
		elem.Value.(*entry).value = value
		c.lruList.MoveToFront(elem)
		return
	}

	// New key into a full cache: drop the least recently used entry.
	if c.lruList.Len() >= c.capacity {
		if last := c.lruList.Back(); last != nil {
			delete(c.items, last.Value.(*entry).key)
			c.lruList.Remove(last)
		}
	}
	c.items[key] = c.lruList.PushFront(&entry{key: key, value: value})
}
  2. TTL(Time To Live):數據在一定時間內有效,過期將被淘汰。這種策略可以通過為每個緩存項設置一個過期時間來實現。當訪問緩存時,檢查是否已過期,如果過期則淘汰。
// TTLCache is a capacity-bounded cache whose entries expire after a
// caller-supplied time-to-live. Expired entries are dropped lazily on
// Get and swept on Put when room is needed. One mutex guards the map;
// the original mixed a sync.Map with an unsynchronized ttlMap and
// referenced an lruList field the struct never had.
type TTLCache struct {
	mu       sync.Mutex
	capacity int                 // maximum number of entries; <= 0 disables the cache
	items    map[string]ttlEntry // key -> value plus absolute expiry
}

// ttlEntry pairs a cached value with its absolute expiration instant.
type ttlEntry struct {
	value      interface{}
	expiration time.Time
}

// NewTTLCache returns a TTLCache holding at most capacity entries.
func NewTTLCache(capacity int) *TTLCache {
	return &TTLCache{
		capacity: capacity,
		items:    make(map[string]ttlEntry),
	}
}

// Get returns the value for key if it is present and not yet expired.
// An expired entry is deleted on access and reported as a miss.
func (c *TTLCache) Get(key string) (interface{}, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	e, ok := c.items[key]
	if !ok {
		return nil, false
	}
	if time.Now().After(e.expiration) {
		delete(c.items, key) // lazy expiry
		return nil, false
	}
	return e.value, true
}

// Put stores value under key for ttl seconds. If inserting a new key
// into a full cache, already-expired entries are swept first; if the
// cache is still full, the entry closest to expiring is evicted.
func (c *TTLCache) Put(key string, value interface{}, ttl int) {
	if c.capacity <= 0 {
		return
	}
	now := time.Now()

	c.mu.Lock()
	defer c.mu.Unlock()

	if _, exists := c.items[key]; !exists && len(c.items) >= c.capacity {
		// Sweep everything already expired.
		for k, e := range c.items {
			if now.After(e.expiration) {
				delete(c.items, k)
			}
		}
		// Still full: evict the entry that would expire soonest.
		if len(c.items) >= c.capacity {
			var victim string
			var soonest time.Time
			first := true
			for k, e := range c.items {
				if first || e.expiration.Before(soonest) {
					victim, soonest, first = k, e.expiration, false
				}
			}
			delete(c.items, victim)
		}
	}
	c.items[key] = ttlEntry{value: value, expiration: now.Add(time.Duration(ttl) * time.Second)}
}
  3. LFU(Least Frequently Used):最不經常使用的數據將被淘汰。這種策略可以通過維護一個哈希表來記錄每個鍵值對的出現頻率來實現。當緩存滿時,根據頻率從低到高淘汰數據。
// LFUCache is a fixed-capacity cache that evicts the least frequently
// used entry when full. Both maps are guarded by one mutex so the
// value store and the frequency counts always stay consistent (the
// original mutated freqMap without synchronization, called the
// nonexistent sync.Map.Len, and used the builtin delete on a sync.Map;
// its minFreq bookkeeping was also wrong and never used for eviction).
type LFUCache struct {
	mu       sync.Mutex
	capacity int                    // maximum number of entries; <= 0 disables the cache
	items    map[string]interface{} // key -> cached value
	freqMap  map[string]int         // key -> access count
}

// NewLFUCache returns an LFUCache holding at most capacity entries.
func NewLFUCache(capacity int) *LFUCache {
	return &LFUCache{
		capacity: capacity,
		items:    make(map[string]interface{}),
		freqMap:  make(map[string]int),
	}
}

// Get returns the value for key and counts the access toward the
// key's usage frequency.
func (c *LFUCache) Get(key string) (interface{}, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	v, ok := c.items[key]
	if !ok {
		return nil, false
	}
	c.increaseFreq(key)
	return v, true
}

// Put inserts or updates key. Inserting a new key into a full cache
// first evicts the key with the lowest access count (O(n) scan; fine
// for small caches — use frequency buckets for large ones).
func (c *LFUCache) Put(key string, value interface{}) {
	if c.capacity <= 0 {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()

	if _, ok := c.items[key]; ok {
		c.items[key] = value
		c.increaseFreq(key)
		return
	}

	if len(c.items) >= c.capacity {
		victim := ""
		minFreq := int(^uint(0) >> 1) // max int sentinel
		for k, f := range c.freqMap {
			if f < minFreq {
				victim, minFreq = k, f
			}
		}
		delete(c.items, victim)
		delete(c.freqMap, victim)
	}
	c.items[key] = value
	c.freqMap[key] = 1
}

// increaseFreq bumps key's access count; callers must hold c.mu.
func (c *LFUCache) increaseFreq(key string) {
	c.freqMap[key]++
}

這些緩存淘汰算法可以根據具體需求進行選擇和實現。在實際應用中,還可以根據業務特點對緩存策略進行調整和優化。

向AI問一下細(xì)節(jié)

免責聲明:本站發布的內容(圖片、視頻和文字)以原創、轉載和分享為主,文章觀點不代表本網站立場,如果涉及侵權請聯系站長郵箱:is@yisu.com進行舉報,并提供相關證據,一經查實,將立刻刪除涉嫌侵權內容。

go
AI