What is an LRUCache?
LRU means "least recently used": the cache discards the least recently used items first. A cache implementation using this algorithm therefore requires keeping track of when each item was last used, so that it can discard the least recently used item when space is needed. See more here
In the following video, you will find a detailed explanation of the LRU algorithm, which will make it easier for you to understand the code examples below.
Code implementation
The following is the code implementation of our LRUCache.
package algoTest
import "container/list"
// LRUCache is a fixed-capacity cache that evicts the least recently used
// entry when full. The linked list keeps entries in recency order (front =
// most recently used, back = least recently used); the map provides O(1)
// access to a key's list element. Each list element's Value holds a
// []int{key, value} pair.
type LRUCache struct {
capacity int // maximum number of entries before eviction kicks in
linkedList *list.List // recency order; front is most recently used
hashTable map[int]*list.Element // key -> its element in linkedList
}
// New returns an LRUCache with the given capacity, with both the backing
// recency list and the key-to-element index map initialized and empty.
func New(capacity int) LRUCache {
	cache := LRUCache{capacity: capacity}
	cache.linkedList = list.New()
	cache.hashTable = map[int]*list.Element{}
	return cache
}
// Get returns the value stored under key, or -1 when the key is not present.
// A successful lookup counts as a use: the entry is promoted to the front of
// the recency list so it becomes the most recently used item.
func (cache *LRUCache) Get(key int) int {
	element, has := cache.hashTable[key]
	if !has {
		return -1
	}
	// MoveToFront is the idiomatic equivalent of
	// MoveBefore(element, cache.linkedList.Front()).
	cache.linkedList.MoveToFront(element)
	// Each element stores []int{key, value}; index 1 is the value.
	return element.Value.([]int)[1]
}
// Put inserts or updates the value stored under key. When the key is new and
// the cache is already at capacity, the least recently used entry is evicted
// first. Either way the entry ends up as the most recently used item.
func (cache *LRUCache) Put(key int, value int) {
	if element, has := cache.hashTable[key]; has {
		// Bug fix: store the full []int{key, value} pair, not the bare int.
		// Get and Evict type-assert element.Value to []int, so assigning a
		// plain int here would make the next Get of this key panic.
		element.Value = []int{key, value}
		cache.linkedList.MoveToFront(element)
		return
	}
	// New key: make room if needed, then insert at the front.
	if len(cache.hashTable) >= cache.capacity {
		Evict(cache)
	}
	createElementOnFrontOfList(cache, key, value)
}
// createElementOnFrontOfList pushes a new {key, value} pair onto the front of
// the recency list and records the resulting element in the hash table.
func createElementOnFrontOfList(cache *LRUCache, key int, value int) {
	pair := []int{key, value}
	cache.hashTable[key] = cache.linkedList.PushFront(pair)
}
// Evict removes the least recently used entry (the back of the recency list)
// from both the linked list and the hash table. It is a no-op when the list
// is empty, which also guards against a nil dereference if the cache was
// created with capacity <= 0.
func Evict(cache *LRUCache) {
	back := cache.linkedList.Back()
	if back == nil {
		return
	}
	// Read the key before removing the element from the list.
	key := back.Value.([]int)[0]
	cache.linkedList.Remove(back)
	delete(cache.hashTable, key)
}
The following is a test implemented so that we can check our LRUCache.
package algoTest
import (
"fmt"
"github.com/stretchr/testify/assert"
"testing"
)
// TestLRUCache exercises the cache end to end: construction, inserting more
// entries than the capacity allows, retrieval of the surviving entries, and
// eviction of the oldest entry.
func TestLRUCache(t *testing.T) {
	capacity := 3
	cache := New(capacity)
	// The constructor must record the requested capacity.
	assert.NotNil(t, cache)
	assert.Equal(t, capacity, cache.capacity)

	// Insert capacity*2+1 entries (keys 0..6 for capacity 3), which is more
	// than the cache can hold, forcing evictions of the oldest keys.
	fmt.Printf("Starting caching ..\n")
	for i := 0; i <= capacity*2; i++ {
		key, value := i, i*2
		fmt.Printf("Key: %d, Value: %d \n", key, value)
		cache.Put(key, value)
	}

	// Only the last `capacity` entries should survive; each must return its
	// stored value. (The original asserted NotNil on an int, which is always
	// true and checked nothing.)
	for i := capacity * 2; i > capacity; i-- {
		got := cache.Get(i)
		fmt.Printf("Result caching ..\n")
		fmt.Printf("Key: %d, Value: %d \n", i, got)
		assert.Equal(t, i*2, got)
	}

	// The first entry inserted must have been evicted.
	assert.Equal(t, -1, cache.Get(0))
}