14.1 LRU/LFU缓存实现
面试重要程度:⭐⭐⭐⭐⭐
常见提问方式: "设计LRU缓存" "实现LFU算法" "缓存淘汰策略对比"
预计阅读时间:40分钟
🎯 LRU缓存实现
LRU基本概念
LRU (Least Recently Used) 最近最少使用算法,是一种常用的缓存淘汰策略。
核心思想: 当缓存满时,优先淘汰最久未被访问的数据。
LRU算法实现
/**
 * LRU (Least Recently Used) cache with O(1) get/put.
 * LeetCode 146: LRU Cache.
 *
 * A HashMap provides O(1) key lookup; a doubly linked list ordered from
 * most- to least-recently used provides O(1) promotion and eviction.
 * Sentinel head/tail nodes eliminate null checks in the list operations.
 */
public class LRUCache {

    /**
     * Doubly linked list node. The key is stored on the node so that
     * eviction can also remove the matching HashMap entry.
     */
    class DLinkedNode {
        int key;
        int value;
        DLinkedNode prev;
        DLinkedNode next;

        public DLinkedNode() {}

        public DLinkedNode(int key, int value) {
            this.key = key;
            this.value = value;
        }
    }

    private Map<Integer, DLinkedNode> cache = new HashMap<>();
    private int size;
    private int capacity;
    // Sentinels: head.next is the most recently used, tail.prev the least.
    private DLinkedNode head, tail;

    public LRUCache(int capacity) {
        this.size = 0;
        this.capacity = capacity;
        // Dummy head and tail nodes simplify boundary handling.
        head = new DLinkedNode();
        tail = new DLinkedNode();
        head.next = tail;
        tail.prev = head;
    }

    /**
     * Returns the value mapped to {@code key}, or -1 if absent.
     * A hit promotes the node to the head (most recently used).
     * Time complexity: O(1).
     */
    public int get(int key) {
        DLinkedNode node = cache.get(key);
        if (node == null) {
            return -1;
        }
        // Key exists: locate via the map, then promote to the head.
        moveToHead(node);
        return node.value;
    }

    /**
     * Inserts or updates {@code key}. If the insert pushes the cache
     * past capacity, the least recently used entry is evicted.
     * Time complexity: O(1).
     */
    public void put(int key, int value) {
        DLinkedNode node = cache.get(key);
        if (node == null) {
            DLinkedNode newNode = new DLinkedNode(key, value);
            // Register in the map and at the head of the list.
            cache.put(key, newNode);
            addToHead(newNode);
            ++size;
            if (size > capacity) {
                // Over capacity: evict the LRU entry. Local renamed from
                // "tail" so it no longer shadows the sentinel field.
                DLinkedNode removed = removeTail();
                cache.remove(removed.key);
                --size;
            }
        } else {
            // Existing key: update the value in place and promote.
            node.value = value;
            moveToHead(node);
        }
    }

    /** Links {@code node} immediately after the head sentinel. */
    private void addToHead(DLinkedNode node) {
        node.prev = head;
        node.next = head.next;
        head.next.prev = node;
        head.next = node;
    }

    /** Unlinks {@code node} from the list. */
    private void removeNode(DLinkedNode node) {
        node.prev.next = node.next;
        node.next.prev = node.prev;
    }

    /** Promotes {@code node} to the most-recently-used position. */
    private void moveToHead(DLinkedNode node) {
        removeNode(node);
        addToHead(node);
    }

    /** Unlinks and returns the least-recently-used node (tail.prev). */
    private DLinkedNode removeTail() {
        DLinkedNode lastNode = tail.prev;
        removeNode(lastNode);
        return lastNode;
    }
}
基于LinkedHashMap的简化实现
/**
 * LRU cache built on LinkedHashMap's access-order mode.
 *
 * Constructing the superclass with {@code accessOrder = true} makes every
 * get/put move the touched entry to the end of the iteration order, and
 * overriding {@link #removeEldestEntry} lets the map evict automatically.
 */
public class LRUCacheSimple extends LinkedHashMap<Integer, Integer> {

    private final int capacity;

    public LRUCacheSimple(int capacity) {
        // accessOrder=true switches iteration from insertion to access order.
        super(capacity, 0.75F, true);
        this.capacity = capacity;
    }

    /** Returns the cached value, or -1 when the key is absent. */
    public int get(int key) {
        return super.getOrDefault(key, -1);
    }

    /** Inserts or updates a key; eviction is handled by the superclass. */
    public void put(int key, int value) {
        super.put(key, value);
    }

    /** Evict the eldest (least recently accessed) entry once over capacity. */
    @Override
    protected boolean removeEldestEntry(Map.Entry<Integer, Integer> eldest) {
        return size() > capacity;
    }
}
🔄 LFU缓存实现
LFU基本概念
LFU (Least Frequently Used) 最不经常使用算法,根据数据的历史访问频率来淘汰数据。
核心思想: 当缓存满时,优先淘汰访问频率最低的数据。
LFU算法实现
/**
 * LFU (Least Frequently Used) cache with O(1) get/put.
 * LeetCode 460: LFU Cache.
 *
 * keyToNode maps key -> node; freqToList maps frequency -> a doubly linked
 * list of nodes with that frequency, most recently used at the front.
 * minFreq tracks the lowest frequency present so eviction is O(1).
 *
 * Fixes over the original: removeMinFreqNode now guards against a missing
 * or empty list, and emptied frequency lists are pruned from freqToList so
 * the map no longer grows without bound as frequencies increase.
 */
public class LFUCache {

    /** Cache node; new entries start at frequency 1. */
    class Node {
        int key, val, freq;
        Node prev, next;

        public Node(int key, int val) {
            this.key = key;
            this.val = val;
            this.freq = 1;
        }
    }

    /** Doubly linked list with sentinel head/tail; front = most recent. */
    class DoublyLinkedList {
        Node head, tail;

        public DoublyLinkedList() {
            head = new Node(0, 0);
            tail = new Node(0, 0);
            head.next = tail;
            tail.prev = head;
        }

        public void addFirst(Node node) {
            Node next = head.next;
            node.next = next;
            next.prev = node;
            node.prev = head;
            head.next = node;
        }

        public void remove(Node node) {
            Node prev = node.prev;
            Node next = node.next;
            prev.next = next;
            next.prev = prev;
        }

        /** Removes and returns the least recently used node, or null if empty. */
        public Node removeLast() {
            if (head.next == tail) {
                return null;
            }
            Node last = tail.prev;
            remove(last);
            return last;
        }

        public boolean isEmpty() {
            return head.next == tail;
        }
    }

    private int capacity;
    private int minFreq;
    private Map<Integer, Node> keyToNode;
    private Map<Integer, DoublyLinkedList> freqToList;

    public LFUCache(int capacity) {
        this.capacity = capacity;
        this.minFreq = 0;
        this.keyToNode = new HashMap<>();
        this.freqToList = new HashMap<>();
    }

    /**
     * Returns the value for {@code key}, or -1 if absent.
     * A hit counts as an access and bumps the key's frequency.
     */
    public int get(int key) {
        Node node = keyToNode.get(key);
        if (node == null) {
            return -1;
        }
        increaseFreq(node);
        return node.val;
    }

    /**
     * Inserts or updates {@code key}. When the cache is full, the least
     * frequently used key is evicted first (ties broken by recency).
     */
    public void put(int key, int value) {
        if (capacity <= 0) return;
        Node node = keyToNode.get(key);
        if (node != null) {
            // Existing key: update value and bump frequency.
            node.val = value;
            increaseFreq(node);
            return;
        }
        if (keyToNode.size() >= capacity) {
            // Full: evict the LFU entry before inserting.
            removeMinFreqNode();
        }
        // New entries always start at frequency 1, which is the new minimum.
        Node newNode = new Node(key, value);
        keyToNode.put(key, newNode);
        freqToList.computeIfAbsent(1, k -> new DoublyLinkedList()).addFirst(newNode);
        minFreq = 1;
    }

    /** Moves {@code node} from its current frequency list to the next one. */
    private void increaseFreq(Node node) {
        int freq = node.freq;
        DoublyLinkedList oldList = freqToList.get(freq);
        oldList.remove(node);
        if (oldList.isEmpty()) {
            // Prune the emptied bucket so freqToList stays bounded.
            freqToList.remove(freq);
            // If this was the minimum frequency, the minimum shifts up by one.
            if (freq == minFreq) {
                minFreq++;
            }
        }
        node.freq++;
        freqToList.computeIfAbsent(node.freq, k -> new DoublyLinkedList()).addFirst(node);
    }

    /** Evicts the least recently used node within the minimum-frequency list. */
    private void removeMinFreqNode() {
        DoublyLinkedList list = freqToList.get(minFreq);
        if (list == null) {
            return; // defensive: only possible if the cache is empty
        }
        Node node = list.removeLast();
        if (node == null) {
            return;
        }
        keyToNode.remove(node.key);
        if (list.isEmpty()) {
            freqToList.remove(minFreq);
        }
    }
}
简化版LFU实现
/**
 * Simplified LFU cache backed by a priority queue (O(log n) operations).
 *
 * The queue orders nodes by (frequency, last-access time). A binary heap
 * does NOT re-sort itself when an element already inside it is mutated, so
 * the original version — which bumped {@code freq}/{@code time} in place —
 * silently corrupted the eviction order. Fixed with lazy deletion: every
 * access creates a fresh Node and offers it; the superseded node stays in
 * the queue as a stale entry and is discarded during eviction when it no
 * longer matches the map's current node for that key.
 */
public class LFUCacheSimple {

    class Node {
        int key, val, freq, time;

        public Node(int key, int val, int freq, int time) {
            this.key = key;
            this.val = val;
            this.freq = freq;
            this.time = time;
        }
    }

    private int capacity;
    // Logical clock: breaks frequency ties in favor of evicting the older access.
    private int time;
    private Map<Integer, Node> keyToNode;
    private PriorityQueue<Node> pq;

    public LFUCacheSimple(int capacity) {
        this.capacity = capacity;
        this.time = 0;
        this.keyToNode = new HashMap<>();
        this.pq = new PriorityQueue<>((a, b) -> {
            if (a.freq != b.freq) {
                return a.freq - b.freq;
            }
            return a.time - b.time;
        });
    }

    /** Returns the value for {@code key}, or -1 if absent; a hit counts as an access. */
    public int get(int key) {
        Node node = keyToNode.get(key);
        if (node == null) {
            return -1;
        }
        refresh(node, node.val);
        return node.val;
    }

    /** Inserts or updates {@code key}, evicting the LFU entry when full. */
    public void put(int key, int value) {
        if (capacity <= 0) return;
        Node node = keyToNode.get(key);
        if (node != null) {
            refresh(node, value);
            return;
        }
        if (keyToNode.size() >= capacity) {
            evictOne();
        }
        Node newNode = new Node(key, value, 1, ++time);
        keyToNode.put(key, newNode);
        pq.offer(newNode);
    }

    /**
     * Replaces {@code node} with an updated copy so the heap ordering stays
     * valid; the old object becomes a stale queue entry removed lazily.
     */
    private void refresh(Node node, int value) {
        Node updated = new Node(node.key, value, node.freq + 1, ++time);
        keyToNode.put(node.key, updated);
        pq.offer(updated);
    }

    /** Polls past stale entries, then removes the first live (current) node. */
    private void evictOne() {
        while (!pq.isEmpty()) {
            Node peek = pq.peek();
            if (keyToNode.get(peek.key) == peek) {
                // Live entry: this is the true LFU victim.
                keyToNode.remove(peek.key);
                pq.poll();
                break;
            } else {
                // Stale entry superseded by a later access — discard.
                pq.poll();
            }
        }
    }
}
🔄 缓存策略对比
常见缓存淘汰算法
/**
 * Side-by-side reference implementations of simple cache eviction strategies.
 */
public class CacheStrategyComparison {

    /** FIFO (First In First Out): evicts whichever entry was inserted earliest. */
    public static class FIFOCache<K, V> {
        private final int capacity;
        private final Map<K, V> cache;
        private final Queue<K> queue; // keys in insertion order

        public FIFOCache(int capacity) {
            this.capacity = capacity;
            this.cache = new HashMap<>();
            this.queue = new LinkedList<>();
        }

        /** Returns the cached value, or null when absent; lookups never reorder. */
        public V get(K key) {
            return cache.get(key);
        }

        /** Inserts {@code key}; re-putting an existing key keeps its original position. */
        public void put(K key, V value) {
            if (cache.containsKey(key)) {
                cache.put(key, value);
                return;
            }
            if (cache.size() >= capacity) {
                // Drop whichever key has been resident the longest.
                cache.remove(queue.poll());
            }
            cache.put(key, value);
            queue.offer(key);
        }
    }

    /** Random replacement: evicts a uniformly random entry when full. */
    public static class RandomCache<K, V> {
        private final int capacity;
        private final Map<K, V> cache;
        private final List<K> keys; // index list enables O(1) random victim pick
        private final Random random;

        public RandomCache(int capacity) {
            this.capacity = capacity;
            this.cache = new HashMap<>();
            this.keys = new ArrayList<>();
            this.random = new Random();
        }

        /** Returns the cached value, or null when absent. */
        public V get(K key) {
            return cache.get(key);
        }

        /** Inserts {@code key}, evicting a randomly chosen victim at capacity. */
        public void put(K key, V value) {
            if (cache.containsKey(key)) {
                cache.put(key, value);
                return;
            }
            if (cache.size() >= capacity) {
                int victim = random.nextInt(keys.size());
                cache.remove(keys.get(victim));
                keys.remove(victim);
            }
            cache.put(key, value);
            keys.add(key);
        }
    }
}
性能对比分析
/** * 缓存性能测试 */ public class CachePerformanceTest { public static void performanceComparison() { int capacity = 1000; int operations = 10000; // LRU测试 long startTime = System.currentTimeMillis(); LRUCache lru = new LRUCache(capacity); testCache(lru, operations); long lruTime = System.currentTimeMillis() - startTime; // LFU测试 startTime = System.currentTimeMillis(); LFUCache lfu = new LFUCache(capacity); testCache(lfu, operations); long lfuTime = System.currentTimeMillis() - startTime; System.out.println("LRU Time: " + lruTime + "ms"); System.out.println("LFU Time: " + lfuTime + "ms"); } private static void testCache(Object cache, int operations) { Random random = new Random(); if (cache instanceof LRUCache) { LRUCache lruCache = (LRUCache) cache; for (int i = 0; i < operations; i++) { int key = random.nextInt(operations); if (random.nextBoolean()) { lruCache.get(key); } else { lruCache.put(key, random.nextInt()); } } } else if (cache instanceof LFUCache) { LFUCache lfuCache = (LFUCache) cache; for (int i = 0; i < operations; i++) { int key = random.nextInt(operations); if (random.nextBoolean()) { lfuCache.get(key); } else { lfuCache.put(key, random.nextInt()); } } } } }
🎯 实际应用场景
分布式缓存实现
/** * 分布式LRU缓存 */ public class DistributedLRUCache { private final LRUCache localCache; private final RedisTemplate<String, Object> redisTemplate; private final String cachePrefix; public DistributedLRUCache(int localCapacity, RedisTemplate<String, Object> redisTemplate) { this.localCache = new LRUCache(localCapacity); this.redisTemplate = redisTemplate; this.cachePrefix = "cache:"; } public Object get(String key) { // 先查本地缓存 Object value = localCache.get(key.hashCode()); if (value != null) { return value; } // 再查Redis value = redisTemplate.opsForValue().get(cachePrefix + key); if (value != null) { // 回填本地缓存 localCache.put(key.hashCode(), (Integer) value); } return value; } public void put(String key, Object value) { // 同时更新本地缓存和Redis localCache.put(key.hashCode(), (Integer) value); redisTemplate.opsForValue().set(cachePrefix + key, value, Duration.ofHours(1)); } public void evict(String key) { localCache.put(key.hashCode(), -1); // 标记删除 redisTemplate.delete(cachePrefix + key); } }
缓存预热和更新策略
/** * 缓存管理器 */ @Component public class CacheManager { private final LRUCache cache; private final ScheduledExecutorService scheduler; public CacheManager() { this.cache = new LRUCache(10000); this.scheduler = Executors.newScheduledThreadPool(2); // 启动缓存预热 startCacheWarmup(); // 启动定期刷新 startPeriodicRefresh(); } /** * 缓存预热 */ private void startCacheWarmup() { scheduler.execute(() -> { try { // 预加载热点数据 List<String> hotKeys = getHotKeys(); for (String key : hotKeys) { Object value = loadFromDatabase(key); if (value != null) { cache.put(key.hashCode(), (Integer) value); } } System.out.println("Cache warmup completed"); } catch (Exception e) { System.err.println("Cache warmup failed: " + e.getMessage()); } }); } /** * 定期刷新缓存 */ private void startPeriodicRefresh() { scheduler.scheduleAtFixedRate(() -> { try { // 刷新即将过期的数据 refreshExpiredData(); } catch (Exception e) { System.err.println("Cache refresh failed: " + e.getMessage()); } }, 1, 1, TimeUnit.HOURS); } private List<String> getHotKeys() { // 从统计系统获取热点key return Arrays.asList("user:1", "user:2", "product:hot"); } private Object loadFromDatabase(String key) { // 从数据库加载数据 return key.hashCode(); } private void refreshExpiredData() { // 刷新即将过期的数据 System.out.println("Refreshing expired cache data"); } }
💡 面试常见问题解答
Q1: LRU和LFU的区别和适用场景?
标准回答:
LRU vs LFU对比: 1. LRU (最近最少使用) - 淘汰策略:最久未访问的数据 - 适用场景:时间局部性强的应用 - 实现复杂度:O(1)操作 - 典型应用:操作系统页面置换、Web缓存 2. LFU (最不经常使用) - 淘汰策略:访问频率最低的数据 - 适用场景:有明显热点数据的应用 - 实现复杂度:O(1)操作(优化后) - 典型应用:CDN缓存、数据库缓存 3. 选择建议 - 短期访问模式:选择LRU - 长期访问模式:选择LFU - 混合场景:可考虑LRU-K或ARC算法 根据业务特点选择合适的策略。
Q2: 如何实现O(1)时间复杂度的LRU?
标准回答:
O(1) LRU实现要点: 1. 数据结构选择 - HashMap:快速定位节点 - 双向链表:快速插入和删除 2. 关键操作 - get:HashMap查找 + 移动到头部 - put:HashMap更新 + 链表操作 3. 实现细节 - 使用伪头尾节点简化边界处理 - 维护size变量避免重复计算 - 先删除再插入保证原子性 4. 空间复杂度 - HashMap: O(capacity) - 双向链表: O(capacity) - 总计: O(capacity) 双向链表是实现O(1)操作的关键。
Q3: 分布式环境下如何实现缓存一致性?
标准回答:
分布式缓存一致性方案: 1. 缓存更新策略 - Cache Aside:应用负责缓存更新 - Write Through:同步更新缓存和数据库 - Write Behind:异步更新数据库 2. 一致性保证 - 强一致性:分布式锁 + 版本号 - 最终一致性:消息队列 + 重试机制 - 弱一致性:TTL过期 + 定期刷新 3. 实现方案 - Redis Cluster:数据分片 + 主从复制 - 一致性Hash:节点变更时最小化数据迁移 - 消息通知:变更时通知其他节点 4. 最佳实践 - 设置合理的TTL - 使用版本号防止ABA问题 - 监控缓存命中率和一致性 根据业务对一致性的要求选择方案。
核心要点总结:
- ✅ 掌握LRU/LFU的实现原理和代码
- ✅ 理解不同缓存策略的适用场景
- ✅ 能够设计分布式缓存方案
- ✅ 熟悉缓存一致性和性能优化策略
Java面试圣经 文章被收录于专栏
Java面试圣经,带你练透java圣经