Java Interview (Autumn Recruiting): High-Concurrency System Design

Chapter 10: High-Concurrency System Design

Interview importance: ⭐⭐⭐⭐⭐

Common interview questions: How would you design a flash-sale (seckill) system? How do you handle hot-spot data?

Estimated reading time: 45 minutes

Opening

High-concurrency system design is one of the heavyweight interview topics. It tests not only your technical depth but also your architectural thinking and your ability to solve complex problems. I have seen plenty of technically solid candidates fall at exactly this system-design hurdle.

Today we will work through the core ideas and hands-on experience of high-concurrency system design, so you can show architect-level thinking in your interview.

🏗️ 10.1 System Architecture Design Principles

High Availability, High Concurrency, High Performance

A must-ask interview question:

Interviewer: "What do you need to consider when designing a high-concurrency system? How do you guarantee high availability?"

The "three highs" of system design:

1. High Availability

// Key points of high-availability design
/*
1. Eliminate single points of failure
   - Deploy services as clusters
   - Database primary/replica or cluster
   - Cache cluster

2. Fault isolation
   - Service decomposition
   - Resource isolation
   - Circuit breaking and graceful degradation

3. Fast recovery
   - Health checks
   - Automatic restart
   - Failover
*/

@Component
public class HighAvailabilityService {
    
    // Multiple data sources configured for failover
    @Autowired
    @Qualifier("masterDataSource")
    private DataSource masterDataSource;
    
    @Autowired
    @Qualifier("slaveDataSource") 
    private DataSource slaveDataSource;
    
    @Autowired
    private CircuitBreaker circuitBreaker;
    
    public User getUserById(Long userId) {
        // Protect the call with the circuit breaker (the CircuitBreaker implemented in 10.2)
        return circuitBreaker.execute(() -> {
            try {
                // Prefer the primary (master) data source
                return getUserFromDataSource(masterDataSource, userId);
            } catch (Exception e) {
                log.warn("Primary query failed, failing over to the replica: {}", e.getMessage());
                // Fail over to the replica (slave) data source
                return getUserFromDataSource(slaveDataSource, userId);
            }
        });
    }
    
    private User getUserFromDataSource(DataSource dataSource, Long userId) {
        // Actual database access
        try (Connection conn = dataSource.getConnection()) {
            // Query the user record
            return queryUser(conn, userId);
        } catch (SQLException e) {
            throw new RuntimeException("Database query failed", e);
        }
    }
}

2. High Concurrency

// High-concurrency handling strategies
@RestController
public class HighConcurrencyController {
    
    @Autowired
    private RedisTemplate<String, Object> redisTemplate;
    
    @Autowired
    private RateLimiter rateLimiter;
    
    @Autowired
    private ThreadPoolExecutor asyncExecutor;
    
    // 1. Rate limiting
    @GetMapping("/api/products/{id}")
    public ResponseEntity<Product> getProduct(@PathVariable Long id) {
        // Token-bucket rate limiting
        if (!rateLimiter.tryAcquire(1, TimeUnit.SECONDS)) {
            return ResponseEntity.status(HttpStatus.TOO_MANY_REQUESTS)
                .body(null);
        }
        
        // 2. Cache first
        String cacheKey = "product:" + id;
        Product product = (Product) redisTemplate.opsForValue().get(cacheKey);
        
        if (product != null) {
            return ResponseEntity.ok(product);
        }
        
        // 3. Asynchronous processing
        CompletableFuture<Product> future = CompletableFuture.supplyAsync(() -> {
            Product p = productService.getById(id);
            if (p != null) {
                // Cache the result
                redisTemplate.opsForValue().set(cacheKey, p, 30, TimeUnit.MINUTES);
            }
            return p;
        }, asyncExecutor);
        
        try {
            product = future.get(2, TimeUnit.SECONDS); // bound the wait time
            return ResponseEntity.ok(product);
        } catch (TimeoutException | InterruptedException | ExecutionException e) {
            // On timeout or failure, fall back to a default value
            return ResponseEntity.ok(getDefaultProduct(id));
        }
    }
}
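The rateLimiter injected above is never defined in the chapter; a minimal sketch assuming Guava's RateLimiter is used (the rate of 1000 permits per second is an assumed example value):

@Configuration
public class RateLimiterConfig {

    // Guava token-bucket limiter; 1000 permits/second is an assumed example value
    @Bean
    public com.google.common.util.concurrent.RateLimiter rateLimiter() {
        return com.google.common.util.concurrent.RateLimiter.create(1000.0);
    }
}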

3. High Performance

// High-performance optimization strategies
@Service
public class HighPerformanceService {
    
    // 1. Connection pool tuning
    @Bean
    public HikariDataSource dataSource() {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl("jdbc:mysql://localhost:3306/test");
        config.setUsername("root");
        config.setPassword("password");
        
        // Connection pool tuning
        config.setMaximumPoolSize(50);           // max connections
        config.setMinimumIdle(10);               // min idle connections
        config.setConnectionTimeout(30000);      // connection timeout: 30 s
        config.setIdleTimeout(600000);           // idle timeout: 10 min
        config.setMaxLifetime(1800000);          // max connection lifetime: 30 min
        config.setLeakDetectionThreshold(60000); // leak detection threshold: 60 s
        
        return new HikariDataSource(config);
    }
    
    // 2. Batch processing
    @Transactional
    public void batchUpdateProducts(List<Product> products) {
        int batchSize = 1000;
        
        for (int i = 0; i < products.size(); i += batchSize) {
            int endIndex = Math.min(i + batchSize, products.size());
            List<Product> batch = products.subList(i, endIndex);
            
            // Batch update
            productMapper.batchUpdate(batch);
            
            // Flush and clear periodically so the persistence context (and transaction) does not grow too large
            if (i > 0 && i % (batchSize * 10) == 0) {
                entityManager.flush();
                entityManager.clear();
            }
        }
    }
    
    // 3. Asynchronous processing
    @Async("taskExecutor")
    public CompletableFuture<Void> processOrderAsync(Order order) {
        try {
            // Time-consuming business processing
            processOrderInternal(order);
            
            // Send the notification
            notificationService.sendOrderNotification(order);
            
            return CompletableFuture.completedFuture(null);
        } catch (Exception e) {
            log.error("异步处理订单失败: orderId={}", order.getId(), e);
            return CompletableFuture.failedFuture(e);
        }
    }
}
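Neither the asyncExecutor used in the controller above nor the "taskExecutor" pool behind @Async is shown; a minimal configuration sketch (pool sizes and queue capacities are assumed example values that need tuning against real load):

@Configuration
@EnableAsync
public class ExecutorConfig {

    // Pool backing @Async("taskExecutor"); sizes are assumed example values
    @Bean("taskExecutor")
    public ThreadPoolTaskExecutor taskExecutor() {
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(20);
        executor.setMaxPoolSize(50);
        executor.setQueueCapacity(1000);
        executor.setThreadNamePrefix("async-task-");
        // Run on the caller thread instead of dropping tasks when the pool is saturated
        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        executor.initialize();
        return executor;
    }

    // Plain JDK pool injected as asyncExecutor in HighConcurrencyController
    @Bean("asyncExecutor")
    public ThreadPoolExecutor asyncExecutor() {
        return new ThreadPoolExecutor(20, 50, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(1000),
            new ThreadPoolExecutor.CallerRunsPolicy());
    }
}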

Scalability and Maintainability

Designing for horizontal scaling:

// Stateless service design
@RestController
public class StatelessController {
    
    @Autowired
    private RedisTemplate<String, Object> redisTemplate;
    
    // ❌ Stateful design (does not scale horizontally)
    private Map<String, Object> localCache = new ConcurrentHashMap<>();
    
    // ✅ Stateless design (scales horizontally)
    @GetMapping("/api/sessions/{sessionId}")
    public ResponseEntity<SessionInfo> getSession(@PathVariable String sessionId) {
        // Keep state in external storage instead of local memory
        SessionInfo session = (SessionInfo) redisTemplate.opsForValue()
            .get("session:" + sessionId);
        
        return session != null ? 
            ResponseEntity.ok(session) : 
            ResponseEntity.notFound().build();
    }
    
    // Requests can be balanced across any service instance
    @PostMapping("/api/orders")
    public ResponseEntity<Order> createOrder(@RequestBody OrderRequest request) {
        // Generate a globally unique ID instead of relying on a single-node sequence
        String orderId = generateGlobalUniqueId();
        
        Order order = new Order();
        order.setId(orderId);
        order.setUserId(request.getUserId());
        order.setAmount(request.getAmount());
        
        // Business logic
        orderService.createOrder(order);
        
        return ResponseEntity.ok(order);
    }
    
    private String generateGlobalUniqueId() {
        // Snowflake algorithm for distributed IDs
        return snowflakeIdGenerator.nextId();
    }
}
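The snowflakeIdGenerator used above is assumed rather than shown; a minimal Snowflake-style sketch using the common 41-bit timestamp / 10-bit worker / 12-bit sequence layout (the custom epoch and worker id are assumed example values):

public class SnowflakeIdGenerator {

    private static final long EPOCH = 1672531200000L; // assumed custom epoch: 2023-01-01
    private static final long WORKER_ID_BITS = 10L;
    private static final long SEQUENCE_BITS = 12L;
    private static final long MAX_SEQUENCE = (1L << SEQUENCE_BITS) - 1;

    private final long workerId;
    private long lastTimestamp = -1L;
    private long sequence = 0L;

    public SnowflakeIdGenerator(long workerId) {
        this.workerId = workerId;
    }

    public synchronized String nextId() {
        long timestamp = System.currentTimeMillis();
        if (timestamp < lastTimestamp) {
            throw new IllegalStateException("Clock moved backwards, refusing to generate id");
        }
        if (timestamp == lastTimestamp) {
            sequence = (sequence + 1) & MAX_SEQUENCE;
            if (sequence == 0) {
                // Sequence exhausted in this millisecond: busy-wait for the next one
                while (timestamp <= lastTimestamp) {
                    timestamp = System.currentTimeMillis();
                }
            }
        } else {
            sequence = 0L;
        }
        lastTimestamp = timestamp;
        long id = ((timestamp - EPOCH) << (WORKER_ID_BITS + SEQUENCE_BITS))
                | (workerId << SEQUENCE_BITS)
                | sequence;
        return Long.toString(id);
    }
}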

⚖️ 10.2 Load Balancing and Rate Limiting

Load Balancing Strategies

Interview focus:

Interviewer: "What load balancing algorithms are there, and what are the pros and cons of each?"

Load balancing algorithm implementations:

// Load balancer interface
public interface LoadBalancer {
    ServerInstance selectServer(List<ServerInstance> servers, String clientId);
}
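The ServerInstance type used by this interface is assumed; a minimal sketch with just the two fields the strategies below rely on:

public class ServerInstance {
    private final String id;   // e.g. "192.168.1.10:8080"
    private final int weight;  // used by the weighted round-robin strategy

    public ServerInstance(String id, int weight) {
        this.id = id;
        this.weight = weight;
    }

    public String getId() { return id; }
    public int getWeight() { return weight; }
}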

// 1. Round robin
@Component
public class RoundRobinLoadBalancer implements LoadBalancer {
    
    private final AtomicInteger counter = new AtomicInteger(0);
    
    @Override
    public ServerInstance selectServer(List<ServerInstance> servers, String clientId) {
        if (servers.isEmpty()) {
            return null;
        }
        
        // floorMod keeps the index non-negative even after the counter overflows
        int index = Math.floorMod(counter.getAndIncrement(), servers.size());
        return servers.get(index);
    }
}

// 2. Weighted round robin
@Component
public class WeightedRoundRobinLoadBalancer implements LoadBalancer {
    
    private final Map<String, Integer> currentWeights = new ConcurrentHashMap<>();
    
    @Override
    public ServerInstance selectServer(List<ServerInstance> servers, String clientId) {
        if (servers.isEmpty()) {
            return null;
        }
        
        int totalWeight = servers.stream().mapToInt(ServerInstance::getWeight).sum();
        ServerInstance selected = null;
        int maxCurrentWeight = 0;
        
        for (ServerInstance server : servers) {
            String serverId = server.getId();
            int weight = server.getWeight();
            
            // Increase the server's current weight
            int currentWeight = currentWeights.getOrDefault(serverId, 0) + weight;
            currentWeights.put(serverId, currentWeight);
            
            // Pick the server with the highest current weight
            if (currentWeight > maxCurrentWeight) {
                maxCurrentWeight = currentWeight;
                selected = server;
            }
        }
        
        // Decrease the selected server's weight by the total weight
        if (selected != null) {
            currentWeights.put(selected.getId(), 
                currentWeights.get(selected.getId()) - totalWeight);
        }
        
        return selected;
    }
}

// 3. Consistent hashing
@Component
public class ConsistentHashLoadBalancer implements LoadBalancer {
    
    private final int virtualNodes = 150; // virtual nodes per physical server
    private final TreeMap<Long, ServerInstance> ring = new TreeMap<>();
    
    public void addServer(ServerInstance server) {
        for (int i = 0; i < virtualNodes; i++) {
            String virtualNodeId = server.getId() + "#" + i;
            long hash = hash(virtualNodeId);
            ring.put(hash, server);
        }
    }
    
    public void removeServer(ServerInstance server) {
        for (int i = 0; i < virtualNodes; i++) {
            String virtualNodeId = server.getId() + "#" + i;
            long hash = hash(virtualNodeId);
            ring.remove(hash);
        }
    }
    
    @Override
    public ServerInstance selectServer(List<ServerInstance> servers, String clientId) {
        if (ring.isEmpty()) {
            return null;
        }
        
        long hash = hash(clientId);
        
        // Find the first node whose hash is >= this value
        Map.Entry<Long, ServerInstance> entry = ring.ceilingEntry(hash);
        
        // Wrap around to the first node if none is found (ring structure)
        if (entry == null) {
            entry = ring.firstEntry();
        }
        
        return entry.getValue();
    }
    
    private long hash(String key) {
        // FNV1_32 hash
        final int p = 16777619;
        int hash = (int) 2166136261L;
        
        for (byte b : key.getBytes()) {
            hash = (hash ^ b) * p;
        }
        hash += hash << 13;
        hash ^= hash >> 7;
        hash += hash << 3;
        hash ^= hash >> 17;
        hash += hash << 5;
        
        // Mask off the sign bit so the result is always non-negative
        // (Math.abs(Integer.MIN_VALUE) would stay negative)
        return hash & 0x7FFFFFFFL;
    }
}

// 4. Least connections
@Component
public class LeastConnectionsLoadBalancer implements LoadBalancer {
    
    private final Map<String, AtomicInteger> connectionCounts = new ConcurrentHashMap<>();
    
    @Override
    public ServerInstance selectServer(List<ServerInstance> servers, String clientId) {
        if (servers.isEmpty()) {
            return null;
        }
        
        ServerInstance selected = null;
        int minConnections = Integer.MAX_VALUE;
        
        for (ServerInstance server : servers) {
            int connections = connectionCounts.getOrDefault(server.getId(), 
                new AtomicInteger(0)).get();
            
            if (connections < minConnections) {
                minConnections = connections;
                selected = server;
            }
        }
        
        // Increment the selected server's connection count
        if (selected != null) {
            connectionCounts.computeIfAbsent(selected.getId(), 
                k -> new AtomicInteger(0)).incrementAndGet();
        }
        
        return selected;
    }
    
    public void releaseConnection(String serverId) {
        AtomicInteger count = connectionCounts.get(serverId);
        if (count != null) {
            count.decrementAndGet();
        }
    }
}
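A small usage sketch exercising the strategies above (the server addresses and weights are assumed example values):

import java.util.List;

public class LoadBalancerDemo {
    public static void main(String[] args) {
        List<ServerInstance> servers = List.of(
            new ServerInstance("10.0.0.1:8080", 5),
            new ServerInstance("10.0.0.2:8080", 3),
            new ServerInstance("10.0.0.3:8080", 1));

        RoundRobinLoadBalancer roundRobin = new RoundRobinLoadBalancer();
        ConsistentHashLoadBalancer consistentHash = new ConsistentHashLoadBalancer();
        servers.forEach(consistentHash::addServer);

        // Round robin cycles through the instances one by one
        System.out.println(roundRobin.selectServer(servers, "client-1").getId());
        System.out.println(roundRobin.selectServer(servers, "client-1").getId());

        // Consistent hashing pins the same client to the same instance
        System.out.println(consistentHash.selectServer(servers, "client-1").getId());
        System.out.println(consistentHash.selectServer(servers, "client-1").getId());
    }
}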

Rate Limiting Algorithms

Four rate limiting algorithms:

// 1. Fixed window
@Component
public class FixedWindowRateLimiter {
    
    private final Map<String, WindowInfo> windows = new ConcurrentHashMap<>();
    
    public boolean tryAcquire(String key, int limit, long windowSizeMs) {
        long now = System.currentTimeMillis();
        long windowStart = now / windowSizeMs * windowSizeMs;
        
        WindowInfo window = windows.computeIfAbsent(key, k -> new WindowInfo());
        
        synchronized (window) {
            if (window.windowStart != windowStart) {
                // New window: reset the counter
                window.windowStart = windowStart;
                window.count = 0;
            }
            
            if (window.count < limit) {
                window.count++;
                return true;
            }
            
            return false;
        }
    }
    
    private static class WindowInfo {
        long windowStart;
        int count;
    }
}

// 2. Sliding window
@Component
public class SlidingWindowRateLimiter {
    
    private final Map<String, Queue<Long>> requestTimes = new ConcurrentHashMap<>();
    
    public boolean tryAcquire(String key, int limit, long windowSizeMs) {
        long now = System.currentTimeMillis();
        Queue<Long> times = requestTimes.computeIfAbsent(key, k -> new ConcurrentLinkedQueue<>());
        
        synchronized (times) {
            // Evict request timestamps that have fallen out of the window
            while (!times.isEmpty() && now - times.peek() > windowSizeMs) {
                times.poll();
            }
            
            if (times.size() < limit) {
                times.offer(now);
                return true;
            }
            
            return false;
        }
    }
}

// 3. Token bucket
@Component
public class TokenBucketRateLimiter {
    
    private final Map<String, TokenBucket> buckets = new ConcurrentHashMap<>();
    
    public boolean tryAcquire(String key, int capacity, double refillRate) {
        TokenBucket bucket = buckets.computeIfAbsent(key, 
            k -> new TokenBucket(capacity, refillRate));
        
        return bucket.tryConsume(1);
    }
    
    private static class TokenBucket {
        private final int capacity;
        private final double refillRate;
        private double tokens;
        private long lastRefillTime;
        
        public TokenBucket(int capacity, double refillRate) {
            this.capacity = capacity;
            this.refillRate = refillRate;
            this.tokens = capacity;
            this.lastRefillTime = System.currentTimeMillis();
        }
        
        public synchronized boolean tryConsume(int tokensRequested) {
            refill();
            
            if (tokens >= tokensRequested) {
                tokens -= tokensRequested;
                return true;
            }
            
            return false;
        }
        
        private void refill() {
            long now = System.currentTimeMillis();
            double tokensToAdd = (now - lastRefillTime) / 1000.0 * refillRate;
            tokens = Math.min(capacity, tokens + tokensToAdd);
            lastRefillTime = now;
        }
    }
}

// 4. Leaky bucket
@Component
public class LeakyBucketRateLimiter {
    
    private final Map<String, LeakyBucket> buckets = new ConcurrentHashMap<>();
    
    public boolean tryAcquire(String key, int capacity, double leakRate) {
        LeakyBucket bucket = buckets.computeIfAbsent(key, 
            k -> new LeakyBucket(capacity, leakRate));
        
        return bucket.tryAdd();
    }
    
    private static class LeakyBucket {
        private final int capacity;
        private final double leakRate;
        private double water;
        private long lastLeakTime;
        
        public LeakyBucket(int capacity, double leakRate) {
            this.capacity = capacity;
            this.leakRate = leakRate;
            this.water = 0;
            this.lastLeakTime = System.currentTimeMillis();
        }
        
        public synchronized boolean tryAdd() {
            leak();
            
            if (water < capacity) {
                water++;
                return true;
            }
            
            return false;
        }
        
        private void leak() {
            long now = System.currentTimeMillis();
            double leaked = (now - lastLeakTime) / 1000.0 * leakRate;
            water = Math.max(0, water - leaked);
            lastLeakTime = now;
        }
    }
}
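A sketch of how one of the limiters above could be wired in front of an endpoint, here as a Spring HandlerInterceptor keyed by client IP (the limit of 100 requests per second is an assumed example):

@Component
public class RateLimitInterceptor implements HandlerInterceptor {

    @Autowired
    private SlidingWindowRateLimiter rateLimiter;

    @Override
    public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {
        // Allow at most 100 requests per client IP within a 1-second window (assumed values)
        String key = "api:" + request.getRemoteAddr();
        if (rateLimiter.tryAcquire(key, 100, 1000)) {
            return true;
        }
        response.setStatus(HttpStatus.TOO_MANY_REQUESTS.value());
        return false;
    }
}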

Circuit Breaking and Graceful Degradation

A circuit breaker implementation:

// Circuit breaker states
public enum CircuitBreakerState {
    CLOSED,    // Closed: requests flow through normally
    OPEN,      // Open: all requests are rejected immediately
    HALF_OPEN  // Half-open: a few trial requests probe whether the service has recovered
}

// Circuit breaker implementation
@Component
public class CircuitBreaker {
    
    private volatile CircuitBreakerState state = CircuitBreakerState.CLOSED;
    private final AtomicInteger failureCount = new AtomicInteger(0);
    private final AtomicInteger successCount = new AtomicInteger(0);
    private final AtomicInteger requestCount = new AtomicInteger(0);
    private volatile long lastFailureTime = 0;
    
    // Configuration parameters
    private final int failureThreshold = 5;           // failure count threshold
    private final int successThreshold = 3;           // successes needed to close from half-open
    private final long timeout = 60000;               // how long to stay open (ms)
    private final double failureRateThreshold = 0.5;  // failure rate threshold
    
    public <T> T execute(Supplier<T> supplier) {
        if (state == CircuitBreakerState.OPEN) {
            if (System.currentTimeMillis() - lastFailureTime > timeout) {
                // After the timeout, move to half-open to probe the downstream service
                state = CircuitBreakerState.HALF_OPEN;
                successCount.set(0);
            } else {
                // Breaker is open: fail fast
                throw new CircuitBreakerOpenException("Circuit breaker is open");
            }
        }
        
        try {
            T result = supplier.get();
            onSuccess();
            return result;
        } catch (Exception e) {
            onFailure();
            throw e;
        }
    }
    
    private void onSuccess() {
        requestCount.incrementAndGet();
        
        if (state == CircuitBreakerState.HALF_OPEN) {
            int currentSuccessCount = successCount.incrementAndGet();
            if (currentSuccessCount >= successThreshold) {
                // Enough successes: close the breaker again
                state = CircuitBreakerState.CLOSED;
                reset();
            }
        } else if (state == CircuitBreakerState.CLOSED) {
            // Reset the failure counter
            failureCount.set(0);
        }
    }
    
    private void onFailure() {
        requestCount.incrementAndGet();
        int currentFailureCount = failureCount.incrementAndGet();
        lastFailureTime = System.currentTimeMillis();
        
        if (state == CircuitBreakerState.HALF_OPEN) {
            // A failure in half-open state reopens the breaker immediately
            state = CircuitBreakerState.OPEN;
        } else if (state == CircuitBreakerState.CLOSED) {
            // Check whether the trip conditions are met
            int totalRequests = requestCount.get();
            if (totalRequests >= 10) { // minimum number of requests before evaluating the failure rate
                double failureRate = (double) currentFailureCount / totalRequests;
                if (currentFailureCount >= failureThreshold || failureRate >= failureRateThreshold) {
                    state = CircuitBreakerState.OPEN;
                }
            }
        }
    }
    
    private void reset() {
        failureCount.set(0);
        successCount.set(0);
        requestCount.set(0);
    }
    
    public CircuitBreakerState getState() {
        return state;
    }
}

// Usage example
@Service
public class ExternalServiceClient {
    
    @Autowired
    private CircuitBreaker circuitBreaker;
    
    @Autowired
    private RestTemplate restTemplate;
    
    public String callExternalService(String param) {
        return circuitBreaker.execute(() -> {
            // Call the external service
            return restTemplate.getForObject("http://external-service/api?param=" + param, String.class);
        });
    }
    
    // Fallback (degraded) response
    public String fallbackMethod(String param) {
        return "Service temporarily unavailable, please try again later";
    }
}
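In real projects you would usually reach for an existing library such as Resilience4j rather than a hand-rolled breaker; a rough sketch of the equivalent call with a fallback (the breaker settings and service URL are assumed example values):

import io.github.resilience4j.circuitbreaker.CircuitBreaker;
import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig;
import org.springframework.web.client.RestTemplate;
import java.time.Duration;
import java.util.function.Supplier;

public class Resilience4jExternalClient {

    private final CircuitBreaker breaker = CircuitBreaker.of("externalService",
        CircuitBreakerConfig.custom()
            .failureRateThreshold(50)                        // open when 50% of calls fail
            .waitDurationInOpenState(Duration.ofSeconds(60)) // stay open for 60 seconds
            .slidingWindowSize(10)                           // judge over the last 10 calls
            .build());

    public String call(RestTemplate restTemplate, String param) {
        Supplier<String> decorated = CircuitBreaker.decorateSupplier(breaker,
            () -> restTemplate.getForObject("http://external-service/api?param=" + param, String.class));
        try {
            return decorated.get();
        } catch (Exception e) {
            // Degraded response when the breaker is open or the call fails
            return "Service temporarily unavailable, please try again later";
        }
    }
}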

🗄️ 10.3 Cache Architecture Design

Multi-Level Caching

Interview focus:

Interviewer: "Design a multi-level cache system. How do you keep the caches consistent?"

Multi-level cache architecture:

// Multi-level cache manager
@Component
public class MultiLevelCacheManager {
    
    // L1: local in-process cache (Caffeine)
    private final Cache<String, Object> l1Cache;
    
    // L2: distributed cache (Redis)
    @Autowired
    private RedisTemplate<String, Object> redisTemplate;
    
    // L3: the database itself
    @Autowired
    private UserMapper userMapper;
    
    public MultiLevelCacheManager() {
        this.l1Cache = Caffeine.newBuilder()
            .maximumSize(10000)                     // max number of entries
            .expireAfterWrite(5, TimeUnit.MINUTES)  // expire 5 minutes after write
            .expireAfterAccess(2, TimeUnit.MINUTES) // expire 2 minutes after last access
            .recordStats()                          // record hit/miss statistics
            .build();
    }
    
    public User getUser(Long userId) {
        String key = "user:" + userId;
        
        // 1. Check the L1 cache
        User user = (User) l1Cache.getIfPresent(key);
        if (user != null) {
            log.debug("L1 cache hit: userId={}", userId);
            return user;
        }
        
        // 2. Check the L2 cache
        user = (User) redisTemplate.opsForValue().get(key);
        if (user != null) {
            log.debug("L2 cache hit: userId={}", userId);
            // Backfill the L1 cache
            l1Cache.put(key, user);
            return user;
        }
        
        // 3. Fall back to the database
        user = userMapper.selectById(userId);
        if (user != null) {
            log.debug("Loaded from database: userId={}", userId);
            
            // Backfill the L2 cache
            redisTemplate.opsForValue().set(key, user, 30, TimeUnit.MINUTES);
            
            // Backfill the L1 cache
            l1Cache.put(key, user);
        }
        
        return user;
    }
    
    public void updateUser(User user) {
        String key = "user:" + user.getId();
        
        // 1. Update the database
        userMapper.updateById(user);
        
        // 2. Invalidate the caches (Cache Aside pattern)
        l1Cache.invalidate(key);
        redisTemplate.delete(key);
        
        // Alternatively, update the caches in place (Write Through pattern)
        // l1Cache.put(key, user);
        // redisTemplate.opsForValue().set(key, user, 30, TimeUnit.MINUTES);
    }
    
    // Cache warm-up
    @EventListener(ApplicationReadyEvent.class)
    public void warmUpCache() {
        log.info("Starting cache warm-up...");
        
        CompletableFuture.runAsync(() -> {
            // Pre-load hot data
            List<Long> hotUserIds = getHotUserIds();
            
            for (Long userId : hotUserIds) {
                try {
                    getUser(userId);
                    Thread.sleep(10); // throttle so warm-up does not hammer the backend
                } catch (Exception e) {
                    log.error("Cache warm-up failed: userId={}", userId, e);
                }
            }
            
            log.info("Cache warm-up finished, entries warmed: {}", hotUserIds.size());
        });
    }
    
    // Cache statistics
    public CacheStats getCacheStats() {
        com.github.benmanes.caffeine.cache.stats.CacheStats stats = l1Cache.stats();
        
        return CacheStats.builder()
            .hitCount(stats.hitCount())
            .missCount(stats.missCount())
            .hitRate(stats.hitRate())
            .evictionCount(stats.evictionCount())
            .averageLoadTime(stats.averageLoadPenalty())
            .build();
    }
}

Cache Consistency Guarantees

Cache update strategies:

// Cache consistency management
@Service
public class CacheConsistencyService {
    
    @Autowired
    private RedisTemplate<String, Object> redisTemplate;
    
    @Autowired
    private UserMapper userMapper;
    
    @Autowired
    private RabbitTemplate rabbitTemplate;
    
    // 1. Cache Aside pattern (recommended)
    @Transactional
    public void updateUserCacheAside(User user) {
        String key = "user:" + user.getId();
        
        try {
            // Update the database first
            userMapper.updateById(user);
            
            // Then delete the cache entry
            redisTemplate.delete(key);
            
            log.info("Cache Aside update finished: userId={}", user.getId());
            
        } catch (Exception e) {
            log.error("Cache Aside update failed", e);
            throw e;
        }
    }
    
    // 2. Write Through pattern
    @Transactional
    public void updateUserWriteThrough(User user) {
        String key = "user:" + user.getId();
        
        try {
            // Update the database and the cache together
            userMapper.updateById(user);
            redisTemplate.opsForValue().set(key, user, 30, TimeUnit.MINUTES);
            
            log.info("Write Through update finished: userId={}", user.getId());
            
        } catch (Exception e) {
            log.error("Write Through update failed", e);
            // Roll back the cache entry
            redisTemplate.delete(key);
            throw e;
        }
    }
    
    // 3. Write Behind pattern (asynchronous write-back)
    public void updateUserWriteBehind(User user) {
        String key = "user:" + user.getId();
        
        // Update the cache immediately
        redisTemplate.opsForValue().set(key, user, 30, TimeUnit.MINUTES);
        
        // Write the database asynchronously
        CompletableFuture.runAsync(() -> {
            try {
                Thread.sleep(100); // small delay to allow batching
                userMapper.updateById(user);
                log.info("Write Behind async update finished: userId={}", user.getId());
            } catch (Exception e) {
                log.error("Write Behind async update failed: userId={}", user.getId(), e);
                // Retry here, or push to a failure queue
            }
        });
    }
    
    // 4. Message-based cache synchronization
    @Transactional
    public void updateUserWithMessage(User user) {
        // Update the database
        userMapper.updateById(user);
        
        // Publish a cache-invalidation message
        CacheUpdateMessage message = new CacheUpdateMessage();
        message.setKey("user:" + user.getId());
        message.setOperation("DELETE");
        message.setTimestamp(System.currentTimeMillis());
        
        rabbitTemplate.convertAndSend("cache.update.exchange", "cache.update", message);
        
        log.info("Published cache update message: userId={}", user.getId());
    }
    
    // Consume cache update messages
    @RabbitListener(queues = "cache.update.queue")
    public void handleCacheUpdateMessage(CacheUpdateMessage message) {
        try {
            switch (message.getOperation()) {
                case "DELETE":
                    redisTemplate.delete(message.getKey());
                    break;
                case "UPDATE":
                    // 重新加载数据
                    reloadCache(message.getKey());
                    break;
                default:
                    log.warn("未知的缓存操作: {}", message.getOperation());
            }
            
            log.info("处理缓存更新消息完成: key={}, operation={}", 
                message.getKey(), message.getOperation());
                
        } catch (Exception e) {
            log.error("处理缓存更新消息失败", e);
            // 可以重试或记录到死信队列
        }
    }
    
    private void reloadCache(String key) {
        // Parse the entity id out of the key
        String[] parts = key.split(":");
        if (parts.length == 2 && "user".equals(parts[0])) {
            Long userId = Long.parseLong(parts[1]);
            User user = userMapper.selectById(userId);
            if (user != null) {
                redisTemplate.opsForValue().set(key, user, 30, TimeUnit.MINUTES);
            }
        }
    }
}

🔥 10.4 Database Architecture Evolution

Sharding: Splitting Databases and Tables

High-frequency interview question:

Interviewer: "When do you need to shard databases and tables? How do you design the sharding strategy?"

A sharding implementation:

// Sharding strategy interface
public interface ShardingStrategy {
    String determineDataSource(Object shardingValue);
    String determineTable(Object shardingValue);
}

// Sharding strategy for the user table
@Component
public class UserShardingStrategy implements ShardingStrategy {
    
    private static final int DB_COUNT = 4;      // 4 databases
    private static final int TABLE_COUNT = 16;  // 16 tables per database
    
    @Override
    public String determineDataSource(Object shardingValue) {
        Long userId = (Long) shardingValue;
        int dbIndex = (int) (userId % DB_COUNT);
        return "ds" + dbIndex;
    }
    
    @Override
    public String determineTable(Object shardingValue) {
        Long userId = (Long) shardingValue;
        // Divide by DB_COUNT first: the database index is already userId % 4, so using
        // userId % 16 directly would leave only 4 of the 16 tables in each database populated
        int tableIndex = (int) ((userId / DB_COUNT) % TABLE_COUNT);
        return "user_" + String.format("%02d", tableIndex);
    }
}

// Sharding router
@Component
public class ShardingRouter {
    
    @Autowired
    private Map<String, DataSource> dataSourceMap;
    
    @Autowired
    private UserShardingStrategy userShardingStrategy;
    
    public User getUserById(Long userId) {
        // Determine the target data source and table
        String dataSourceName = userShardingStrategy.determineDataSource(userId);
        String tableName = userShardingStrategy.determineTable(userId);
        
        DataSource dataSource = dataSourceMap.get(dataSourceName);
        
        try (Connection conn = dataSource.getConnection()) {
            String sql = "SELECT * FROM " + tableName + " WHERE id = ?";
            try (PreparedStatement stmt = conn.prepareStatement(sql)) {
                stmt.setLong(1, userId);
                try (ResultSet rs = stmt.executeQuery()) {
                    if (rs.next()) {
                        return mapResultSetToUser(rs);
                    }
                }
            }
        } catch (SQLException e) {
            throw new RuntimeException("查询用户失败", e);
        }
        
        return null;
    }
    
    // Cross-shard query
    public List<User> getUsersByAgeRange(int minAge, int maxAge) {
        List<User> result = new ArrayList<>();
        List<CompletableFuture<List<User>>> futures = new ArrayList<>();
        
        // Query all shards in parallel
        for (String dataSourceName : dataSourceMap.keySet()) {
            CompletableFuture<List<User>> future = CompletableFuture.supplyAsync(() -> {
                return queryUsersByAgeFromDataSource(dataSourceName, minAge, maxAge);
            });
            futures.add(future);
        }
        
        // Merge the results
        for (CompletableFuture<List<User>> future : futures) {
            try {
                result.addAll(future.get());
            } catch (Exception e) {
                log.error("跨分片查询失败", e);
            }
        }
        
        return result;
    }
    
    private List<User> queryUsersByAgeFromDataSource(String dataSourceName, int minAge, int maxAge) {
        List<User> users = new ArrayList<>();
        DataSource dataSource = dataSourceMap.get(dataSourceName);
        
        try (Connection conn = dataSource.getConnection()) {
            // Scan every user table in this data source
            for (int i = 0; i < 16; i++) {
                String tableName = "user_" + String.format("%02d", i);
                String sql = "SELECT * FROM " + tableName + " WHERE age BETWEEN ? AND ?";
                
                try (PreparedStatement stmt = conn.prepareStatement(sql)) {
                    stmt.setInt(1, minAge);
                    stmt.setInt(2, maxAge);
                    
                    try (ResultSet rs = stmt.executeQuery()) {
                        while (rs.next()) {
                            users.add(mapResultSetToUser(rs));
                        }
                    }
                }
            }
        } catch (SQLException e) {
            log.error("查询数据源失败: {}", dataSourceName, e);
        }
        
        return users;
    }
}

Read/Write Splitting

Dynamic data source switching:

// Data source type
public enum DataSourceType {
    MASTER, SLAVE
}

// Per-thread data source context
public class DataSourceContextHolder {
    private static final ThreadLocal<DataSourceType> contextHolder = new ThreadLocal<>();
    
    public static void setDataSourceType(DataSourceType dataSourceType) {
        contextHolder.set(dataSourceType);
    }
    
    public static DataSourceType getDataSourceType() {
        return contextHolder.get();
    }
    
    public static void clearDataSourceType() {
        contextHolder.remove();
    }
}

// Routing data source
public class DynamicDataSource extends AbstractRoutingDataSource {
    
    @Override
    protected Object determineCurrentLookupKey() {
        DataSourceType dataSourceType = DataSourceContextHolder.getDataSourceType();
        return dataSourceType != null ? dataSourceType : DataSourceType.MASTER;
    }
}
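A sketch of how the routing data source might be registered so the lookup keys above resolve to real connection pools, assuming the master/slave DataSource beans already exist elsewhere:

@Configuration
public class DynamicDataSourceConfig {

    @Bean
    @Primary
    public DataSource dynamicDataSource(@Qualifier("masterDataSource") DataSource master,
                                        @Qualifier("slaveDataSource") DataSource slave) {
        DynamicDataSource dynamicDataSource = new DynamicDataSource();

        // Keys must match what determineCurrentLookupKey() returns
        Map<Object, Object> targets = new HashMap<>();
        targets.put(DataSourceType.MASTER, master);
        targets.put(DataSourceType.SLAVE, slave);

        dynamicDataSource.setTargetDataSources(targets);
        dynamicDataSource.setDefaultTargetDataSource(master);
        return dynamicDataSource;
    }
}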

// Read/write splitting aspect
@Aspect
@Component
public class ReadWriteSplitAspect {
    
    @Before("@annotation(readOnly)")
    public void setReadDataSourceType(ReadOnly readOnly) {
        DataSourceContextHolder.setDataSourceType(DataSourceType.SLAVE);
        log.debug("切换到从库");
    }
    
    @Before("execution(* com.example.service.*.select*(..)) || " +
            "execution(* com.example.service.*.find*(..)) || " +
            "execution(* com.example.service.*.get*(..)) || " +
            "execution(* com.example.service.*.query*(..))")
    public void setReadDataSourceTypeForQuery() {
        if (DataSourceContextHolder.getDataSourceType() == null) {
            DataSourceContextHolder.setDataSourceType(DataSourceType.SLAVE);
            log.debug("查询操作,切换到从库");
        }
    }
    
    @After("@annotation(readOnly) || " +
           "execution(* com.example.service.*.select*(..)) || " +
           "execution(* com.example.service.*.find*(..)) || " +
           "execution(* com.example.service.*.get*(..)) || " +
           "execution(* com.example.service.*.query*(..))")
    public void clearDataSourceType() {
        DataSourceContextHolder.clearDataSourceType();
        log.debug("清除数据源类型");
    }
    
    // Handle the annotation that forces reads to go to the primary
    @Before("@annotation(forceMaster)")
    public void setMasterDataSourceType(ForceMaster forceMaster) {
        DataSourceContextHolder.setDataSourceType(DataSourceType.MASTER);
        log.debug("强制使用主库");
    }
}
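The @ReadOnly and @ForceMaster annotations referenced by the aspect are assumed to be simple marker annotations, for example:

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface ReadOnly {
}

@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface ForceMaster {
}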

// Handling primary/replica replication lag
@Service
public class MasterSlaveDelayHandler {
    
    @Autowired
    private RedisTemplate<String, Object> redisTemplate;
    
    private static final String WRITE_FLAG_PREFIX = "write_flag:";
    private static final int DELAY_THRESHOLD_MS = 100; // assumed replication-lag window (ms)
    
    @Transactional
    public void updateUser(User user) {
        // Update the database
        userMapper.updateById(user);
        
        // Set a write flag so that reads go to the primary for a short window
        String writeFlag = WRITE_FLAG_PREFIX + "user:" + user.getId();
        redisTemplate.opsForValue().set(writeFlag, "1", DELAY_THRESHOLD_MS, TimeUnit.MILLISECONDS);
        
        log.info("Updated user and set write flag: userId={}", user.getId());
    }
    
    public User getUser(Long userId) {
        String writeFlag = WRITE_FLAG_PREFIX + "user:" + userId;
        
        // Check for a recent write flag
        if (Boolean.TRUE.equals(redisTemplate.hasKey(writeFlag))) {
            // Flag present: force this read to the primary
            DataSourceContextHolder.setDataSourceType(DataSourceType.MASTER);
            log.debug("Write flag detected, reading from primary: userId={}", userId);
        }
        
        return userMapper.selectById(userId);
    }
}

💡 Real Alibaba Interview Question: Design a Social System for Tens of Millions of Users

Interview scenario:

Interviewer: "Design a Weibo-like social system that supports tens of millions of users. What problems do you need to consider? Give a detailed architecture design."

System architecture design:

// Overall architecture of the social system
/*
1. Scale: tens of millions of users
2. Core features: publishing posts, follow/followers, timeline, likes and comments
3. Performance targets:
   - Publish QPS: 10,000
   - Read QPS: 100,000
   - Response time: < 200 ms
   - Availability: 99.9%

Architecture:
┌──────────────────────────────────────────────────────────────┐
│                     CDN + load balancing                      │
├──────────────────────────────────────────────────────────────┤
│  API gateway (rate limiting, auth, routing)                   │
├──────────────────────────────────────────────────────────────┤
│  Microservice cluster                                         │
│  ┌───────┐ ┌─────────┐ ┌──────────────┐ ┌──────────┐          │
│  │ User  │ │ Content │ │ Relationship │ │ Timeline │          │
│  └───────┘ └─────────┘ └──────────────┘ └──────────┘          │
├──────────────────────────────────────────────────────────────┤
│  Cache layer (Redis cluster)                                  │
├──────────────────────────────────────────────────────────────┤
│  Storage layer (sharded MySQL + Elasticsearch for search)     │
├──────────────────────────────────────────────────────────────┤
│  Message queue (Kafka) + object storage (OSS)                 │
└──────────────────────────────────────────────────────────────┘
*/
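A quick back-of-envelope fan-out estimate helps motivate the hybrid push/pull timeline design further down; only the publish QPS comes from the requirements above, the follower figures are assumed examples:

// Rough fan-out sizing (assumed averages, for illustration only)
public class FanOutEstimate {
    public static void main(String[] args) {
        long publishQps = 10_000;        // peak publish QPS from the requirements above
        long avgFollowers = 300;         // assumed average follower count
        long bigVFollowers = 10_000_000; // assumed follower count of one big-V account

        // Pure push: every post is written into every follower's timeline
        System.out.println("Push-model timeline writes/s: " + publishQps * avgFollowers);
        // A single big-V post alone triggers this many timeline writes, which is why
        // big-V content is pulled at read time instead of pushed at write time
        System.out.println("Timeline writes for one big-V post: " + bigVFollowers);
    }
}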

// 1. User service
@RestController
@RequestMapping("/api/users")
public class UserController {
    
    @Autowired
    private UserService userService;
    
    @Autowired
    private RateLimiter rateLimiter;
    
    // User registration
    @PostMapping("/register")
    public ResponseEntity<UserResponse> register(@RequestBody @Valid UserRegisterRequest request) {
        // Rate limiting
        if (!rateLimiter.tryAcquire()) {
            return ResponseEntity.status(HttpStatus.TOO_MANY_REQUESTS).build();
        }
        
        User user = userService.register(request);
        return ResponseEntity.ok(UserResponse.from(user));
    }
    
    // Fetch user profile
    @GetMapping("/{userId}")
    @Cacheable(value = "user", key = "#userId")
    public ResponseEntity<UserResponse> getUser(@PathVariable Long userId) {
        User user = userService.getById(userId);
        return user != null ? 
            ResponseEntity.ok(UserResponse.from(user)) : 
            ResponseEntity.notFound().build();
    }
}

// 2. Content (post) service
@RestController
@RequestMapping("/api/posts")
public class PostController {
    
    @Autowired
    private PostService postService;
    
    @Autowired
    private TimelineService timelineService;
    
    // Publish a post
    @PostMapping
    public ResponseEntity<PostResponse> createPost(@RequestBody @Valid CreatePostRequest request,
                                                  @RequestHeader("X-User-Id") Long userId) {
        Post post = postService.createPost(userId, request);
        
        // Asynchronously fan out to followers' timelines
        timelineService.pushToFollowersAsync(userId, post);
        
        return ResponseEntity.ok(PostResponse.from(post));
    }
    
    // Fetch post details
    @GetMapping("/{postId}")
    @Cacheable(value = "post", key = "#postId")
    public ResponseEntity<PostResponse> getPost(@PathVariable Long postId) {
        Post post = postService.getById(postId);
        return post != null ? 
            ResponseEntity.ok(PostResponse.from(post)) : 
            ResponseEntity.notFound().build();
    }
}

// 3. Relationship (follow) service
@Service
public class RelationshipService {
    
    @Autowired
    private RelationshipMapper relationshipMapper;
    
    @Autowired
    private RedisTemplate<String, Object> redisTemplate;
    
    // Follow a user
    @Transactional
    public boolean followUser(Long followerId, Long followeeId) {
        if (followerId.equals(followeeId)) {
            throw new BusinessException("不能关注自己");
        }
        
        // Already following? Then this is a no-op
        if (isFollowing(followerId, followeeId)) {
            return true;
        }
        
        // Persist the relationship
        Relationship relationship = new Relationship();
        relationship.setFollowerId(followerId);
        relationship.setFolloweeId(followeeId);
        relationship.setCreateTime(LocalDateTime.now());
        
        relationshipMapper.insert(relationship);
        
        // Update the caches
        updateFollowingCache(followerId, followeeId, true);
        updateFollowersCache(followeeId, followerId, true);
        
        // Update the counters
        incrementFollowingCount(followerId);
        incrementFollowersCount(followeeId);
        
        return true;
    }
    
    // Check the follow relationship (very hot query, worth optimizing)
    public boolean isFollowing(Long followerId, Long followeeId) {
        String key = "following:" + followerId;
        
        // Check the cache first; only trust a positive hit, because isMember returns
        // false (not null) when the set has not been loaded yet
        Boolean cached = redisTemplate.opsForSet().isMember(key, followeeId);
        if (Boolean.TRUE.equals(cached)) {
            return true;
        }
        
        // Fall back to the database
        boolean isFollowing = relationshipMapper.existsRelationship(followerId, followeeId);
        
        // Update the cache
        if (isFollowing) {
            redisTemplate.opsForSet().add(key, followeeId);
        }
        redisTemplate.expire(key, 1, TimeUnit.HOURS);
        
        return isFollowing;
    }
    
    // Get the following list (paginated)
    public List<User> getFollowingList(Long userId, int page, int size) {
        String key = "following_list:" + userId + ":" + page;
        
        // Check the cache
        List<User> cached = (List<User>) redisTemplate.opsForValue().get(key);
        if (cached != null) {
            return cached;
        }
        
        // Query the database
        int offset = (page - 1) * size;
        List<Long> followeeIds = relationshipMapper.getFolloweeIds(userId, offset, size);
        List<User> users = userService.getByIds(followeeIds);
        
        // Cache the result
        redisTemplate.opsForValue().set(key, users, 10, TimeUnit.MINUTES);
        
        return users;
    }
}

// 4. Timeline service
@Service
public class TimelineService {
    
    @Autowired
    private RedisTemplate<String, Object> redisTemplate;
    
    @Autowired
    private PostService postService;
    
    @Autowired
    private RelationshipService relationshipService;
    
    // Push model: fan out to every follower's timeline at publish time
    @Async("timelineExecutor")
    public void pushToFollowersAsync(Long userId, Post post) {
        // Fetch the follower list
        List<Long> followerIds = relationshipService.getFollowerIds(userId);
        
        // Push in batches to avoid one huge fan-out burst
        int batchSize = 1000;
        for (int i = 0; i < followerIds.size(); i += batchSize) {
            int endIndex = Math.min(i + batchSize, followerIds.size());
            List<Long> batch = followerIds.subList(i, endIndex);
            
            pushToFollowersBatch(batch, post);
        }
    }
    
    private void pushToFollowersBatch(List<Long> followerIds, Post post) {
        for (Long followerId : followerIds) {
            String timelineKey = "timeline:" + followerId;
            
            // Add the post to this follower's timeline (sorted set keyed by publish time)
            redisTemplate.opsForZSet().add(timelineKey, post.getId(), post.getCreateTime().toEpochSecond(ZoneOffset.UTC));
            
            // Cap the timeline length: keep only the newest 1000 entries
            redisTemplate.opsForZSet().removeRange(timelineKey, 0, -1001);
            
            // Expire idle timelines
            redisTemplate.expire(timelineKey, 7, TimeUnit.DAYS);
        }
    }
    
    // Pull model: fetch the latest posts of followed users at read time
    public List<Post> getTimelinePull(Long userId, int page, int size) {
        // Fetch the list of followed users
        List<Long> followingIds = relationshipService.getFollowingIds(userId);
        followingIds.add(userId); // include the user's own posts
        
        // Collect each followed user's recent posts
        List<Post> allPosts = new ArrayList<>();
        
        for (Long followingId : followingIds) {
            List<Post> userPosts = postService.getUserRecentPosts(followingId, 20);
            allPosts.addAll(userPosts);
        }
        
        // Sort by publish time, newest first
        allPosts.sort((p1, p2) -> p2.getCreateTime().compareTo(p1.getCreateTime()));
        
        // Paginate
        int start = (page - 1) * size;
        int end = Math.min(start + size, allPosts.size());
        
        return start < allPosts.size() ? allPosts.subList(start, end) : Collections.emptyList();
    }
    
    // Hybrid model: pull for big-V accounts, push for ordinary users
    public List<Post> getTimelineHybrid(Long userId, int page, int size) {
        String timelineKey = "timeline:" + userId;
        
        // First read from the pre-pushed timeline
        Set<Object> postIds = redisTemplate.opsForZSet().reverseRange(timelineKey, 
            (page - 1) * size, page * size - 1);
        
        List<Post> posts = new ArrayList<>();
        if (postIds != null && !postIds.isEmpty()) {
            List<Long> ids = postIds.stream()
                .map(id -> Long.parseLong(id.toString()))
                .collect(Collectors.toList());
            posts = postService.getByIds(ids);
        }
        
        // If the pushed timeline is short, top it up by pulling big-V posts
        if (posts.size() < size) {
            List<Long> bigVIds = relationshipService.getBigVFollowingIds(userId);
            List<Post> bigVPosts = getBigVRecentPosts(bigVIds, size - posts.size());
            posts.addAll(bigVPosts);
        }
        
        return posts;
    }
}

// 5. Data sharding strategy
@Component
public class SocialSystemShardingStrategy {
    
    // User table sharded by user id
    public String getUserTableName(Long userId) {
        return "user_" + (userId % 64);
    }
    
    // Post table sharded by author id (one user's posts stay on one shard)
    public String getPostTableName(Long userId) {
        return "post_" + (userId % 256);
    }
    
    // Relationship table sharded by follower id
    public String getRelationshipTableName(Long followerId) {
        return "relationship_" + (followerId % 128);
    }
    
    // Timeline table sharded by user id
    public String getTimelineTableName(Long userId) {
        return "timeline_" + (userId % 512);
    }
}

// 6. Performance monitoring and alerting
@Component
public class SocialSystemMonitor {
    
    @Autowired
    private MeterRegistry meterRegistry;
    
    // Record key metrics
    public void recordPostCreation(Long userId, long duration) {
        // Record the measured duration that was passed in
        Timer.builder("post.creation.duration")
            .tag("user_type", getUserType(userId))
            .register(meterRegistry)
            .record(duration, TimeUnit.MILLISECONDS);
        
        meterRegistry.counter("post.creation.count",
            "user_type", getUserType(userId)).increment();
    }
    
    public void recordTimelineQuery(Long userId, int size, long duration) {
        Timer.builder("timeline.query.duration")
            .tag("page_size", String.valueOf(size))
            .register(meterRegistry)
            .record(duration, TimeUnit.MILLISECONDS);
    }
    
    private String getUserType(Long userId) {
        // Classify the user by follower count
        long followersCount = relationshipService.getFollowersCount(userId);
        if (followersCount > 1000000) {
            return "big_v";
        } else if (followersCount > 10000) {
            return "kol";
        } else {
            return "normal";
        }
    }
}

Summary

High-concurrency system design is a core skill every backend architect needs. It covers a broad range of topics and requires balancing performance, availability, and consistency. In interviews, the focus is on:

Key points recap:

  1. Design principles: balancing high availability, high concurrency, and high performance
  2. Load balancing: choosing and implementing the right algorithm
  3. Rate limiting and circuit breaking: the key tools for protecting system stability
  4. Cache architecture: multi-level caching and consistency guarantees
  5. Database optimization: sharding and read/write splitting

Interview tips:

  • Start from the business requirements and work toward the technical solution step by step
  • Consider scalability and maintainability
  • Be able to weigh the trade-offs between different approaches
  • Tie your answers to real project experience to show problem-solving ability
  • Cover monitoring and operations to show full-stack thinking

Core takeaways of this chapter:

  • ✅ The three core principles of high-concurrency system design
  • ✅ Load balancing algorithms and rate limiting strategies
  • ✅ Multi-level cache architecture
  • ✅ Sharding and read/write splitting
  • ✅ A hands-on architecture for a large social system

Next chapter preview: Cloud Native and Containerization – Docker, Kubernetes, microservice governance, and other modern deployment approaches
