The Upgrade Path of a Redis Multi-Level Cache Architecture
1. Basic version (basic CRUD operations)
The implementation details are as follows:
public class UserServiceImpl implements UserService {
@Autowired
UserMapper userMapper;
@Override
public User insetUser(User user) {
User insertRet = userMapper.insetUser(user);
RedisUtil.add(RedisKeyPreConst.USER_CACHE + user.getUserId(), JSON.toJSONString(user));
return insertRet;
}
@Override
public Integer deleteUser(Integer userId) {
Integer num = userMapper.deleteUser(userId);
RedisUtil.delete(RedisKeyPreConst.USER_CACHE + userId);
return num;
}
@Override
public User updateUser(User user) {
User updateRet = userMapper.updateUser(user);
RedisUtil.update(RedisKeyPreConst.USER_CACHE + user.getUserId(), JSON.toJSONString(user));
return updateRet;
}
@Override
public User findUser(Integer userId) {
User user = null;
// 1. Look up the Redis cache first
String userCacheKey = RedisKeyPreConst.USER_CACHE + userId;
String jsonUser = RedisUtil.get(userCacheKey);
if (!StringUtils.isEmpty(jsonUser)) {
user = JSON.parseObject(jsonUser, User.class);
return user;
}
// 2. Fall back to the database
user = userMapper.findUser(userId);
if (user != null) {
RedisUtil.add(userCacheKey, JSON.toJSONString(user));
}
return user;
}
}
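RedisUtil and RedisKeyPreConst are project-specific helpers that the code above (and the versions below) rely on. Here is a minimal sketch of what they are assumed to look like, built on Spring's StringRedisTemplate; the method names simply mirror how the service code calls them, and the key prefix value is an illustrative assumption rather than the real project constant.

import java.util.concurrent.TimeUnit;

import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Component;

// Assumed key-prefix constant referenced by the service code.
class RedisKeyPreConst {
    public static final String USER_CACHE = "user:cache:";
}

// Assumed thin wrapper over StringRedisTemplate exposing the static methods used above.
@Component
public class RedisUtil {

    private static StringRedisTemplate template;

    public RedisUtil(StringRedisTemplate stringRedisTemplate) {
        RedisUtil.template = stringRedisTemplate;
    }

    public static void add(String key, String value) {
        template.opsForValue().set(key, value);
    }

    public static void add(String key, String value, long timeout, TimeUnit unit) {
        template.opsForValue().set(key, value, timeout, unit);
    }

    // Updating a plain string value is just an overwrite.
    public static void update(String key, String value) {
        template.opsForValue().set(key, value);
    }

    public static String get(String key) {
        return template.opsForValue().get(key);
    }

    public static void delete(String key) {
        template.delete(key);
    }

    public static void expire(String key, long timeout, TimeUnit unit) {
        template.expire(key, timeout, unit);
    }
}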
2. Upgraded version 1.1 (handling cache breakdown, penetration, and hot-key concurrency)
Supplementary knowledge (important):
- Cache breakdown (expiry): a batch of Redis keys expires at the same time, so requests punch straight through the cache and hit the database directly. Mitigation: give keys staggered expiration times.
- Cache penetration: a query for data that does not exist at all, so nothing is found in the Redis cache and nothing is found in the database either. Mitigations: cache an empty object, or use a Bloom filter (see the sketch after this list).
- Cache avalanche: the chain of knock-on effects that a cache breakdown inflicts on the whole system. Mitigations: rate limiting and cache warm-up (data preloading).
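The Bloom filter option is only mentioned, never implemented, in the versions below. A minimal sketch of how it could be wired in with Redisson's RBloomFilter follows; the filter name, capacity, false-positive rate, and the findAllUserIds() mapper method are illustrative assumptions, not part of the original project.

import java.util.List;
import javax.annotation.PostConstruct;

import org.redisson.api.RBloomFilter;
import org.redisson.api.RedissonClient;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class UserBloomFilter {

    @Autowired
    private RedissonClient redisson;

    @Autowired
    private UserMapper userMapper; // findAllUserIds() is assumed to exist for preloading

    private RBloomFilter<Integer> bloomFilter;

    @PostConstruct
    public void preloadUserIds() {
        bloomFilter = redisson.getBloomFilter("userIdBloomFilter");
        // Size the filter for the expected number of users and an acceptable false-positive rate.
        bloomFilter.tryInit(1_000_000L, 0.01);
        List<Integer> allIds = userMapper.findAllUserIds();
        allIds.forEach(bloomFilter::add);
    }

    // Called at the top of findUser(): a negative answer is definite, so the request can be
    // rejected before it touches Redis or the database. Newly inserted user IDs must also be
    // added to the filter after insetUser() succeeds.
    public boolean mightExist(Integer userId) {
        return bloomFilter.contains(userId);
    }
}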
The implementation details are as follows:
public class UserServiceImpl2 implements UserService {
@Autowired
private Redisson redisson;
private String nullUser = JSON.toJSONString(new User());
private static final String HOT_USER_LOCK = "HOT_USER_LOCK_PRE";
@Autowired
UserMapper userMapper;
@Override
public User insetUser(User user) {
User insertRet = userMapper.insetUser(user);
RedisUtil.add(RedisKeyPreConst.USER_CACHE + user.getUserId(), JSON.toJSONString(user));
return insertRet;
}
@Override
public Integer deleteUser(Integer userId) {
Integer num = userMapper.deleteUser(userId);
RedisUtil.delete(RedisKeyPreConst.USER_CACHE + userId);
return num;
}
@Override
public User updateUser(User user) {
User updateRet = userMapper.updateUser(user);
RedisUtil.update(RedisKeyPreConst.USER_CACHE + user.getUserId(), JSON.toJSONString(user));
return updateRet;
}
@Override
public User findUser(Integer userId) {
User user = null;
String userCacheKey = RedisKeyPreConst.USER_CACHE + userId;
// Fix for problem 5: first lookup in the Redis cache
user = getUserAndSetExpire(userCacheKey);
if (user != null) {
return user;
}
// Fix for problem 5: take a distributed lock so concurrent rebuilds of a hot key stay safe
RLock lock = redisson.getLock(HOT_USER_LOCK + userId);
lock.lock();
try {
// Fix for problem 5: second lookup in the Redis cache, after acquiring the lock
user = getUserAndSetExpire(userCacheKey);
if (user != null) {
return user;
}
// Query the database
user = userMapper.findUser(userId);
if (user != null) {
RedisUtil.add(userCacheKey, JSON.toJSONString(user));
} else {
// Fix for problem 3: cache an empty object, and give it an expiration time so it does not waste space forever
RedisUtil.add(userCacheKey, nullUser, getExpireTime(60), TimeUnit.SECONDS);
}
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
lock.unlock();
}
return user;
}
private int getExpireTime(int time) {
// Fix for problem 2: add a random offset to the base expiration time so keys do not all expire at once and trigger a cache breakdown
return time + new Random().nextInt(30);
}
// Look up the Redis cache
private User getUserAndSetExpire(String userCacheKey) {
User user = null;
String jsonUser = RedisUtil.get(userCacheKey);
if (!StringUtils.isEmpty(jsonUser)) {
if (nullUser.equals(jsonUser)) {
// Fix for problem 4: a cached empty object was hit, so extend its expiration time
RedisUtil.expire(userCacheKey, getExpireTime(60), TimeUnit.SECONDS);
return new User();
}
user = JSON.parseObject(jsonUser, User.class);
// Fix for problem 1: refresh the expiration time on every access, so data that is no longer active eventually expires
RedisUtil.expire(userCacheKey, getExpireTime(60 * 60 * 24), TimeUnit.SECONDS);
}
return user;
}
}
3. Upgraded version 1.2 (handling cache/database double-write consistency)
The implementation details are as follows:
public class UserServiceImpl3 implements UserService {
@Autowired
private Redisson redisson;
private String nullUser = JSON.toJSONString(new User());
private static final String HOT_USER_LOCK = "HOT_USER_LOCK_PRE";
private static final String UPDATE_USER_LOCK = "UPDATE_USER_LOCK";
@Autowired
UserMapper userMapper;
@Override
public User insetUser(User user) {
User insertRet = new User();
// Fix for problem 6: also lock inserts to avoid cache/database double-write inconsistency
RLock updateLock = redisson.getLock(UPDATE_USER_LOCK + user.getUserId());
updateLock.lock();
try {
insertRet = userMapper.insetUser(user);
RedisUtil.add(RedisKeyPreConst.USER_CACHE + user.getUserId(), JSON.toJSONString(user));
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
updateLock.unlock();
}
return insertRet;
}
@Override
public Integer deleteUser(Integer userId) {
Integer num;
// Fix for problem 6: also lock deletes to avoid cache/database double-write inconsistency
RLock updateLock = redisson.getLock(UPDATE_USER_LOCK + userId);
updateLock.lock();
try {
num = userMapper.deleteUser(userId);
RedisUtil.delete(RedisKeyPreConst.USER_CACHE + userId);
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
updateLock.unlock();
}
return num;
}
@Override
public User updateUser(User user) {
// Fix for problem 6: also lock updates to avoid cache/database double-write inconsistency
User updateRet = new User();
RLock updateLock = redisson.getLock(UPDATE_USER_LOCK + user.getUserId());
updateLock.lock();
try {
updateRet = userMapper.updateUser(user);
RedisUtil.update(RedisKeyPreConst.USER_CACHE + user.getUserId(), JSON.toJSONString(user));
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
updateLock.unlock();
}
return updateRet;
}
@Override
public User findUser(Integer userId) {
User user = null;
String userCacheKey = RedisKeyPreConst.USER_CACHE + userId;
// Fix for problem 5: first lookup in the Redis cache
user = getUserAndSetExpire(userCacheKey);
if (user != null) {
return user;
}
// Fix for problem 5: take a distributed lock so concurrent rebuilds of a hot key stay safe
RLock lock = redisson.getLock(HOT_USER_LOCK + userId);
lock.lock();
try {
// Fix for problem 5: second lookup in the Redis cache, after acquiring the lock
user = getUserAndSetExpire(userCacheKey);
if (user != null) {
return user;
}
// Fix for problem 6: take a second distributed lock to handle double-write inconsistency
RLock updateLock = redisson.getLock(UPDATE_USER_LOCK + userId);
updateLock.lock();
try {
// Query the database
user = userMapper.findUser(userId);
if (user != null) {
RedisUtil.add(userCacheKey, JSON.toJSONString(user));
} else {
// Fix for problem 3: cache an empty object, and give it an expiration time so it does not waste space forever
RedisUtil.add(userCacheKey, nullUser, getExpireTime(60), TimeUnit.SECONDS);
}
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
updateLock.unlock();
}
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
lock.unlock();
}
return user;
}
private int getExpireTime(int time) {
// Fix for problem 2: add a random offset to the base expiration time so keys do not all expire at once and trigger a cache breakdown
return time + new Random().nextInt(30);
}
// Look up the Redis cache
private User getUserAndSetExpire(String userCacheKey) {
User user = null;
// Query the Redis cache
String jsonUser = RedisUtil.get(userCacheKey);
if (!StringUtils.isEmpty(jsonUser)) {
if (nullUser.equals(jsonUser)) {
// Fix for problem 4: a cached empty object was hit, so extend its expiration time
RedisUtil.expire(userCacheKey, getExpireTime(60), TimeUnit.SECONDS);
return new User();
}
user = JSON.parseObject(jsonUser, User.class);
// Fix for problem 1: refresh the expiration time on every access, so data that is no longer active eventually expires
RedisUtil.expire(userCacheKey, getExpireTime(60 * 60 * 24), TimeUnit.SECONDS);
}
return user;
}
}
4. Optimized version (refining the multi-level cache architecture)
The implementation details are as follows:
public class UserServiceImpl4 implements UserService {
@Autowired
private Redisson redisson;
private String nullUser = JSON.toJSONString(new User());
private static final String HOT_USER_LOCK = "HOT_USER_LOCK_PRE";
private static final String UPDATE_USER_LOCK = "UPDATE_USER_LOCK";
// Optimization 3: an in-JVM map as a local (L1) cache to absorb heavier concurrent traffic;
// ConcurrentHashMap keeps it safe under concurrent reads and writes
private static Map<String, User> userMap = new ConcurrentHashMap<>();
@Autowired
UserMapper userMapper;
@Override
public User insetUser(User user) {
User insertRet = new User();
// Optimization 2: use a read-write lock instead of the coarser exclusive lock; writes take the write lock
RReadWriteLock readWriteLock = redisson.getReadWriteLock(UPDATE_USER_LOCK + user.getUserId());
RLock insertWriteLock = readWriteLock.writeLock();
insertWriteLock.lock();
try {
insertRet = userMapper.insetUser(user);
RedisUtil.add(RedisKeyPreConst.USER_CACHE + user.getUserId(), JSON.toJSONString(user));
// Optimization 3: also populate the in-JVM map
userMap.put(RedisKeyPreConst.USER_CACHE + user.getUserId(), user);
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
insertWriteLock.unlock();
}
return insertRet;
}
@Override
public Integer deleteUser(Integer userId) {
Integer num;
// Optimization 2: use a read-write lock instead of the coarser exclusive lock; writes take the write lock
RReadWriteLock readWriteLock = redisson.getReadWriteLock(UPDATE_USER_LOCK + userId);
RLock deleteWriteLock = readWriteLock.writeLock();
deleteWriteLock.lock();
try {
num = userMapper.deleteUser(userId);
RedisUtil.delete(RedisKeyPreConst.USER_CACHE + userId);
// Optimization 3: also evict the entry from the in-JVM map
userMap.remove(RedisKeyPreConst.USER_CACHE + userId);
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
deleteWriteLock.unlock();
}
return num;
}
@Override
public User updateUser(User user) {
// Optimization 2: use a read-write lock instead of the coarser exclusive lock; writes take the write lock
User updateRet = new User();
RReadWriteLock readWriteLock = redisson.getReadWriteLock(UPDATE_USER_LOCK + user.getUserId());
RLock updateWriteLock = readWriteLock.writeLock();
updateWriteLock.lock();
try {
updateRet = userMapper.updateUser(user);
RedisUtil.update(RedisKeyPreConst.USER_CACHE + user.getUserId(), JSON.toJSONString(user));
// Optimization 3: also refresh the entry in the in-JVM map
userMap.put(RedisKeyPreConst.USER_CACHE + user.getUserId(), user);
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
updateWriteLock.unlock();
}
return updateRet;
}
@Override
public User findUser(Integer userId) {
User user = null;
String userCacheKey = RedisKeyPreConst.USER_CACHE + userId;
// Fix for problem 5: first cache lookup (local map, then Redis)
user = getUserAndSetExpire(userCacheKey);
if (user != null) {
return user;
}
// Fix for problem 5: take a distributed lock so concurrent rebuilds of a hot key stay safe
RLock lock = redisson.getLock(HOT_USER_LOCK + userId);
lock.lock();
try {
// Optimization 1: estimate how long the rebuild below takes (e.g. 2 seconds) and call
// lock.tryLock(2, TimeUnit.SECONDS) instead of lock.lock() above, so threads that time out fall
// through rather than queue, turning serial execution into parallel execution. The trick is of
// limited value, though: if the application happens to stall past the estimate, consistency
// problems reappear.
// lock.tryLock(2, TimeUnit.SECONDS);
user = getUserAndSetExpire(userCacheKey);
if (user != null) {
return user;
}
// Optimization 2: use a read-write lock instead of the coarser exclusive lock; this read path only needs the read lock
RReadWriteLock readWriteLock = redisson.getReadWriteLock(UPDATE_USER_LOCK + userId);
RLock findReadLock = readWriteLock.readLock();
findReadLock.lock();
try {
// Query the database
user = userMapper.findUser(userId);
if (user != null) {
RedisUtil.add(userCacheKey, JSON.toJSONString(user));
// Optimization 3: also populate the in-JVM map
userMap.put(userCacheKey, user);
} else {
// Fix for problem 3: cache an empty object, and give it an expiration time so it does not waste space forever
RedisUtil.add(userCacheKey, nullUser, getExpireTime(60), TimeUnit.SECONDS);
}
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
findReadLock.unlock();
}
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
lock.unlock();
}
return user;
}
private int getExpireTime(int time) {
// Fix for problem 2: add a random offset to the base expiration time so keys do not all expire at once and trigger a cache breakdown
return time + new Random().nextInt(30);
}
private User getUserAndSetExpire(String userCacheKey) {
User user = null;
// Optimization 3: check the in-JVM map first; it absorbs the heaviest read traffic
user = userMap.get(userCacheKey);
if (user != null) {
return user;
}
// Fall back to the Redis cache
String jsonUser = RedisUtil.get(userCacheKey);
if (!StringUtils.isEmpty(jsonUser)) {
if (nullUser.equals(jsonUser)) {
// Fix for problem 4: a cached empty object was hit, so extend its expiration time
RedisUtil.expire(userCacheKey, getExpireTime(60), TimeUnit.SECONDS);
return new User();
}
user = JSON.parseObject(jsonUser, User.class);
// Fix for problem 1: refresh the expiration time on every access, so data that is no longer active eventually expires
RedisUtil.expire(userCacheKey, getExpireTime(60 * 60 * 24), TimeUnit.SECONDS);
}
return user;
}
}
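One caveat with optimization 3: the in-JVM userMap grows without bound, and because it lives inside a single instance it is not invalidated when another instance of the service changes the same user. Below is a minimal sketch of a capped local cache that could replace the raw map; the class name and the 10,000-entry limit are illustrative assumptions, not part of the original project.

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative bounded L1 cache: an access-ordered LinkedHashMap with removeEldestEntry gives a
// simple LRU, and Collections.synchronizedMap keeps it safe for concurrent service calls.
public class LocalUserCache {

    private static final int MAX_ENTRIES = 10_000; // arbitrary cap for illustration

    private final Map<String, User> cache = Collections.synchronizedMap(
            new LinkedHashMap<String, User>(16, 0.75f, true) {
                @Override
                protected boolean removeEldestEntry(Map.Entry<String, User> eldest) {
                    return size() > MAX_ENTRIES;
                }
            });

    public void put(String key, User user) {
        cache.put(key, user);
    }

    public User get(String key) {
        return cache.get(key);
    }

    public void remove(String key) {
        cache.remove(key);
    }
}

Even with a bounded cache, cross-instance invalidation (for example, broadcasting evictions to every node) would still be needed before the local map can be trusted for data that other instances may update.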