Introduction


Most systems today separate the front end from the back end, and the database layer is likewise split into multiple databases and tables. As a simple example, the user module has its own user database and the order module has its own order database, each stored in a separate database. Below is a brief walkthrough of how one application can work across them.

Usage


Configuring the data sources

I configured three data sources here: the default base data source, plus a firstdb data source and a seconddb data source. The ${...} placeholders are expected to be resolved from an external properties file (not shown).

<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
                           classpath:/org/springframework/beans/factory/xml/spring-beans-3.0.xsd"
       default-autowire="byName">

    <!-- firstDataSource -->
    <bean id="firstDataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
        <property name="url" value="${firstDatasource.url}"/>
        <property name="username" value="${firstDatasource.name}"/>
        <property name="password" value="${firstDatasource.password}"/>
        <property name="filters" value="log4j"/>
        <property name="maxActive" value="${firstDatasource.maxActive}"/>
        <property name="initialSize" value="${firstDatasource.initialiSize}"/>
        <property name="maxWait" value="60000"/>
        <property name="minIdle" value="1"/>
        <property name="timeBetweenEvictionRunsMillis" value="2000"/>
        <property name="minEvictableIdleTimeMillis" value="200000"/>
        <property name="poolPreparedStatements" value="false"/>
        <property name="maxPoolPreparedStatementPerConnectionSize" value="20"/>
        <property name="blockingTimeoutMillis" value="500"/>
        <property name="idleTimeoutMinutes" value="30"/>
        <property name="preparedStatementCacheSize" value="0"/>
        <property name="queryTimeout" value="30"/>
        <property name="prefill" value="true"/>
        <property name="maxReadThreshold" value="100"/>
        <property name="maxWriteThreshold" value="100"/>
        <property name="newConnectionSQL" value="set names utf8mb4;"/>
    </bean>

    <!-- secondDataSource -->
    <bean id="secondDataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
        <property name="url" value="${secondDatasource.url}"/>
        <property name="username" value="${secondDatasource.name}"/>
        <property name="password" value="${secondDatasource.password}"/>
        <property name="filters" value="log4j"/>
        <property name="maxActive" value="${secondDatasource.maxActive}"/>
        <property name="initialSize" value="${secondDatasource.initialiSize}"/>
        <property name="maxWait" value="60000"/>
        <property name="minIdle" value="1"/>
        <property name="timeBetweenEvictionRunsMillis" value="2000"/>
        <property name="minEvictableIdleTimeMillis" value="200000"/>
        <property name="poolPreparedStatements" value="false"/>
        <property name="maxPoolPreparedStatementPerConnectionSize" value="20"/>
        <property name="blockingTimeoutMillis" value="500"/>
        <property name="idleTimeoutMinutes" value="30"/>
        <property name="preparedStatementCacheSize" value="0"/>
        <property name="queryTimeout" value="30"/>
        <property name="prefill" value="true"/>
        <property name="maxReadThreshold" value="100"/>
        <property name="maxWriteThreshold" value="100"/>
        <property name="newConnectionSQL" value="set names utf8mb4;"/>
    </bean>

    <!-- baseDataSource -->
    <bean id="baseDataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
        <property name="url" value="${baseDatasource.url}"/>
        <property name="username" value="${baseDatasource.name}"/>
        <property name="password" value="${baseDatasource.password}"/>
        <property name="filters" value="log4j"/>
        <property name="maxActive" value="${baseDatasource.maxActive}"/>
        <property name="initialSize" value="${baseDatasource.initialiSize}"/>
        <property name="maxWait" value="60000"/>
        <property name="minIdle" value="1"/>
        <property name="timeBetweenEvictionRunsMillis" value="2000"/>
        <property name="minEvictableIdleTimeMillis" value="200000"/>
        <property name="poolPreparedStatements" value="false"/>
        <property name="maxPoolPreparedStatementPerConnectionSize" value="20"/>
        <property name="blockingTimeoutMillis" value="500"/>
        <property name="idleTimeoutMinutes" value="30"/>
        <property name="preparedStatementCacheSize" value="0"/>
        <property name="queryTimeout" value="30"/>
        <property name="prefill" value="true"/>
        <property name="maxReadThreshold" value="100"/>
        <property name="maxWriteThreshold" value="100"/>
        <property name="newConnectionSQL" value="set names utf8mb4;"/>
    </bean>
</beans>

AOP interception

Because one method in my service layer needs to query tables in the other two databases, I put the AOP pointcuts at the mapper layer. If, instead, different service-layer methods each hit a different database, the pointcuts could just as well target the service layer; that choice depends on your own business logic.

import com.alibaba.common.logging.Logger;
import com.alibaba.common.logging.LoggerFactory;
import com.demo.second.common.dal.support.datasource.DdsDsModeEnum;
import com.demo.second.common.dal.support.datasource.DynamicRoutingDataSourceFactory;
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.After;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Before;
import org.aspectj.lang.annotation.Pointcut;
import org.aspectj.lang.reflect.MethodSignature;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;

import java.lang.reflect.Method;

/**
 * Aspect that selects the data source for mapper calls.
 */
@Aspect
@Component
@Order(1) // @Order controls the execution order when several aspects apply; adjust the value as needed
public class DataSourceAspect {

    private static final Logger LOGGER = LoggerFactory.getLogger(DataSourceAspect.class);

    // Pointcut: when a mapper in the "first" package is executed, intercept and switch to the firstdb data source
    @Pointcut("execution(* com.demo.second.common.dal.mapper.first.*.*(..))")
    public void firstDataSource() {
    }

    // Pointcut: when a mapper in the "second" package is executed, intercept and switch to the seconddb data source
    @Pointcut("execution(* com.demo.second.common.dal.mapper.second.*.*(..))")
    public void secondDataSource() {
    }

    @Before("firstDataSource()")
    public void beforeFirst(JoinPoint point) {
        LOGGER.info("Aspect intercepted a call that requires a data source switch");
        MethodSignature signature = (MethodSignature) point.getSignature();
        Method method = signature.getMethod();
        // Switch to the firstdb data source
        DynamicRoutingDataSourceFactory.switchCurrDataSource(DdsDsModeEnum.FIRSTDB);
        LOGGER.info("AOP switched data source, className:" + point.getTarget().getClass().getName()
                + ";methodName:" + method.getName());
    }

    @Before("secondDataSource()")
    public void beforeSecond(JoinPoint point) {
        LOGGER.info("Aspect intercepted a call that requires a data source switch");
        MethodSignature signature = (MethodSignature) point.getSignature();
        Method method = signature.getMethod();
        // Switch to the seconddb data source
        DynamicRoutingDataSourceFactory.switchCurrDataSource(DdsDsModeEnum.SECONDDB);
        LOGGER.info("AOP switched data source, className:" + point.getTarget().getClass().getName()
                + ";methodName:" + method.getName());
    }

    /**
     * Clear the data source set for the current call so the default base data source is not affected.
     */
    @After("firstDataSource()")
    public void afterFirst(JoinPoint point) {
        DynamicRoutingDataSourceFactory.releaseCurrDataSource();
    }

    /**
     * Clear the data source set for the current call so the default data source is not affected.
     */
    @After("secondDataSource()")
    public void afterSecond(JoinPoint point) {
        DynamicRoutingDataSourceFactory.releaseCurrDataSource();
    }
}
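For these pointcuts to match, the mapper interfaces need to live in the corresponding packages. A hypothetical sketch of one such mapper under the "first" package (the method signature is assumed from the service-layer usage shown later; the domain-class import is elided):

package com.demo.second.common.dal.mapper.first;

import java.util.List;

// Any call to this interface matches the firstDataSource() pointcut above,
// so the aspect routes it to the firstdb data source.
public interface TransferOrderMapper {
    // TransferOrder is the DAL domain object (import omitted in this sketch)
    List<TransferOrder> listTransferOrder(String bizOrderNo, String userId);
}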

Note: I am using pointcut-based interception here. There is also an annotation-based approach, not listed in full, which works much the same way: you add an annotation on the method in question and specify the target data source there (a rough sketch is shown below).
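A minimal sketch of that annotation-based variant, assuming a hypothetical @TargetDataSource annotation (the names here are illustrative, not part of the original project):

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

import com.demo.second.common.dal.support.datasource.DdsDsModeEnum;
import com.demo.second.common.dal.support.datasource.DynamicRoutingDataSourceFactory;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;

/** Hypothetical annotation: marks a method that should run against a specific data source. */
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@interface TargetDataSource {
    DdsDsModeEnum value() default DdsDsModeEnum.BASE;
}

@Aspect
@Component
@Order(1)
class AnnotationDataSourceAspect {

    // Bind the annotation to the advice parameter, switch before the call,
    // and always clear the ThreadLocal afterwards so the default base data source is restored.
    @Around("@annotation(targetDataSource)")
    public Object around(ProceedingJoinPoint pjp, TargetDataSource targetDataSource) throws Throwable {
        DynamicRoutingDataSourceFactory.switchCurrDataSource(targetDataSource.value());
        try {
            return pjp.proceed();
        } finally {
            DynamicRoutingDataSourceFactory.releaseCurrDataSource();
        }
    }
}

A method would then simply be annotated with, for example, @TargetDataSource(DdsDsModeEnum.FIRSTDB) instead of relying on package-based pointcuts.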

Dynamic data source factory

import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;

import java.util.logging.Logger;

/**
 * Dynamic data source factory.
 */
public class DynamicRoutingDataSourceFactory extends AbstractRoutingDataSource {

    private static final ThreadLocal<String> currentDataSourceKey = new ThreadLocal<>();

    @Override
    public Logger getParentLogger() {
        return null;
    }

    @Override
    protected Object determineCurrentLookupKey() {
        return currentDataSourceKey.get();
    }

    /**
     * Lets business code set the data source used by the current thread.
     */
    public static void switchCurrDataSource(DdsDsModeEnum ds) {
        if (ds == null) {
            throw new RuntimeException("Failed to set data source: the target data source is [null].");
        }
        currentDataSourceKey.set(ds.getDsName());
    }

    /**
     * Releases the data source key set on the current thread.
     */
    public static void releaseCurrDataSource() {
        currentDataSourceKey.remove();
    }
}
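AbstractRoutingDataSource resolves the key returned by determineCurrentLookupKey() against its targetDataSources map, and falls back to defaultTargetDataSource when the key is null (i.e. when no switch has been requested), which is how the base data source stays the default. The factory therefore still has to be registered as a bean and handed the three pools defined in the XML above. A minimal wiring sketch, assuming MyBatis is configured through mybatis-spring (the bean ids and the SqlSessionFactoryBean setup are assumptions about the surrounding project, not part of the original configuration):

<bean id="dataSource" class="com.demo.second.common.dal.support.datasource.DynamicRoutingDataSourceFactory">
    <property name="defaultTargetDataSource" ref="baseDataSource"/>
    <property name="targetDataSources">
        <map key-type="java.lang.String">
            <!-- keys must match the dsName values of DdsDsModeEnum -->
            <entry key="base" value-ref="baseDataSource"/>
            <entry key="firstdb" value-ref="firstDataSource"/>
            <entry key="seconddb" value-ref="secondDataSource"/>
        </map>
    </property>
</bean>

<bean id="sqlSessionFactory" class="org.mybatis.spring.SqlSessionFactoryBean">
    <property name="dataSource" ref="dataSource"/>
</bean>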

Data source enum definition

/**
 * Enum defining the business data sources.
 */
public enum DdsDsModeEnum {

    BASE("base"),
    FIRSTDB("firstdb"),
    SECONDDB("seconddb");

    private final String dsName;

    DdsDsModeEnum(String dsName) {
        this.dsName = dsName;
    }

    public String getDsName() {
        return dsName;
    }
}

Using it in the service layer

@Service
public class TransferFailedServiceImpl implements TransferFailedService {

    @Autowired
    private TransferOrderMapper transferOrderMapper;
    @Autowired
    private ChannelInvokeRecordMapper channelInvokeRecordMapper;
    @Autowired
    private UserMapper userMapper;

    @Override
    public PageVO<TransferOrderVO> listTransferOrder(TransferOrderFilterParams params) {
        // If a userId was passed in, resolve it first
        if (StringUtils.isNotBlank(params.getUserId())) {
            User userInfo = userMapper.selectUserByUserId(params.getUserId());
            if (userInfo != null) {
                params.setUserId(userInfo.getUid());
            }
        }
        PageHelper.startPage(params.getPageNum(), params.getPageSize());
        // Queries the firstdb database
        List<TransferOrder> transferOrders = transferOrderMapper.listTransferOrder(params.getBizOrderNo(), params.getUserId());
        PageInfo<TransferOrder> pageInfo = new PageInfo<>(transferOrders);
        List<TransferOrderVO> voList = OrikaMapperUtils.mapList(transferOrders, TransferOrder.class,
                TransferOrderVO.class);
        for (TransferOrderVO order : voList) {
            order.setUserId(order.getAccount());
            // Queries the firstdb database
            ChannelInvokeRecord channelInvokeRecord = channelInvokeRecordMapper.selectCirByBizOrderNo(order.getBizOrderNo());
            if (channelInvokeRecord != null) {
                // Failure reason
                String remark = JSON.parseObject(channelInvokeRecord.getRemark()).getString("subMsg");
                order.setSubMsg(remark);
            }
        }
        for (TransferOrderVO order : voList) {
            // Queries the seconddb database
            User user = userMapper.selectUserByUserId(order.getAccount());
            if (user != null) {
                order.setUserId(String.valueOf(user.getUserId()));
            }
        }
        return new PageVO<>(params.getPageSize(), params.getPageNum(), pageInfo.getTotal(), voList);
    }
}
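With this in place, a single call to listTransferOrder touches all three databases: the userMapper calls match the secondDataSource() pointcut and run against seconddb, while transferOrderMapper and channelInvokeRecordMapper match firstDataSource() and run against firstdb. After each mapper call, the aspect's @After advice clears the ThreadLocal, so any mapper not covered by a pointcut falls back to the default base data source.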

References


For more alternative approaches, see what other bloggers have written, for example: