Introduction
Most systems today use a front-end/back-end separated architecture, and the database layer is commonly split by business module (sharded across databases and tables). A simple example: the user module has its own user database and the order module its own order database, each stored in a separate database. Below is a brief walkthrough of how to switch between such data sources dynamically.
Usage
Configuring the data sources
I configure three data sources here: the default base data source, plus a firstdb data source and a seconddb data source.
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xsi:schemaLocation="http://www.springframework.org/schema/beans classpath:/org/springframework/beans/factory/xml/spring-beans-3.0.xsd"
       default-autowire="byName">

    <!-- firstDataSource -->
    <bean id="firstDataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
        <property name="url" value="${firstDatasource.url}"/>
        <property name="username" value="${firstDatasource.name}"/>
        <property name="password" value="${firstDatasource.password}"/>
        <property name="filters" value="log4j"/>
        <property name="maxActive" value="${firstDatasource.maxActive}"/>
        <property name="initialSize" value="${firstDatasource.initialiSize}"/>
        <property name="maxWait" value="60000"/>
        <property name="minIdle" value="1"/>
        <property name="timeBetweenEvictionRunsMillis" value="2000"/>
        <property name="minEvictableIdleTimeMillis" value="200000"/>
        <property name="poolPreparedStatements" value="false"/>
        <property name="maxPoolPreparedStatementPerConnectionSize" value="20"/>
        <property name="blockingTimeoutMillis" value="500"/>
        <property name="idleTimeoutMinutes" value="30"/>
        <property name="preparedStatementCacheSize" value="0"/>
        <property name="queryTimeout" value="30"/>
        <property name="prefill" value="true"/>
        <property name="maxReadThreshold" value="100"/>
        <property name="maxWriteThreshold" value="100"/>
        <property name="newConnectionSQL" value="set names utf8mb4;"/>
    </bean>

    <!-- secondDataSource -->
    <bean id="secondDataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
        <property name="url" value="${secondDatasource.url}"/>
        <property name="username" value="${secondDatasource.name}"/>
        <property name="password" value="${secondDatasource.password}"/>
        <property name="filters" value="log4j"/>
        <property name="maxActive" value="${secondDatasource.maxActive}"/>
        <property name="initialSize" value="${secondDatasource.initialiSize}"/>
        <property name="maxWait" value="60000"/>
        <property name="minIdle" value="1"/>
        <property name="timeBetweenEvictionRunsMillis" value="2000"/>
        <property name="minEvictableIdleTimeMillis" value="200000"/>
        <property name="poolPreparedStatements" value="false"/>
        <property name="maxPoolPreparedStatementPerConnectionSize" value="20"/>
        <property name="blockingTimeoutMillis" value="500"/>
        <property name="idleTimeoutMinutes" value="30"/>
        <property name="preparedStatementCacheSize" value="0"/>
        <property name="queryTimeout" value="30"/>
        <property name="prefill" value="true"/>
        <property name="maxReadThreshold" value="100"/>
        <property name="maxWriteThreshold" value="100"/>
        <property name="newConnectionSQL" value="set names utf8mb4;"/>
    </bean>

    <!-- baseDataSource -->
    <bean id="baseDataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
        <property name="url" value="${baseDatasource.url}"/>
        <property name="username" value="${baseDatasource.name}"/>
        <property name="password" value="${baseDatasource.password}"/>
        <property name="filters" value="log4j"/>
        <property name="maxActive" value="${baseDatasource.maxActive}"/>
        <property name="initialSize" value="${baseDatasource.initialiSize}"/>
        <property name="maxWait" value="60000"/>
        <property name="minIdle" value="1"/>
        <property name="timeBetweenEvictionRunsMillis" value="2000"/>
        <property name="minEvictableIdleTimeMillis" value="200000"/>
        <property name="poolPreparedStatements" value="false"/>
        <property name="maxPoolPreparedStatementPerConnectionSize" value="20"/>
        <property name="blockingTimeoutMillis" value="500"/>
        <property name="idleTimeoutMinutes" value="30"/>
        <property name="preparedStatementCacheSize" value="0"/>
        <property name="queryTimeout" value="30"/>
        <property name="prefill" value="true"/>
        <property name="maxReadThreshold" value="100"/>
        <property name="maxWriteThreshold" value="100"/>
        <property name="newConnectionSQL" value="set names utf8mb4;"/>
    </bean>
</beans>
AOP aspect interception
Since a single method in my Service layer needs to query tables in the other two databases, I place the AOP pointcuts on the mapper layer. If the calls to different databases live in different Service-layer methods, the pointcuts can target the Service layer instead; that choice depends on your own business.
import com.alibaba.common.logging.Logger;
import com.alibaba.common.logging.LoggerFactory;
import com.demo.second.common.dal.support.datasource.DdsDsModeEnum;
import com.demo.second.common.dal.support.datasource.DynamicRoutingDataSourceFactory;
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.After;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Before;
import org.aspectj.lang.annotation.Pointcut;
import org.aspectj.lang.reflect.MethodSignature;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;

import java.lang.reflect.Method;

/**
 * Aspect that switches the data source.
 */
@Aspect
@Component
@Order(1) // @Order controls the execution order when several aspects are configured
public class DataSourceAspect {

    private static final Logger LOGGER = LoggerFactory.getLogger(DataSourceAspect.class);

    // Pointcut: whenever a mapper in the "first" package executes, switch to the firstdb data source
    @Pointcut("execution(* com.demo.second.common.dal.mapper.first.*.*(..))")
    public void firstDataSource() {
    }

    // Pointcut: whenever a mapper in the "second" package executes, switch to the seconddb data source
    @Pointcut("execution(* com.demo.second.common.dal.mapper.second.*.*(..))")
    public void secondDataSource() {
    }

    @Before("firstDataSource()")
    public void beforefirst(JoinPoint point) {
        LOGGER.info("Aspect intercepted a data source switch");
        MethodSignature signa = (MethodSignature) point.getSignature();
        Method method = signa.getMethod();
        // Switch to the firstdb data source
        DynamicRoutingDataSourceFactory.switchCurrDataSource(DdsDsModeEnum.FIRSTDB);
        LOGGER.info("AOP dynamic data source switch, className:" + point.getTarget().getClass().getName()
                + ";methodName:" + method.getName());
    }

    @Before("secondDataSource()")
    public void beforesecond(JoinPoint point) {
        LOGGER.info("Aspect intercepted a data source switch");
        MethodSignature signa = (MethodSignature) point.getSignature();
        Method method = signa.getMethod();
        // Switch to the seconddb data source
        DynamicRoutingDataSourceFactory.switchCurrDataSource(DdsDsModeEnum.SECONDDB);
        LOGGER.info("AOP dynamic data source switch, className:" + point.getTarget().getClass().getName()
                + ";methodName:" + method.getName());
    }

    /**
     * Clear the data source set for the current thread so the default base data source is unaffected.
     */
    @After("firstDataSource()")
    public void afterfirst(JoinPoint point) {
        DynamicRoutingDataSourceFactory.releaseCurrDataSource();
    }

    /**
     * Clear the data source set for the current thread so the default data source is unaffected.
     */
    @After("secondDataSource()")
    public void aftersecond(JoinPoint point) {
        DynamicRoutingDataSourceFactory.releaseCurrDataSource();
    }
}
Note: I use pointcut-based interception here. There is also an annotation-based approach, which I have not listed in full; it works the same way, except that you add an annotation on the target method and specify the data source there. A sketch of that variant is given below.
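As a rough illustration of that annotation-based variant (the @TargetDataSource annotation and TargetDataSourceAspect below are hypothetical names, not code from this project), it could look something like this:

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.springframework.stereotype.Component;

// Hypothetical annotation: placed on a method to choose its data source explicitly
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@interface TargetDataSource {
    DdsDsModeEnum value() default DdsDsModeEnum.BASE;
}

@Aspect
@Component
class TargetDataSourceAspect {

    // Intercept any method carrying @TargetDataSource, switch before it runs,
    // and always release afterwards so the default base data source is unaffected
    @Around("@annotation(targetDataSource)")
    public Object around(ProceedingJoinPoint pjp, TargetDataSource targetDataSource) throws Throwable {
        DynamicRoutingDataSourceFactory.switchCurrDataSource(targetDataSource.value());
        try {
            return pjp.proceed();
        } finally {
            DynamicRoutingDataSourceFactory.releaseCurrDataSource();
        }
    }
}

A service or mapper method would then be annotated, for example with @TargetDataSource(DdsDsModeEnum.FIRSTDB), instead of relying on the package-based pointcuts above.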
Dynamic data source factory
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;

import java.util.logging.Logger;

/**
 * Dynamic data source factory.
 */
public class DynamicRoutingDataSourceFactory extends AbstractRoutingDataSource {

    private static final ThreadLocal<String> currentDataSourceKey = new ThreadLocal<>();

    @Override
    public Logger getParentLogger() {
        return null;
    }

    @Override
    protected Object determineCurrentLookupKey() {
        return currentDataSourceKey.get();
    }

    /**
     * Lets business code set the data source used by the current thread.
     */
    public static void switchCurrDataSource(DdsDsModeEnum ds) {
        if (ds == null) {
            throw new RuntimeException("Failed to set data source, target data source is [null].");
        }
        currentDataSourceKey.set(ds.getDsName());
    }

    /**
     * Extension: releases the data source set for the current thread.
     */
    public static void releaseCurrDataSource() {
        currentDataSourceKey.remove();
    }
}
Business data source enum definition
/**
 * Business data source enum definition.
 */
public enum DdsDsModeEnum {

    BASE("base"),
    FIRSTDB("firstdb"),
    SECONDDB("seconddb");

    private final String dsName;

    DdsDsModeEnum(String dsName) {
        this.dsName = dsName;
    }

    public String getDsName() {
        return dsName;
    }
}
Using it in the Service layer
@Service
public class TransferFailedServiceImpl implements TransferFailedService {

    @Autowired
    private TransferOrderMapper transferOrderMapper;
    @Autowired
    private ChannelInvokeRecordMapper channelInvokeRecordMapper;
    @Autowired
    private UserMapper userMapper;

    @Override
    public PageVO<TransferOrderVO> listTransferOrder(TransferOrderFilterParams params) {
        // Check whether userId is present
        if (StringUtils.isNotBlank(params.getUserId())) {
            User userInfo = userMapper.selectUserByUserId(params.getUserId());
            if (userInfo != null) {
                params.setUserId(userInfo.getUid());
            }
        }
        PageHelper.startPage(params.getPageNum(), params.getPageSize());
        // Query the firstdb database
        List<TransferOrder> transferOrders = transferOrderMapper.listTransferOrder(params.getBizOrderNo(), params.getUserId());
        PageInfo pageInfo = new PageInfo<>(transferOrders);
        List<TransferOrderVO> voList = OrikaMapperUtils.mapList(transferOrders, TransferOrder.class, TransferOrderVO.class);
        for (TransferOrderVO order : voList) {
            order.setUserId(order.getAccount());
            // Query the firstdb database
            ChannelInvokeRecord channelInvokeRecord = channelInvokeRecordMapper.selectCirByBizOrderNo(order.getBizOrderNo());
            if (channelInvokeRecord != null) {
                // Failure reason
                String remark = JSON.parseObject(channelInvokeRecord.getRemark()).getString("subMsg");
                order.setSubMsg(remark);
            }
        }
        for (TransferOrderVO order : voList) {
            // Query the seconddb database
            User user = userMapper.selectUserByUserId(order.getAccount());
            if (user != null) {
                order.setUserId(String.valueOf(user.getUserId()));
            }
        }
        return new PageVO(params.getPageSize(), params.getPageNum(), pageInfo.getTotal(), voList);
    }
}
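For the pointcuts in DataSourceAspect to route these calls, the mappers injected above have to live in the packages the pointcuts match: TransferOrderMapper and ChannelInvokeRecordMapper under com.demo.second.common.dal.mapper.first (routed to firstdb), and UserMapper under com.demo.second.common.dal.mapper.second (routed to seconddb). Below is a minimal sketch of one mapper from each package, assuming MyBatis (which the PageHelper usage suggests); the method signatures are inferred from the service code above, not taken from the original project.

// File 1: matched by the firstDataSource() pointcut, so routed to firstdb
package com.demo.second.common.dal.mapper.first;

import java.util.List;

import org.apache.ibatis.annotations.Param;

public interface TransferOrderMapper {
    // Signature inferred from the call in TransferFailedServiceImpl
    List<TransferOrder> listTransferOrder(@Param("bizOrderNo") String bizOrderNo,
                                          @Param("userId") String userId);
}

// File 2: matched by the secondDataSource() pointcut, so routed to seconddb
package com.demo.second.common.dal.mapper.second;

public interface UserMapper {
    // Signature inferred from the call in TransferFailedServiceImpl
    User selectUserByUserId(String userId);
}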
References
Other approaches to dynamic data source switching have been written up by other bloggers and can be consulted for comparison.
