inline 配置,只支持 = IN 两种 SQL 语法
    但是业务需要 >= <= 之类的范围查询,所以需要修改分片策略

    1. 实现精准分片接口 PreciseShardingAlgorithm 和范围分片接口 RangeShardingAlgorithm
    2. 修改 application.yaml 配置文件

    配置文件:

    1. ...
    2. 省略数据源以及默认配置
    3. ...
    4. t_eth_transaction:
    5. actual-data-nodes: test$->{1}.t_eth_transaction$->{9..12}
    6. table-strategy:
    7. # inline:
    8. # sharding-column: block_number
    9. # algorithm-expression: t_eth_transaction$->{ (block_number/1000000) as int} # 分表规则:按 block_number 每 100 万一张表,表后缀为 block_number/1000000 取整
    10. standard: # 单列sharding算法,需要配合对应的 preciseShardingAlgorithm,rangeShardingAlgorithm 接口的实现使用
    11. shardingColumn: block_number # 列名,允许单列
    12. preciseAlgorithmClassName: com.paradise.sharding.strategy.RangeShardingAlgorithmImpl # preciseShardingAlgorithm接口的实现类
    13. rangeAlgorithmClassName: com.paradise.sharding.strategy.RangeShardingAlgorithmImpl # rangeShardingAlgorithm接口的实现类

    接口实现:

    1. package com.paradise.sharding.strategy;
    2. import com.google.common.collect.Range;
    3. import lombok.extern.slf4j.Slf4j;
    4. import org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm;
    5. import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;
    6. import org.apache.shardingsphere.api.sharding.standard.RangeShardingAlgorithm;
    7. import org.apache.shardingsphere.api.sharding.standard.RangeShardingValue;
    8. import java.util.Collection;
    9. import java.util.LinkedHashSet;
    10. import java.util.Map;
    11. import java.util.function.Function;
    12. import java.util.stream.Collectors;
    13. /**
    14. * 分片策略实现
    15. * 包含精准分片与范围分片
    16. *
    17. * @author Paradise
    18. */
    19. @Slf4j
    20. public class RangeShardingAlgorithmImpl implements RangeShardingAlgorithm<Long>, PreciseShardingAlgorithm<Long> {
    21. /**
    22. * 100w 分片区块
    23. */
    24. private static final long DIVISOR = 1_000_000;
    25. /**
    26. * 分片表前缀
    27. */
    28. private static final String TABLE_PREFIX = "t_eth_transaction";
    29. public RangeShardingAlgorithmImpl() {
    30. log.info("初始化 -> [{}]", "RangeShardingAlgorithmImpl ----- 范围分片算法-启用");
    31. }
    32. /**
    33. * Sharding.
    34. *
    35. * @param availableTargetNames available data sources or tables' names
    36. * @param shardingValue sharding value
    37. * @return sharding results for data sources or tables' names
    38. */
    39. @Override
    40. public Collection<String> doSharding(Collection<String> availableTargetNames, RangeShardingValue<Long> shardingValue) {
    41. // 可用表名称 Map
    42. final Map<String, String> map = availableTargetNames.stream()
    43. .collect(Collectors.toMap(Function.identity(), Function.identity()));
    44. // 构造返回结果
    45. Collection<String> result = new LinkedHashSet<>(availableTargetNames.size());
    46. // 获取待查询范围
    47. Range<Long> range = shardingValue.getValueRange();
    48. // 除法取整
    49. long low = range.lowerEndpoint() / DIVISOR;
    50. long upper = range.upperEndpoint() / DIVISOR;
    51. // 遍历获取路由表信息
    52. for (long i = low; i <= upper; i++) {
    53. final String table = TABLE_PREFIX + i;
    54. if (map.containsKey(table)) {
    55. result.add(table);
    56. }
    57. }
    58. return result;
    59. }
    60. /**
    61. * Sharding.
    62. *
    63. * @param availableTargetNames available data sources or tables' names
    64. * @param shardingValue sharding value
    65. * @return sharding result for data source or table's name
    66. */
    67. @Override
    68. public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Long> shardingValue) {
    69. // 可用表名称 Map
    70. final Map<String, String> map = availableTargetNames.stream()
    71. .collect(Collectors.toMap(Function.identity(), Function.identity()));
    72. final long value = shardingValue.getValue() / DIVISOR;
    73. final String table = TABLE_PREFIX + value;
    74. if (map.containsKey(table)) {
    75. return table;
    76. }
    77. log.info("分片表:{} 不存在", table);
    78. throw new UnsupportedOperationException(table + " Not Exist!");
    79. }
    80. }