In a microservice environment, distributed transactions are generally discouraged because they degrade performance.

However, some scenarios put data safety first and performance second, such as anything involving money. That is where a distributed transaction is needed.

Installation

This guide uses Seata 1.1.0 as the example version.

Since 1.0.0 the conf directory no longer ships the SQL scripts; they can be copied over from the 0.9.0 release, as sketched below.

The release archive can be downloaded straight from GitHub: Releases · seata/seata (github.com)
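With both archives unpacked side by side, the copy might look like the sketch below (the script names are assumptions based on the 0.9.0 distribution; check its conf directory):

cp seata-0.9.0/conf/db_store.sql seata-0.9.0/conf/db_undo_log.sql seata-1.1.0/conf/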

Configuration

registry.conf

Configure the registry center that Seata registers itself with; Eureka is used as the example here.

You can give Seata its own service name via the application property.

When Eureka is used, the other entries under registry can be deleted; the same goes for config.

registry {
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  type = "eureka"

  nacos {
    serverAddr = "localhost:8848"
    namespace = "public"
    cluster = "default"
  }
  eureka {
    serviceUrl = "http://localhost:8761/eureka"
    application = "SEATA-SERVER"
    weight = "1"
  }
  redis {
    serverAddr = "localhost:6379"
    db = "0"
  }
  zk {
    cluster = "default"
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  consul {
    cluster = "default"
    serverAddr = "127.0.0.1:8500"
  }
  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    application = "default"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    cluster = "default"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file.conf"
  }
}

config {
  # file、nacos 、apollo、zk、consul、etcd3
  # file is specified as the configuration source here
  type = "file"

  nacos {
    serverAddr = "localhost"
    namespace = ""
    group = "SEATA_GROUP"
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    app.id = "seata-server"
    apollo.meta = "http://192.168.1.204:8801"
    namespace = "application"
  }
  zk {
    serverAddr = "127.0.0.1:2181"
    session.timeout = 6000
    connect.timeout = 2000
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
}

Since file is specified as the configuration source, file.conf has to be edited as well.

file.conf

Its main job is to define the transaction group.

If transaction data is stored in a database, the database connection has to be configured here as well.

transport {
  # tcp udt unix-domain-socket
  type = "TCP"
  #NIO NATIVE
  server = "NIO"
  #enable heartbeat
  heartbeat = true
  # the client batch send request enable
  enableClientBatchSendRequest = false
  #thread factory for netty
  threadFactory {
    bossThreadPrefix = "NettyBoss"
    workerThreadPrefix = "NettyServerNIOWorker"
    serverExecutorThreadPrefix = "NettyServerBizHandler"
    shareBossWorker = false
    clientSelectorThreadPrefix = "NettyClientSelector"
    clientSelectorThreadSize = 1
    clientWorkerThreadPrefix = "NettyClientWorkerThread"
    # netty boss thread size,will not be used for UDT
    bossThreadSize = 1
    #auto default pin or 8
    workerThreadSize = "default"
  }
  shutdown {
    # when destroy server, wait seconds
    wait = 3
  }
  serialization = "seata"
  compressor = "none"
}

# service configuration, only used in client side
service {
  #transaction service group mapping
  # the part after "vgroupMapping." is the transaction group name
  vgroupMapping.fsp_tx_group = "default"
  #only support when registry.type=file, please don't set multiple addresses
  default.grouplist = "127.0.0.1:8091"
  #degrade, current not support
  enableDegrade = false
  #disable seata
  disableGlobalTransaction = false
}

#client transaction configuration, only used in client side
client {
  rm {
    asyncCommitBufferLimit = 10000
    lock {
      retryInterval = 10
      retryTimes = 30
      retryPolicyBranchRollbackOnConflict = true
    }
    reportRetryCount = 5
    tableMetaCheckEnable = false
    reportSuccessEnable = false
    sqlParserType = druid
  }
  tm {
    commitRetryCount = 5
    rollbackRetryCount = 5
  }
  undo {
    dataValidation = true
    logSerialization = "jackson"
    logTable = "undo_log"
  }
  log {
    exceptionRate = 100
  }
}

## transaction log store, only used in server side
store {
  ## store mode: file、db
  # store transaction data in a database
  mode = "db"
  ## file store property
  file {
    ## store location dir
    dir = "sessionStore"
    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
    maxBranchSessionSize = 16384
    # globe session size , if exceeded throws exceptions
    maxGlobalSessionSize = 512
    # file buffer size , if exceeded allocate new buffer
    fileWriteBufferCacheSize = 16384
    # when recover batch read size
    sessionReloadReadSize = 100
    # async, sync
    flushDiskMode = async
  }
  ## database store property
  db {
    ## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp) etc.
    datasource = "dbcp"
    ## mysql/oracle/h2/oceanbase etc.
    dbType = "mysql"
    driverClassName = "com.mysql.jdbc.Driver"
    url = "jdbc:mysql://127.0.0.1:3306/seata"
    user = "root"
    password = "1234"
    minConn = 1
    maxConn = 10
    globalTable = "global_table"
    branchTable = "branch_table"
    lockTable = "lock_table"
    queryLimit = 100
  }
}

## server configuration, only used in server side
server {
  recovery {
    #schedule committing retry period in milliseconds
    committingRetryPeriod = 1000
    #schedule asyn committing retry period in milliseconds
    asynCommittingRetryPeriod = 1000
    #schedule rollbacking retry period in milliseconds
    rollbackingRetryPeriod = 1000
    #schedule timeout retry period in milliseconds
    timeoutRetryPeriod = 1000
  }
  undo {
    logSaveDays = 7
    #schedule delete expired undo_log in milliseconds
    logDeletePeriod = 86400000
  }
  #unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
  maxCommitRetryTimeout = "-1"
  maxRollbackRetryTimeout = "-1"
  rollbackRetryTimeoutUnlockEnable = false
}

## metrics configuration, only used in server side
metrics {
  enabled = false
  registryType = "compact"
  # multi exporters use comma divided
  exporterList = "prometheus"
  exporterPrometheusPort = 9898
}

If you choose to store transaction data in a database, three tables have to be created in the database that the configured URL jdbc:mysql://127.0.0.1:3306/seata points to:

-- the table to store GlobalSession data
drop table if exists `global_table`;
create table `global_table` (
  `xid` varchar(128) not null,
  `transaction_id` bigint,
  `status` tinyint not null,
  `application_id` varchar(32),
  `transaction_service_group` varchar(32),
  `transaction_name` varchar(128),
  `timeout` int,
  `begin_time` bigint,
  `application_data` varchar(2000),
  `gmt_create` datetime,
  `gmt_modified` datetime,
  primary key (`xid`),
  key `idx_gmt_modified_status` (`gmt_modified`, `status`),
  key `idx_transaction_id` (`transaction_id`)
);

-- the table to store BranchSession data
drop table if exists `branch_table`;
create table `branch_table` (
  `branch_id` bigint not null,
  `xid` varchar(128) not null,
  `transaction_id` bigint,
  `resource_group_id` varchar(32),
  `resource_id` varchar(256),
  `lock_key` varchar(128),
  `branch_type` varchar(8),
  `status` tinyint,
  `client_id` varchar(64),
  `application_data` varchar(2000),
  `gmt_create` datetime,
  `gmt_modified` datetime,
  primary key (`branch_id`),
  key `idx_xid` (`xid`)
);

-- the table to store lock data
drop table if exists `lock_table`;
create table `lock_table` (
  `row_key` varchar(128) not null,
  `xid` varchar(96),
  `transaction_id` bigint,
  `branch_id` bigint,
  `resource_id` varchar(256),
  `table_name` varchar(32),
  `pk` varchar(36),
  `gmt_create` datetime,
  `gmt_modified` datetime,
  primary key (`row_key`)
);

Starting the server

The seata/bin directory holds two startup scripts, one for Windows and one for Linux.

Taking Linux as the example, start the server in the background with its log written to a file, specifying the port, bind IP, and run mode; file mode runs standalone without a database:

nohup sh seata-server.sh -p 8091 -h 127.0.0.1 -m file > seata.log 2>&1 &
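On Windows the same flags should apply to the .bat script shipped in the same directory (a sketch; run it from seata/bin):

seata-server.bat -p 8091 -h 127.0.0.1 -m file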

A Spring Boot integration demo

The demo consists of two projects, one called order and one called pay.

Maven dependencies

<properties>
    <java.version>1.8</java.version>
    <alibaba.seata.version>2.2.0.RELEASE</alibaba.seata.version>
    <!-- keep ${seata.version} in sync with the Seata server version -->
    <seata.version>1.1.0</seata.version>
</properties>

<!-- Seata distributed transaction integration -->
<dependency>
    <groupId>com.alibaba.cloud</groupId>
    <artifactId>spring-cloud-alibaba-seata</artifactId>
    <version>${alibaba.seata.version}</version>
    <!-- exclude the transitive seata client so the version below can match the server -->
    <exclusions>
        <exclusion>
            <groupId>io.seata</groupId>
            <artifactId>seata-spring-boot-starter</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<!-- Seata client, version matching the server -->
<dependency>
    <groupId>io.seata</groupId>
    <artifactId>seata-spring-boot-starter</artifactId>
    <version>${seata.version}</version>
    <exclusions>
        <exclusion>
            <artifactId>protobuf-java</artifactId>
            <groupId>com.google.protobuf</groupId>
        </exclusion>
    </exclusions>
</dependency>

The application.yml configuration file

Using order as the example:

spring:
  application:
    name: order
  datasource:
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://localhost:3306/order?useSSL=false&useUnicode=true&characterEncoding=UTF8&serverTimezone=Asia/Shanghai
    username: root
    password: 1234
  cloud:
    alibaba:
      seata:
        # name of the transaction group
        tx-service-group: fsp_tx_group

# Eureka registration
eureka:
  instance:
    hostname: localhost
    port: 8761
    prefer-ip-address: true
    instance-id: ${spring.cloud.client.ip-address}:${server.port}
    lease-renewal-interval-in-seconds: 5
    lease-expiration-duration-in-seconds: 10
  client:
    registry-fetch-interval-seconds: 5
    service-url:
      defaultZone: http://${eureka.instance.hostname}:${eureka.instance.port}/eureka/

# Seata configuration
seata:
  address: 127.0.0.1:8091
  enabled: true
  # the data source is proxied manually below, so disable auto proxying
  enableAutoDataSourceProxy: false
  # transaction group, matching the one above
  tx-service-group: fsp_tx_group
  registry:
    type: eureka
    eureka:
      application: hatech-seata-server
      weight: 1
      service-url: ${eureka.client.service-url.defaultZone}
  transport:
    type: TCP
    server: NIO
    heartbeat: true
    thread-factory:
      boss-thread-prefix: NettyBoss
      worker-thread-prefix: NettyServerNIOWorker
      server-executor-thread-prefix: NettyServerBizHandler
      share-boss-worker: false
      client-selector-thread-prefix: NettyClientSelector
      client-selector-thread-size: 1
      client-worker-thread-prefix: NettyClientWorkerThread
      boss-thread-size: 1
      worker-thread-size: 8
    shutdown:
      wait: 3
    serialization: seata
    compressor: none
  service:
    # maps the transaction group to the name the Seata server registered with
    vgroup-mapping:
      fsp_tx_group: seata-server
    # TC server list
    grouplist:
      hatech-seata-server: ${seata.address}
    enable-degrade: false
    disable-global-transaction: false
  client:
    rm:
      async-commit-buffer-limit: 10000
      table-meta-check-enable: false
      lock:
        retry-times: 30
        retry-interval: 10
      report-retry-count: 5
    undo:
      # table used for rollback records
      log-table: undo_log
      data-validation: true

Creating the undo_log table

Because undo_log is configured as the table used for rollback, it must be created in every database that the services connect to. The table name is arbitrary, but it has to match the one in the configuration file.

-- the table to store seata xid data
drop table if exists `undo_log`;
CREATE TABLE `undo_log` (
  `id` bigint(20) NOT NULL AUTO_INCREMENT,
  `branch_id` bigint(20) NOT NULL,
  `xid` varchar(100) NOT NULL,
  `context` varchar(128) NOT NULL,
  `rollback_info` longblob NOT NULL,
  `log_status` int(11) NOT NULL,
  `log_created` datetime NOT NULL,
  `log_modified` datetime NOT NULL,
  `ext` varchar(100) DEFAULT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `ux_undo_log` (`xid`,`branch_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

Configuring the proxy data source

Seata's two-phase execution works by intercepting SQL statements and analyzing their semantics to build the rollback strategy, so the DataSource has to be wrapped in a proxy. Every microservice project needs this configuration.

If you use JdbcTemplate, the configuration is:

@Bean
public JdbcTemplate jdbcTemplate(DataSource dataSource) {
    // build the JdbcTemplate on Seata's DataSourceProxy instead of the raw DataSource
    return new JdbcTemplate(new DataSourceProxy(dataSource));
}

If you use MyBatis-Plus:

@Bean
public SqlSessionFactory sqlSessionFactoryBean(DataSource dataSource) throws Exception {
    // the order service uses mybatis-plus, so the special MybatisSqlSessionFactoryBean is required
    MybatisSqlSessionFactoryBean sqlSessionFactoryBean = new MybatisSqlSessionFactoryBean();
    // proxy the data source
    sqlSessionFactoryBean.setDataSource(new DataSourceProxy(dataSource));
    // build the SqlSessionFactory
    return sqlSessionFactoryBean.getObject();
}

If you use plain MyBatis, use SqlSessionFactoryBean instead; see the sketch below.
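A minimal sketch for that case, assuming the stock SqlSessionFactoryBean from mybatis-spring; the proxying idea is exactly the same:

@Bean
public SqlSessionFactory sqlSessionFactory(DataSource dataSource) throws Exception {
    SqlSessionFactoryBean factoryBean = new SqlSessionFactoryBean();
    // wrap the real DataSource in Seata's DataSourceProxy, as above
    factoryBean.setDataSource(new DataSourceProxy(dataSource));
    return factoryBean.getObject();
}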

Using the transaction annotation

Usage is very simple: just add the @GlobalTransactional annotation to the entry method.

@GetMapping("/save")
@GlobalTransactional
public String save() {
    // create the order
    this.orderService.save();
    // call the pay service
    String result = this.restTemplate.getForObject("http://localhost:9000/save", String.class);
    // deliberately throw, so the whole global transaction rolls back
    int i = 10 / 0;
    return "success";
}
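The pay side needs no annotation of its own. A hypothetical endpoint (controller and service names are assumptions, matching the http://localhost:9000/save call above) might look like this; the XID travels with the HTTP request via the interceptor that spring-cloud-alibaba-seata registers on RestTemplate beans, which enlists the pay service's local transaction as a branch of the global one:

@RestController
public class PayController {

    @Autowired
    private PayService payService; // hypothetical local service

    @GetMapping("/save")
    public String save() {
        // runs as a branch transaction; rolled back via its undo_log
        // when the order side throws
        this.payService.save();
        return "success";
    }
}

So when the division by zero throws on the order side, both the local order insert and the remote pay insert are undone.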

Upgrading to 1.4.0

Download: https://github.com/seata/seata/releases/download/v1.4.0/seata-server-1.4.0.tar.gz
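For example, to download and unpack on Linux:

wget https://github.com/seata/seata/releases/download/v1.4.0/seata-server-1.4.0.tar.gz
tar -zxvf seata-server-1.4.0.tar.gz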

Configuring file.conf

Taking file-based storage as the example, the changes are in the store block and its file sub-block, mainly these properties:

mode = "file"
maxBranchSessionSize = 1120000
fileWriteBufferCacheSize = 1120000

[maxBranchSessionSize]: computed in bytes as [bytes per id (32)] x [max rows one mapper method may touch (5000)] x [max tables one business action may touch (7)] = 1120000. If the total primary-key size within one transaction exceeds this value (16 KB by default), the server throws the "Failed to store branch" error (this is the issue in the earlier releases mentioned above).
[fileWriteBufferCacheSize]: it is recommended to set fileWriteBufferCacheSize equal to maxBranchSessionSize.

A complete example:

## transaction log store, only used in seata-server
store {
  ## store mode: file、db、redis
  mode = "file"
  ## file store property
  ## maxBranchSessionSize (bytes) = [bytes per id (32)] * [max rows one mapper method may touch (5000)] * [max tables one business action may touch (7)] = 1120000
  ## keep fileWriteBufferCacheSize equal to maxBranchSessionSize
  ## exceeding 1120000 bytes raises "Failed to store branch"; 1024 MB of memory is suggested, scale up from there
  file {
    ## store location dir
    dir = "sessionStore"
    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
    maxBranchSessionSize = 1120000
    # globe session size , if exceeded throws exceptions
    maxGlobalSessionSize = 512
    # file buffer size , if exceeded allocate new buffer
    fileWriteBufferCacheSize = 1120000
    # when recover batch read size
    sessionReloadReadSize = 100
    # async, sync
    flushDiskMode = async
  }
}

Modifying registry.conf

With Eureka as the registry center, only the registry block needs attention; file is fine for the config block.
Note that the registered service name has changed here.

registry {
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  type = "eureka"
  loadBalance = "RandomLoadBalance"
  loadBalanceVirtualNodes = 10
  eureka {
    serviceUrl = "http://localhost:8761/eureka"
    application = "seata-server"
    weight = "1"
  }
}

Spring Boot integration

Maven dependencies

 <!-- Seata distributed transaction integration -->
 <dependency>
     <groupId>com.alibaba.cloud</groupId>
     <artifactId>spring-cloud-alibaba-seata</artifactId>
     <exclusions>
         <exclusion>
             <groupId>io.seata</groupId>
             <artifactId>seata-spring-boot-starter</artifactId>
         </exclusion>
     </exclusions>
 </dependency>
 <!-- Seata client pinned to the latest (1.4.0) version -->
 <dependency>
     <groupId>io.seata</groupId>
     <artifactId>seata-spring-boot-starter</artifactId>
     <version>1.4.0</version>
     <exclusions>
         <exclusion>
             <artifactId>protobuf-java</artifactId>
             <groupId>com.google.protobuf</groupId>
         </exclusion>
         <exclusion>
             <artifactId>guava</artifactId>
             <groupId>com.google.guava</groupId>
         </exclusion>
     </exclusions>
 </dependency>

YAML configuration

# Seata configuration
seata:
  address: 10.27.3.140:8091
  enabled: true
  application-id: ${spring.application.name}
  tx-service-group: istorm_tx_group
  client:
    tm:
      default-global-transaction-timeout: 60000
    undo:
      log-table: seata_log
  service:
    vgroup-mapping:
      # the key matches tx-service-group above; the value is the service name registered in the registry center
      istorm_tx_group: seata-server
    grouplist:
      # the key matches the seata-server name configured above
      seata-server: ${seata.address}
  registry:
    type: eureka
    eureka:
      application: ${seata.service.vgroup-mapping.istorm_tx_group}
      service-url: ${eureka.client.service-url.defaultZone}
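
Client-side usage should be unchanged from 1.1.0; a sketch (the name and timeoutMills attributes are optional, and the values here are illustrative):

@GetMapping("/save")
@GlobalTransactional(name = "create-order", timeoutMills = 60000)
public String save() {
    // local write plus remote call, same pattern as the 1.1.0 demo
    this.orderService.save();
    return this.restTemplate.getForObject("http://localhost:9000/save", String.class);
}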