A detailed walkthrough of eth's PoW consensus algorithm call stack
The core logic starts from the NewProtocolManager function in /eth/handler.go. The key code:
```go
manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer)

validator := func(header *types.Header) error {
    return engine.VerifyHeader(blockchain, header, true)
}
heighter := func() uint64 {
    return blockchain.CurrentBlock().NumberU64()
}
inserter := func(blocks types.Blocks) (int, error) {
    // If fast sync is running, deny importing weird blocks
    if atomic.LoadUint32(&manager.fastSync) == 1 {
        log.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
        return 0, nil
    }
    atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
    return manager.blockchain.InsertChain(blocks)
}
manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)

return manager, nil
```
This function creates validator, a closure used to verify block headers, and wires it into the fetcher.
Let's follow engine.VerifyHeader(blockchain, header, true):
```go
// VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum ethash engine.
func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
    // If we're running a full engine faking, accept any input as valid
    if ethash.fakeFull {
        return nil
    }
    // Short circuit if the header is known, or it's parent not
    number := header.Number.Uint64()
    if chain.GetHeader(header.Hash(), number) != nil {
        return nil
    }
    parent := chain.GetHeader(header.ParentHash, number-1)
    if parent == nil {
        return consensus.ErrUnknownAncestor
    }
    // Sanity checks passed, do a proper verification
    return ethash.verifyHeader(chain, header, parent, false, seal)
}
```
Look at the first if: when fakeFull is true, all consensus rule checks are switched off and any header is accepted. The next two ifs are short circuits: if the header (looked up by its hash and number) already exists on the chain, verification passes immediately; if its parent cannot be found on the chain, verification fails with consensus.ErrUnknownAncestor. If neither short circuit fires, the sanity checks have passed and the final return hands off to verifyHeader for the remaining, more thorough checks.
```go
// verifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum ethash engine.
// See YP section 4.3.4. "Block Header Validity"
func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
    // Ensure that the header's extra-data section is of a reasonable size
    if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
        return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
    }
    // Verify the header's timestamp
    if uncle {
        if header.Time.Cmp(math.MaxBig256) > 0 {
            return errLargeBlockTime
        }
    } else {
        if header.Time.Cmp(big.NewInt(time.Now().Unix())) > 0 {
            return consensus.ErrFutureBlock
        }
    }
    if header.Time.Cmp(parent.Time) <= 0 {
        return errZeroBlockTime
    }
    // Verify the block's difficulty based in it's timestamp and parent's difficulty
    expected := CalcDifficulty(chain.Config(), header.Time.Uint64(), parent)
    if expected.Cmp(header.Difficulty) != 0 {
        return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
    }
    // Verify that the gas limit is <= 2^63-1
    if header.GasLimit.Cmp(math.MaxBig63) > 0 {
        return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, math.MaxBig63)
    }
    // Verify that the gasUsed is <= gasLimit
    if header.GasUsed.Cmp(header.GasLimit) > 0 {
        return fmt.Errorf("invalid gasUsed: have %v, gasLimit %v", header.GasUsed, header.GasLimit)
    }
    // Verify that the gas limit remains within allowed bounds
    diff := new(big.Int).Set(parent.GasLimit)
    diff = diff.Sub(diff, header.GasLimit)
    diff.Abs(diff)

    limit := new(big.Int).Set(parent.GasLimit)
    limit = limit.Div(limit, params.GasLimitBoundDivisor)

    if diff.Cmp(limit) >= 0 || header.GasLimit.Cmp(params.MinGasLimit) < 0 {
        return fmt.Errorf("invalid gas limit: have %v, want %v += %v", header.GasLimit, parent.GasLimit, limit)
    }
    // Verify that the block number is parent's +1
    if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 {
        return consensus.ErrInvalidNumber
    }
    // Verify the engine specific seal securing the block
    if seal {
        if err := ethash.VerifySeal(chain, header); err != nil {
            return err
        }
    }
    // If all checks passed, validate any special fields for hard forks
    if err := misc.VerifyDAOHeaderExtraData(chain.Config(), header); err != nil {
        return err
    }
    if err := misc.VerifyForkHashes(chain.Config(), header, uncle); err != nil {
        return err
    }
    return nil
}
```
This method first checks that the header's extra-data is no larger than the protocol maximum (params.MaximumExtraDataSize) and returns an error otherwise. It then validates the timestamp: an uncle header's timestamp only has to fit within 256 bits, while a regular header must not lie in the future, and in either case the timestamp must be strictly greater than the parent's. Next, the expected difficulty is computed from the chain configuration, the header's timestamp and the parent block; verification fails if the header's difficulty does not match, if the gas limit exceeds 2^63-1, or if the gas used exceeds the gas limit. The gas limit must also stay within the allowed bounds relative to the parent (it may move by less than parent.GasLimit / params.GasLimitBoundDivisor per block and must not drop below params.MinGasLimit), and the block number must be exactly the parent's number plus one. If seal is true, VerifySeal checks that the header satisfies the PoW requirement (private chains generally do not need PoW, so they can skip this). The last two ifs handle hard-fork specifics: VerifyDAOHeaderExtraData checks that the header's extra-data field complies with the DAO hard-fork rules, and VerifyForkHashes checks that blocks at fork heights have the expected hashes, preventing a client from ending up on the wrong chain.
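To make the gas-limit bound concrete, here is a small standalone sketch of the same check; the numbers are made up for the example, and the constants mirror mainnet's params.GasLimitBoundDivisor (1024) and params.MinGasLimit (5000):

```go
package main

import (
    "fmt"
    "math/big"
)

// Protocol constants (params.GasLimitBoundDivisor and params.MinGasLimit on mainnet).
var (
    gasLimitBoundDivisor = big.NewInt(1024)
    minGasLimit          = big.NewInt(5000)
)

// gasLimitValid mirrors the bound check in verifyHeader: the gas limit may move
// by less than parentGasLimit/1024 per block and must never drop below 5000.
func gasLimitValid(parentGasLimit, gasLimit *big.Int) bool {
    diff := new(big.Int).Sub(parentGasLimit, gasLimit)
    diff.Abs(diff)
    limit := new(big.Int).Div(parentGasLimit, gasLimitBoundDivisor)
    return diff.Cmp(limit) < 0 && gasLimit.Cmp(minGasLimit) >= 0
}

func main() {
    parent := big.NewInt(4_700_000) // allowed per-block change: 4_700_000/1024 = 4589
    fmt.Println(gasLimitValid(parent, big.NewInt(4_704_000))) // true: moved by 4000 < 4589
    fmt.Println(gasLimitValid(parent, big.NewInt(4_710_000))) // false: moved by 10000 >= 4589
}
```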
Uncle blocks:

Ethereum allows a miner to include a list of uncle blocks along with the block it mines. This serves two main purposes:

  1. Because of network propagation delays, it rewards miners whose blocks became stale or orphaned (valid blocks that did not end up as part of the canonical chain), which reduces the incentive toward mining centralization.
  2. It increases the security of the chain by counting the work spent on stale blocks toward the main chain, so that less proof-of-work is wasted.

The core of eth's PoW difficulty calculation:

```go
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
// TODO (karalabe): Move the chain maker into this package and make this private!
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
    next := new(big.Int).Add(parent.Number, big1)
    switch {
    case config.IsByzantium(next):
        return calcDifficultyByzantium(time, parent)
    case config.IsHomestead(next):
        return calcDifficultyHomestead(time, parent)
    default:
        return calcDifficultyFrontier(time, parent)
    }
}
```
Which branch runs depends on the new block's number (parent number + 1) and the chain configuration: the first case computes the difficulty the new block should have under the Byzantium rules, the second under the Homestead rules, and the default case falls back to the original Frontier rules.
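For intuition, the Homestead rule (EIP-2) adjusts the parent difficulty roughly as sketched below. This is a simplified illustration with plain integers, not the actual calcDifficultyHomestead, which works on big.Int throughout and handles the minimum-difficulty and difficulty-bomb edge cases more carefully:

```go
package main

import (
    "fmt"
)

// homesteadDifficulty is a simplified illustration of the Homestead (EIP-2)
// difficulty adjustment.
func homesteadDifficulty(parentDiff, parentTime, blockTime, blockNumber int64) int64 {
    // Adjustment step: parent difficulty / 2048.
    adjust := parentDiff / 2048

    // Fast blocks raise difficulty, slow blocks lower it, bounded at -99 steps.
    sign := 1 - (blockTime-parentTime)/10
    if sign < -99 {
        sign = -99
    }
    diff := parentDiff + adjust*sign

    // Never drop below the protocol minimum difficulty (131072).
    if diff < 131072 {
        diff = 131072
    }

    // Exponential "difficulty bomb": grows with every 100000-block period.
    period := blockNumber / 100000
    if period > 1 {
        diff += 1 << uint(period-2)
    }
    return diff
}

func main() {
    // A 14s block keeps difficulty roughly flat; a 30s block lowers it.
    fmt.Println(homesteadDifficulty(1_000_000, 0, 14, 1_500_000))
    fmt.Println(homesteadDifficulty(1_000_000, 0, 30, 1_500_000))
}
```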

Walkthrough of the PoW code that seals (generates) new blocks

The Seal function in /consensus/ethash/sealer.go:

```go
// Seal implements consensus.Engine, attempting to find a nonce that satisfies
// the block's difficulty requirements.
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
    // If we're running a fake PoW, simply return a 0 nonce immediately
    if ethash.fakeMode {
        header := block.Header()
        header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
        return block.WithSeal(header), nil
    }
    // If we're running a shared PoW, delegate sealing to it
    if ethash.shared != nil {
        return ethash.shared.Seal(chain, block, stop)
    }
    // Create a runner and the multiple search threads it directs
    abort := make(chan struct{})
    found := make(chan *types.Block)

    ethash.lock.Lock()
    threads := ethash.threads
    if ethash.rand == nil {
        seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
        if err != nil {
            ethash.lock.Unlock()
            return nil, err
        }
        ethash.rand = rand.New(rand.NewSource(seed.Int64()))
    }
    ethash.lock.Unlock()
    if threads == 0 {
        threads = runtime.NumCPU()
    }
    if threads < 0 {
        threads = 0 // Allows disabling local mining without extra logic around local/remote
    }
    var pend sync.WaitGroup
    for i := 0; i < threads; i++ {
        pend.Add(1)
        go func(id int, nonce uint64) {
            defer pend.Done()
            ethash.mine(block, id, nonce, abort, found)
        }(i, uint64(ethash.rand.Int63()))
    }
    // Wait until sealing is terminated or a nonce is found
    var result *types.Block
    select {
    case <-stop:
        // Outside abort, stop all miner threads
        close(abort)
    case result = <-found:
        // One of the threads found a block, abort all others
        close(abort)
    case <-ethash.update:
        // Thread count was changed on user request, restart
        close(abort)
        pend.Wait()
        return ethash.Seal(chain, block, stop)
    }
    // Wait for all miners to terminate and return the block
    pend.Wait()
    return result, nil
}
```
In the for loop, this method starts one goroutine per thread so that mining proceeds in parallel, giving each worker a different random starting nonce. As soon as sealing is stopped from outside or one worker finds a new block, the abort channel is closed and all other workers are discarded; if the thread count is changed by the user (signalled on ethash.update), every worker is stopped and Seal calls itself again with the new settings. A minimal sketch of this channel pattern follows.
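The concurrency pattern here is a plain fan-out with an abort channel and a found channel. The self-contained sketch below shows just the channel wiring; the search function is hypothetical, standing in for ethash.mine:

```go
package main

import (
    "fmt"
    "math/rand"
    "sync"
)

// search is a stand-in for ethash.mine: it "works" from a random starting
// nonce and reports a result on found unless told to abort first.
func search(id int, start uint64, abort <-chan struct{}, found chan<- uint64) {
    for nonce := start; ; nonce++ {
        select {
        case <-abort:
            return
        default:
            if nonce%1_000_000 == 0 { // pretend this nonce satisfies the target
                select {
                case found <- nonce:
                case <-abort:
                }
                return
            }
        }
    }
}

func main() {
    abort := make(chan struct{})
    found := make(chan uint64)

    var pend sync.WaitGroup
    for i := 0; i < 4; i++ {
        pend.Add(1)
        go func(id int, start uint64) {
            defer pend.Done()
            search(id, start, abort, found)
        }(i, rand.Uint64())
    }

    // First result wins; closing abort cancels every other worker.
    result := <-found
    close(abort)
    pend.Wait()
    fmt.Println("found nonce", result)
}
```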
With that, we reach the core PoW mining method, `ethash.mine`: when it mines a valid block, it writes the sealed block to the found channel.
```go
// mine is the actual proof-of-work miner that searches for a nonce starting from
// seed that results in correct final block difficulty.
func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan struct{}, found chan *types.Block) {
    // Extract some data from the header
    var (
        header = block.Header()
        hash   = header.HashNoNonce().Bytes()
        target = new(big.Int).Div(maxUint256, header.Difficulty)

        number  = header.Number.Uint64()
        dataset = ethash.dataset(number)
    )
    // Start generating random nonces until we abort or find a good one
    var (
        attempts = int64(0)
        nonce    = seed
    )
    logger := log.New("miner", id)
    logger.Trace("Started ethash search for new nonces", "seed", seed)
    for {
        select {
        case <-abort:
            // Mining terminated, update stats and abort
            logger.Trace("Ethash nonce search aborted", "attempts", nonce-seed)
            ethash.hashrate.Mark(attempts)
            return

        default:
            // We don't have to update hash rate on every nonce, so update after after 2^X nonces
            attempts++
            if (attempts % (1 << 15)) == 0 {
                ethash.hashrate.Mark(attempts)
                attempts = 0
            }
            // Compute the PoW value of this nonce
            digest, result := hashimotoFull(dataset, hash, nonce)
            if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
                // Correct nonce found, create a new header with it
                header = types.CopyHeader(header)
                header.Nonce = types.EncodeNonce(nonce)
                header.MixDigest = common.BytesToHash(digest)

                // Seal and return a block (if still needed)
                select {
                case found <- block.WithSeal(header):
                    logger.Trace("Ethash nonce found and reported", "attempts", nonce-seed, "nonce", nonce)
                case <-abort:
                    logger.Trace("Ethash nonce found but discarded", "attempts", nonce-seed, "nonce", nonce)
                }
                return
            }
            nonce++
        }
    }
}
```
Here target is derived from the block's difficulty: maxUint256 (roughly 2^256) divided by header.Difficulty, so a higher difficulty means a smaller target. hashimotoFull computes a hash for the current nonce; if that hash, interpreted as a big integer, is less than or equal to target, the nonce meets the difficulty requirement, the header is sealed with the nonce and mix digest and written to the found channel, and the goroutine returns. Otherwise the nonce is incremented and the loop continues.
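A tiny worked illustration of the target comparison; the difficulty value is arbitrary and the result bytes are faked to stand in for the hashimotoFull output:

```go
package main

import (
    "fmt"
    "math/big"
)

func main() {
    // 2^256, the numerator used for the ethash target.
    maxUint256 := new(big.Int).Lsh(big.NewInt(1), 256)

    // target = 2^256 / difficulty: higher difficulty => smaller target.
    difficulty := big.NewInt(1_000_000)
    target := new(big.Int).Div(maxUint256, difficulty)

    // Pretend this is the 32-byte result returned by hashimotoFull.
    result := make([]byte, 32)
    result[4] = 0x01 // ~2^216, comfortably below the ~2^236 target

    ok := new(big.Int).SetBytes(result).Cmp(target) <= 0
    fmt.Println("nonce satisfies difficulty:", ok) // true

    // Roughly one in `difficulty` random hashes falls below the target.
    fmt.Println("target bit length:", target.BitLen()) // 237: 256 bits minus ~20 bits of difficulty
}
```

With that in mind, here is hashimotoFull itself: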
```go
// hashimotoFull aggregates data from the full dataset (using the full in-memory
// dataset) in order to produce our final value for a particular header hash and
// nonce.
func hashimotoFull(dataset []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
    lookup := func(index uint32) []uint32 {
        offset := index * hashWords
        return dataset[offset : offset+hashWords]
    }
    return hashimoto(hash, nonce, uint64(len(dataset))*4, lookup)
}
```
```go
// hashimoto aggregates data from the full dataset in order to produce our final
// value for a particular header hash and nonce.
func hashimoto(hash []byte, nonce uint64, size uint64, lookup func(index uint32) []uint32) ([]byte, []byte) {
    // Calculate the number of theoretical rows (we use one buffer nonetheless)
    rows := uint32(size / mixBytes)

    // Combine header+nonce into a 64 byte seed
    seed := make([]byte, 40)
    copy(seed, hash)
    binary.LittleEndian.PutUint64(seed[32:], nonce)

    seed = crypto.Keccak512(seed)
    seedHead := binary.LittleEndian.Uint32(seed)

    // Start the mix with replicated seed
    mix := make([]uint32, mixBytes/4)
    for i := 0; i < len(mix); i++ {
        mix[i] = binary.LittleEndian.Uint32(seed[i%16*4:])
    }
    // Mix in random dataset nodes
    temp := make([]uint32, len(mix))

    for i := 0; i < loopAccesses; i++ {
        parent := fnv(uint32(i)^seedHead, mix[i%len(mix)]) % rows
        for j := uint32(0); j < mixBytes/hashBytes; j++ {
            copy(temp[j*hashWords:], lookup(2*parent+j))
        }
        fnvHash(mix, temp)
    }
    // Compress mix
    for i := 0; i < len(mix); i += 4 {
        mix[i/4] = fnv(fnv(fnv(mix[i], mix[i+1]), mix[i+2]), mix[i+3])
    }
    mix = mix[:len(mix)/4]

    digest := make([]byte, common.HashLength)
    for i, val := range mix {
        binary.LittleEndian.PutUint32(digest[i*4:], val)
    }
    return digest, crypto.Keccak256(append(seed, digest...))
}
```
As you can see, hashimoto combines the header hash and nonce into a seed with Keccak-512, then repeatedly folds pseudo-randomly selected rows of the ethash dataset into the mix using the FNV mixing function, compresses the mix into a 32-byte digest, and returns that digest together with Keccak-256(seed ++ digest) as the final PoW value. Back in mine, this value is compared against the target: if, read as an integer, it is less than or equal to the target, the hash satisfies the block's difficulty requirement and the block can be sealed.
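For reference, the fnv and fnvHash helpers used in the mixing loop are the ethash variant of FNV-1 (multiply by the prime 0x01000193, then XOR). A hedged reconstruction looks roughly like this; see consensus/ethash/algorithm.go for the exact source:

```go
// fnv is the ethash take on FNV-1: multiply by the FNV prime, then XOR.
// It is used as a cheap, non-associative mixing function instead of plain XOR.
func fnv(a, b uint32) uint32 {
    return a*0x01000193 ^ b
}

// fnvHash applies fnv element-wise, folding data into mix in place.
func fnvHash(mix []uint32, data []uint32) {
    for i := 0; i < len(mix); i++ {
        mix[i] = mix[i]*0x01000193 ^ data[i]
    }
}
```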