// gcBgMarkWorker is the entry point for a background mark worker goroutine.
// After announcing readiness via work.bgMarkReady, it parks itself and is
// thereafter scheduled cooperatively by gcController.findRunnable; each time
// it is woken it drains the P's GC work buffer (_p_.gcw) in whichever mode
// (dedicated / fractional / idle) the P has been assigned, accounts the time
// spent, and re-parks.
//
// NOTE(review): several interior spans of this function are elided here as
// "(...)"; comments below only describe the visible code.
func gcBgMarkWorker(_p_ *p) {
	gp := getg()

	// parkInfo carries the state the gopark unlock callback needs.
	type parkInfo struct {
		m      muintptr // Release this m on park.
		attach puintptr // If non-nil, attach to this p on park.
	}
	// We pass park to a gopark unlock function, so it can't be on
	// the stack (see gopark). Prevent deadlock from recursively
	// starting GC by disabling preemption around the allocation.
	gp.m.preemptoff = "GC worker init"
	park := new(parkInfo)
	gp.m.preemptoff = ""

	// Pin to the current M until we are safely parked; record the P we
	// should attach to once stopped.
	park.m.set(acquirem())
	park.attach.set(_p_)
	// Inform gcBgMarkStartWorkers that this worker is ready.
	// After this point, the background mark worker is scheduled
	// cooperatively by gcController.findRunnable. Hence, it must
	// never be preempted, as this would put it into _Grunnable
	// and put it on a run queue. Instead, when the preempt flag
	// is set, this puts itself into _Gwaiting to be woken up by
	// gcController.findRunnable at the appropriate time.
	notewakeup(&work.bgMarkReady)

	for {
		// Go to sleep until woken by gcController.findRunnable.
		// We can't releasem yet since even the call to gopark
		// may be preempted.
		gopark(func(g *g, parkp unsafe.Pointer) bool {
			park := (*parkInfo)(parkp)

			// The worker G is no longer running, so it's
			// now safe to allow preemption.
			releasem(park.m.ptr())

			// If the worker isn't attached to its P,
			// attach now. During initialization and after
			// a phase change, the worker may have been
			// running on a different P. As soon as we
			// attach, the owner P may schedule the
			// worker, so this must be done after the G is
			// stopped.
			if park.attach != 0 {
				p := park.attach.ptr()
				park.attach.set(nil)
				// cas the worker because we may be
				// racing with a new worker starting
				// on this P.
				if !p.gcBgMarkWorker.cas(0, guintptr(unsafe.Pointer(g))) {
					// The P got a new worker.
					// Exit this worker (returning false
					// from the unlock function aborts the
					// park and lets the G exit).
					return false
				}
			}
			return true
		}, unsafe.Pointer(park), waitReasonGCWorkerIdle, traceEvGoBlock, 0)

		// Loop until the P dies and disassociates this
		// worker (the P may later be reused, in which case
		// it will get a new worker) or we failed to associate.
		if _p_.gcBgMarkWorker.ptr() != gp {
			break
		}

		// Disable preemption so we can use the gcw. If the
		// scheduler wants to preempt us, we'll stop draining,
		// dispose the gcw, and then preempt.
		park.m.set(acquirem())
		(...)

		startTime := nanotime()
		_p_.gcMarkWorkerStartTime = startTime

		// Declare this worker as busy before draining; the matching
		// increment happens after the drain below.
		decnwait := atomic.Xadd(&work.nwait, -1)
		(...)

		systemstack(func() {
			// Mark our goroutine preemptible so its stack
			// can be scanned. This lets two mark workers
			// scan each other (otherwise, they would
			// deadlock). We must not modify anything on
			// the G stack. However, stack shrinking is
			// disabled for mark workers, so it is safe to
			// read from the G stack.
			//
			// The G is still running (on the system stack);
			// _Gwaiting is a deliberate fiction for the scanner.
			casgstatus(gp, _Grunning, _Gwaiting)
			switch _p_.gcMarkWorkerMode {
			(...)
			case gcMarkWorkerDedicatedMode:
				// First drain is preemptible so urgent work
				// (e.g. STW requests) can interrupt us.
				gcDrain(&_p_.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
				if gp.preempt {
					// We were preempted. This is
					// a useful signal to kick
					// everything out of the run
					// queue so it can run
					// somewhere else.
					lock(&sched.lock)
					for {
						// Note: deliberately shadows the
						// outer gp (the worker G).
						gp, _ := runqget(_p_)
						if gp == nil {
							break
						}
						globrunqput(gp)
					}
					unlock(&sched.lock)
				}
				// Go back to draining, this time
				// without preemption.
				gcDrain(&_p_.gcw, gcDrainFlushBgCredit)
			case gcMarkWorkerFractionalMode:
				gcDrain(&_p_.gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
			case gcMarkWorkerIdleMode:
				gcDrain(&_p_.gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
			}
			casgstatus(gp, _Gwaiting, _Grunning)
		})

		// Account for time: credit this drain's duration to the
		// pacer counters for the mode we ran in.
		duration := nanotime() - startTime
		switch _p_.gcMarkWorkerMode {
		case gcMarkWorkerDedicatedMode:
			atomic.Xaddint64(&gcController.dedicatedMarkTime, duration)
			// The dedicated slot is free again.
			atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1)
		case gcMarkWorkerFractionalMode:
			atomic.Xaddint64(&gcController.fractionalMarkTime, duration)
			atomic.Xaddint64(&_p_.gcFractionalMarkTime, duration)
		case gcMarkWorkerIdleMode:
			atomic.Xaddint64(&gcController.idleMarkTime, duration)
		}

		// Was this the last worker and did we run out
		// of work?
		incnwait := atomic.Xadd(&work.nwait, +1)
		if incnwait > work.nproc {
			// nwait exceeding nproc indicates broken accounting;
			// handling elided here.
			(...)
		}

		// If this worker reached a background mark completion
		// point, signal the main GC goroutine.
		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
			// Make this G preemptible and disassociate it
			// as the worker for this P so
			// findRunnableGCWorker doesn't try to
			// schedule it.
			_p_.gcBgMarkWorker.set(nil)
			releasem(park.m.ptr())

			gcMarkDone()

			// Disable preemption and prepare to reattach
			// to the P.
			//
			// We may be running on a different P at this
			// point, so we can't reattach until this G is
			// parked.
			park.m.set(acquirem())
			park.attach.set(_p_)
		}
	}
}