1. cache_alloc_refill

  static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
  {
      int batchcount;
      struct kmem_list3 *l3;
      struct array_cache *ac;
      int node;

  retry:
      check_irq_off();
      node = numa_node_id();
      ac = cpu_cache_get(cachep);
      // during cache initialization batchcount is 1
      batchcount = ac->batchcount;
      if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
          /*
           * If there was little recent activity on this cache, then
           * perform only a partial refill.  Otherwise we could generate
           * refill bouncing.
           */
          batchcount = BATCHREFILL_LIMIT;
      }
      l3 = cachep->nodelists[node];

      spin_lock(&l3->list_lock);

      /* See if we can refill from the shared array */
      // during initialization l3->shared is still empty, so this path is skipped
      if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
          l3->shared->touched = 1;
          goto alloc_done;
      }

      while (batchcount > 0) {
          struct list_head *entry;
          struct slab *slabp;

          entry = l3->slabs_partial.next;
          if (entry == &l3->slabs_partial) {
              l3->free_touched = 1;
              entry = l3->slabs_free.next;
              if (entry == &l3->slabs_free)
                  goto must_grow;
          }

          // during initialization the pages were just allocated,
          // so all slabs still sit on the free list
          slabp = list_entry(entry, struct slab, list);
          // debug sanity checks
          check_slabp(cachep, slabp);
          check_spinlock_acquired(cachep);
          // the slab came from the partial or free list, so it must
          // still hold at least one free object
          BUG_ON(slabp->inuse >= cachep->num);

          while (slabp->inuse < cachep->num && batchcount--) {
              // move one object from the slab into the per-CPU array_cache
              ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, node);
          }
          check_slabp(cachep, slabp);

          // move the slab to the list that matches its new state
          list_del(&slabp->list);
          if (slabp->free == BUFCTL_END)
              list_add(&slabp->list, &l3->slabs_full);
          else
              list_add(&slabp->list, &l3->slabs_partial);
      }

  must_grow:
      // objects were moved into ac, so the node's free_objects count shrinks
      l3->free_objects -= ac->avail;
  alloc_done:
      spin_unlock(&l3->list_lock);

      if (unlikely(!ac->avail)) {
          int x;
          x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);

          // cache_grow() may re-enable interrupts, so ac could have changed
          ac = cpu_cache_get(cachep);
          if (!x && ac->avail == 0)    /* no objects in sight? abort */
              return NULL;

          if (!ac->avail)              /* objects refilled by interrupt? */
              goto retry;
      }
      ac->touched = 1;
      return ac->entry[--ac->avail];
  }
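
The per-CPU array_cache that cache_alloc_refill fills behaves as a simple LIFO stack: the refill loop pushes objects with ac->entry[ac->avail++], and the allocation side (including the tail of this function) pops with ac->entry[--ac->avail]. Below is a minimal userspace sketch of that stack discipline; struct toy_array_cache and the toy_* helpers are made-up stand-ins for illustration, not the kernel's definitions.

  /* Simplified model of the per-CPU array_cache LIFO (illustration only;
   * the field names mirror struct array_cache, but this is a userspace
   * sketch, not kernel code). */
  #include <assert.h>
  #include <stdio.h>

  #define AC_LIMIT 4

  struct toy_array_cache {
      unsigned int avail;        /* number of cached object pointers */
      unsigned int limit;        /* capacity of entry[] */
      void *entry[AC_LIMIT];     /* LIFO stack of object pointers */
  };

  /* refill: push objects taken from a slab, like the inner while loop above */
  static void toy_refill(struct toy_array_cache *ac, void **objs, int n)
  {
      for (int i = 0; i < n && ac->avail < ac->limit; i++)
          ac->entry[ac->avail++] = objs[i];
  }

  /* fast-path alloc: pop the most recently pushed object */
  static void *toy_alloc(struct toy_array_cache *ac)
  {
      if (!ac->avail)
          return NULL;           /* this is where cache_alloc_refill() kicks in */
      return ac->entry[--ac->avail];
  }

  int main(void)
  {
      int a, b, c;
      void *objs[] = { &a, &b, &c };
      struct toy_array_cache ac = { .avail = 0, .limit = AC_LIMIT };

      toy_refill(&ac, objs, 3);
      assert(toy_alloc(&ac) == &c);  /* LIFO: last pushed comes out first */
      assert(toy_alloc(&ac) == &b);
      printf("avail after two allocs: %u\n", ac.avail); /* prints 1 */
      return 0;
  }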

2. cache_grow

  static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid,
              void *objp)
  {
      struct slab *slabp;
      size_t offset;
      gfp_t local_flags;
      struct kmem_list3 *l3;

      /*
       * Be lazy and only check for valid flags here, keeping it out of the
       * critical path in kmem_cache_alloc().
       */
      BUG_ON(flags & GFP_SLAB_BUG_MASK);
      local_flags = flags & (GFP_CONSTRAINT_MASK | GFP_RECLAIM_MASK);

      /* Take the l3 list lock to change the colour_next on this node */
      check_irq_off();
      l3 = cachep->nodelists[nodeid];
      spin_lock(&l3->list_lock);

      /* Get colour for the slab, and calc the next value. */
      offset = l3->colour_next;
      l3->colour_next++;
      if (l3->colour_next >= cachep->colour)
          l3->colour_next = 0;
      spin_unlock(&l3->list_lock);

      offset *= cachep->colour_off;

      if (local_flags & __GFP_WAIT)
          local_irq_enable();

      /*
       * The test for missing atomic flag is performed here, rather than
       * the more obvious place, simply to reduce the critical path length
       * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
       * will eventually be caught here (where it matters).
       */
      kmem_flagcheck(cachep, flags);

      // allocate the backing pages for the new slab, unless the caller
      // already passed them in through objp
      if (!objp)
          objp = kmem_getpages(cachep, local_flags, nodeid);
      if (!objp)
          goto failed;

      // allocate the slab management structure (on- or off-slab)
      slabp = alloc_slabmgmt(cachep, objp, offset,
                  local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
      if (!slabp)
          goto opps1;

      // make the pages point back to their cache and slab descriptor
      slab_map_pages(cachep, slabp, objp);

      // construct all objects in the new slab
      cache_init_objs(cachep, slabp);

      if (local_flags & __GFP_WAIT)
          local_irq_disable();
      check_irq_off();
      spin_lock(&l3->list_lock);

      // the fresh slab has no object in use yet: put it on the free list
      list_add_tail(&slabp->list, &(l3->slabs_free));
      l3->free_objects += cachep->num;
      spin_unlock(&l3->list_lock);
      return 1;

  opps1:
      kmem_freepages(cachep, objp);
  failed:
      if (local_flags & __GFP_WAIT)
          local_irq_disable();
      return 0;
  }
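
cache_grow gives every new slab a colour offset: colour_next cycles through 0 .. cachep->colour - 1 and is then multiplied by colour_off, so consecutive slabs place their first object at different offsets and spread accesses across cache lines. A small userspace sketch of that cycling follows; the colour and colour_off values are made up for illustration and are not taken from a real cache.

  /* Illustration of the colour offset sequence computed in cache_grow()
   * (userspace sketch; the 'colour' and 'colour_off' values are invented). */
  #include <stdio.h>

  int main(void)
  {
      unsigned int colour = 4;        /* number of distinct colours */
      unsigned int colour_off = 64;   /* bytes per colour step */
      unsigned int colour_next = 0;

      for (int slab = 0; slab < 6; slab++) {
          size_t offset = colour_next * colour_off;
          colour_next++;
          if (colour_next >= colour)
              colour_next = 0;        /* wrap around, as cache_grow() does */
          printf("slab %d starts its objects at offset %zu\n", slab, offset);
      }
      return 0;                       /* prints offsets 0, 64, 128, 192, 0, 64 */
  }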