1. cache_alloc_refill
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
	int batchcount;
	struct kmem_list3 *l3;
	struct array_cache *ac;
	int node;

retry:
	check_irq_off();
	node = numa_node_id();
	ac = cpu_cache_get(cachep);
	/* During cache initialisation, batchcount is 1 */
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill. Otherwise we could generate
		 * refill bouncing.
		 */
		batchcount = BATCHREFILL_LIMIT;
	}
	l3 = cachep->nodelists[node];
	spin_lock(&l3->list_lock);

	/* See if we can refill from the shared array.
	 * During initialisation l3->shared is still empty, so this path
	 * is not taken. */
	if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
		l3->shared->touched = 1;
		goto alloc_done;
	}

	while (batchcount > 0) {
		struct list_head *entry;
		struct slab *slabp;

		entry = l3->slabs_partial.next;
		if (entry == &l3->slabs_partial) {
			l3->free_touched = 1;
			entry = l3->slabs_free.next;
			if (entry == &l3->slabs_free)
				goto must_grow;
		}

		/* During initialisation the pages were just allocated, so
		 * every slab still sits on the free list. */
		slabp = list_entry(entry, struct slab, list);
		/* Sanity checks */
		check_slabp(cachep, slabp);
		check_spinlock_acquired(cachep);

		/* The slab came from the partial or free list, so at least
		 * one object must be available. */
		BUG_ON(slabp->inuse >= cachep->num);

		while (slabp->inuse < cachep->num && batchcount--) {
			/* Move one object from the slab into the array cache */
			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, node);
		}
		check_slabp(cachep, slabp);

		/* Move the slab onto the correct list */
		list_del(&slabp->list);
		if (slabp->free == BUFCTL_END)
			list_add(&slabp->list, &l3->slabs_full);
		else
			list_add(&slabp->list, &l3->slabs_partial);
	}

must_grow:
	/* Objects moved to the array cache are no longer counted as free
	 * on the node. */
	l3->free_objects -= ac->avail;
alloc_done:
	spin_unlock(&l3->list_lock);

	if (unlikely(!ac->avail)) {
		int x;
		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);

		/* cache_grow() may re-enable interrupts, so ac can have changed */
		ac = cpu_cache_get(cachep);
		if (!x && ac->avail == 0)
			return NULL;

		if (!ac->avail)
			goto retry;
	}
	ac->touched = 1;
	return ac->entry[--ac->avail];
}
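The control flow is easier to see outside the kernel. Below is a minimal user-space sketch of the same idea: a per-CPU array cache is refilled in batches from the node's partial and free slab lists, and each drained slab is re-linked to the full or partial list depending on how many objects remain. All names here (toy_cache, toy_slab, refill, pick_slab, and the sizes) are hypothetical simplifications for illustration, not the kernel's API; locking, NUMA and colouring are left out.

/* Minimal user-space model of the batch refill in cache_alloc_refill().
 * Hypothetical names and sizes; not kernel code. */
#include <stdio.h>
#include <stddef.h>

#define OBJS_PER_SLAB 4
#define AC_LIMIT      16

struct toy_slab {
	struct toy_slab *next;       /* a singly linked list is enough here */
	int inuse;                   /* objects already handed out          */
	void *objs[OBJS_PER_SLAB];   /* backing objects                     */
};

struct toy_cache {
	struct toy_slab *partial;    /* slabs with some objects in use      */
	struct toy_slab *full;       /* slabs with every object in use      */
	struct toy_slab *free;       /* slabs with no object in use         */
	void *ac_entry[AC_LIMIT];    /* "per-CPU" array cache               */
	int ac_avail;
	int batchcount;
};

/* Pop a slab from the partial list, else from the free list. */
static struct toy_slab *pick_slab(struct toy_cache *c)
{
	struct toy_slab **head = c->partial ? &c->partial : &c->free;
	struct toy_slab *s = *head;
	if (s)
		*head = s->next;
	return s;
}

/* Refill the array cache; returns the number of objects transferred. */
static int refill(struct toy_cache *c)
{
	int batch = c->batchcount;
	int moved = 0;

	while (batch > 0) {
		struct toy_slab *s = pick_slab(c);
		if (!s)
			break;               /* the kernel would call cache_grow() here */

		while (s->inuse < OBJS_PER_SLAB && batch-- > 0) {
			c->ac_entry[c->ac_avail++] = s->objs[s->inuse++];
			moved++;
		}
		/* Re-link the slab according to how much of it is used. */
		if (s->inuse == OBJS_PER_SLAB) {
			s->next = c->full;
			c->full = s;
		} else {
			s->next = c->partial;
			c->partial = s;
		}
	}
	return moved;
}

int main(void)
{
	static char backing[2][OBJS_PER_SLAB];
	struct toy_slab slabs[2] = {{ 0 }};
	struct toy_cache cache = { .batchcount = 6 };
	int i, j;

	/* Two fresh slabs start on the free list, as after cache_grow(). */
	for (i = 0; i < 2; i++) {
		for (j = 0; j < OBJS_PER_SLAB; j++)
			slabs[i].objs[j] = &backing[i][j];
		slabs[i].next = cache.free;
		cache.free = &slabs[i];
	}

	printf("moved %d objects, ac_avail=%d\n", refill(&cache), cache.ac_avail);
	/* The allocator would now return ac_entry[--ac_avail]. */
	return 0;
}

With two fresh slabs and batchcount = 6, the sketch drains one slab completely (it moves to the full list) and leaves the second half-used on the partial list, which mirrors how cache_alloc_refill() re-links slabp after the inner loop.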
2. cache_grow
static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid, void *objp)
{
	struct slab *slabp;
	size_t offset;
	gfp_t local_flags;
	struct kmem_list3 *l3;

	/*
	 * Be lazy and only check for valid flags here, keeping it out of the
	 * critical path in kmem_cache_alloc().
	 */
	BUG_ON(flags & GFP_SLAB_BUG_MASK);
	local_flags = flags & (GFP_CONSTRAINT_MASK | GFP_RECLAIM_MASK);

	/* Take the l3 list lock to change the colour_next on this node */
	check_irq_off();
	l3 = cachep->nodelists[nodeid];
	spin_lock(&l3->list_lock);

	/* Get the colour for this slab, and compute the next value */
	offset = l3->colour_next;
	l3->colour_next++;
	if (l3->colour_next >= cachep->colour)
		l3->colour_next = 0;
	spin_unlock(&l3->list_lock);

	offset *= cachep->colour_off;

	if (local_flags & __GFP_WAIT)
		local_irq_enable();

	/*
	 * The test for missing atomic flag is performed here, rather than
	 * the more obvious place, simply to reduce the critical path length
	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
	 * will eventually be caught here (where it matters).
	 */
	kmem_flagcheck(cachep, flags);

	/* Get memory for the objects, then the slab management structure */
	if (!objp)
		objp = kmem_getpages(cachep, local_flags, nodeid);
	if (!objp)
		goto failed;

	slabp = alloc_slabmgmt(cachep, objp, offset,
			       local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
	if (!slabp)
		goto opps1;

	/* Link the pages to the slab/cache and initialise the objects */
	slab_map_pages(cachep, slabp, objp);
	cache_init_objs(cachep, slabp);

	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	check_irq_off();
	spin_lock(&l3->list_lock);

	/* The new slab starts out completely free */
	list_add_tail(&slabp->list, &(l3->slabs_free));
	l3->free_objects += cachep->num;
	spin_unlock(&l3->list_lock);
	return 1;

opps1:
	kmem_freepages(cachep, objp);
failed:
	if (local_flags & __GFP_WAIT)
		local_irq_disable();
	return 0;
}
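The colouring logic at the top of cache_grow() is just a per-node counter that wraps: each new slab takes the next colour, and colour multiplied by colour_off becomes the byte offset of the first object inside the slab's pages, so successive slabs stagger their objects across different cache lines. The small sketch below only demonstrates that rotation; the concrete values (colour = 4, colour_off = 64) are made up for illustration and are not taken from any particular cache.

/* Sketch of the slab-colouring rotation in cache_grow().
 * The values below are illustrative only. */
#include <stdio.h>

int main(void)
{
	size_t colour      = 4;   /* cachep->colour: number of distinct colours  */
	size_t colour_off  = 64;  /* cachep->colour_off: typically L1 line size  */
	size_t colour_next = 0;   /* l3->colour_next: per-node rotating counter  */
	int i;

	for (i = 0; i < 6; i++) {
		size_t offset = colour_next;   /* colour chosen for this slab */
		if (++colour_next >= colour)   /* wrap, as cache_grow() does  */
			colour_next = 0;
		offset *= colour_off;          /* byte offset of first object */
		printf("slab %d: first object starts at offset %zu\n", i, offset);
	}
	return 0;
}

With these assumed values, successive slabs place their first object at offsets 0, 64, 128, 192 and then wrap back to 0, so objects at the same index in different slabs do not all map to the same cache lines.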