1. struct pglist_data (top-level structure of the buddy system)

    typedef struct pglist_data {
        // on 64-bit systems these are ZONE_DMA / ZONE_DMA32 / ZONE_NORMAL
        struct zone node_zones[MAX_NR_ZONES];
        // MAX_ZONELISTS is 2:
        //   [0] -> zonelist with fallback
        //   [1] -> no fallback (GFP_THISNODE)
        struct zonelist node_zonelists[MAX_ZONELISTS];
        // number of zones in this node
        int nr_zones;
        // in the original flat model, the node's struct pages live in one big array
        struct page *node_mem_map;
        struct page_cgroup *node_page_cgroup;
        // boot-time (bootmem) memory allocator
        struct bootmem_data *bdata;
        // lock protecting the node size fields for memory hotplug
        spinlock_t node_size_lock;
        // first page frame number of this node
        unsigned long node_start_pfn;
        // number of physical pages, excluding holes
        unsigned long node_present_pages;
        // total size of the node in pages, including holes
        unsigned long node_spanned_pages;
        // node ID
        int node_id;
        // wait queue used to wake up the kswapd thread
        wait_queue_head_t kswapd_wait;
        // task_struct of the kswapd thread
        struct task_struct *kswapd;
        // largest allocation order that kswapd was asked to reclaim for
        int kswapd_max_order;
    } pg_data_t;
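
To see how these fields hang together, here is a minimal, hypothetical kernel-side sketch (not a real kernel function) that walks every zone of one node through its pg_data_t; it assumes the older struct zone layout shown in section 2, where the free page count is the zone->free_pages field:

    #include <linux/mmzone.h>

    /* Sum the free pages of every populated zone of one node. */
    static unsigned long node_free_pages(pg_data_t *pgdat)
    {
        unsigned long free = 0;
        int i;

        for (i = 0; i < pgdat->nr_zones; i++) {
            struct zone *zone = &pgdat->node_zones[i];

            if (!zone->present_pages)   /* empty zone, e.g. no HIGHMEM */
                continue;
            free += zone->free_pages;   /* old 2.6-era field; later kernels
                                         * keep this in vm_stat[NR_FREE_PAGES] */
        }
        return free;
    }
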
    /*
     * One allocation request operates on a zonelist. A zonelist
     * is a list of zones, the first one is the 'goal' of the
     * allocation, the other zones are fallback zones, in decreasing
     * priority.
     *
     * If zlcache_ptr is not NULL, then it is just the address of zlcache,
     * as explained above. If zlcache_ptr is NULL, there is no zlcache.
     *
     * To speed the reading of the zonelist, the zonerefs contain the zone index
     * of the entry being read. Helper functions to access information given
     * a struct zoneref are
     *
     * zonelist_zone()      - Return the struct zone * for an entry in _zonerefs
     * zonelist_zone_idx()  - Return the index of the zone for an entry
     * zonelist_node_idx()  - Return the index of the node for an entry
     */
    struct zonelist {
        struct zonelist_cache *zlcache_ptr;                   // NULL or &zlcache
        struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
        struct zonelist_cache zlcache;                        // optional ...
    };

    /*
     * This struct contains information about a zone in a zonelist. It is stored
     * here to avoid dereferences into large structures and lookups of tables
     */
    struct zoneref {
        struct zone *zone;  /* Pointer to actual zone */
        int zone_idx;       /* zone_idx(zoneref->zone) */
    };
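
The three helpers named in the comment above are trivial accessors over struct zoneref; roughly as they appear in kernels of this era (a sketch, field names as in the structs shown here):

    static inline struct zone *zonelist_zone(struct zoneref *zoneref)
    {
        return zoneref->zone;
    }

    static inline int zonelist_zone_idx(struct zoneref *zoneref)
    {
        return zoneref->zone_idx;
    }

    static inline int zonelist_node_idx(struct zoneref *zoneref)
    {
    #ifdef CONFIG_NUMA
        /* zone_to_nid() is not available in this context */
        return zoneref->zone->node;
    #else
        return 0;
    #endif
    }
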
    /*
     * We cache key information from each zonelist for smaller cache
     * footprint when scanning for free pages in get_page_from_freelist().
     *
     * 1) The BITMAP fullzones tracks which zones in a zonelist have come
     *    up short of free memory since the last time (last_fullzone_zap)
     *    we zero'd fullzones.
     * 2) The array z_to_n[] maps each zone in the zonelist to its node
     *    id, so that we can efficiently evaluate whether that node is
     *    set in the current task's mems_allowed.
     *
     * Both fullzones and z_to_n[] are one-to-one with the zonelist,
     * indexed by a zone's offset in the zonelist zones[] array.
     *
     * The get_page_from_freelist() routine does two scans. During the
     * first scan, we skip zones whose corresponding bit in 'fullzones'
     * is set or whose corresponding node in current->mems_allowed (which
     * comes from cpusets) is not set. During the second scan, we bypass
     * this zonelist_cache, to ensure we look methodically at each zone.
     *
     * Once per second, we zero out (zap) fullzones, forcing us to
     * reconsider nodes that might have regained more free memory.
     * The field last_full_zap is the time we last zapped fullzones.
     *
     * This mechanism reduces the amount of time we waste repeatedly
     * reexamining zones for free memory when they just came up low on
     * memory a moment ago.
     *
     * The zonelist_cache struct members logically belong in struct
     * zonelist. However, the mempolicy zonelists constructed for
     * MPOL_BIND are intentionally variable length (and usually much
     * shorter). A general purpose mechanism for handling structs with
     * multiple variable length members is more mechanism than we want
     * here. We resort to some special case hackery instead.
     *
     * The MPOL_BIND zonelists don't need this zonelist_cache (in good
     * part because they are shorter), so we put the fixed length stuff
     * at the front of the zonelist struct, ending in a variable length
     * zones[], as is needed by MPOL_BIND.
     *
     * Then we put the optional zonelist cache on the end of the zonelist
     * struct. This optional stuff is found by a 'zlcache_ptr' pointer in
     * the fixed length portion at the front of the struct. This pointer
     * both enables us to find the zonelist cache, and in the case of
     * MPOL_BIND zonelists (which will just set the zlcache_ptr to NULL)
     * to know that the zonelist cache is not there.
     *
     * The end result is that struct zonelists come in two flavors:
     * 1) The full, fixed length version, shown below, and
     * 2) The custom zonelists for MPOL_BIND.
     * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
     *
     * Even though there may be multiple CPU cores on a node modifying
     * fullzones or last_full_zap in the same zonelist_cache at the same
     * time, we don't lock it. This is just hint data - if it is wrong now
     * and then, the allocator will still function, perhaps a bit slower.
     */
    struct zonelist_cache {
        unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];      /* zone->nid */
        DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);  /* zone full? */
        unsigned long last_full_zap;                        /* when last zap'd (jiffies) */
    };
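
The first-scan check described in that comment boils down to a few lines; roughly what mm/page_alloc.c of this era does in the helper called from get_page_from_freelist() (a sketch):

    /*
     * Is this zone worth trying during the first scan? Skip it if its
     * fullzones bit is set or its node is not in the allowed node mask.
     */
    static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
                                     nodemask_t *allowednodes)
    {
        struct zonelist_cache *zlc; /* cached zonelist speedup info */
        int i;                      /* index of *z in zonelist zones */
        int n;                      /* node that zone *z is on */

        zlc = zonelist->zlcache_ptr;
        if (!zlc)                   /* e.g. MPOL_BIND zonelists have no cache */
            return 1;

        i = z - zonelist->_zonerefs;
        n = zlc->z_to_n[i];

        /* This zone is worth trying if it is allowed but not full */
        return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
    }
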

2. struct zone (manages the memory of each zone)

    // On 32-bit x86, each node's memory is split into (up to) three zones:
    //   ZONE_DMA      < 16 MB
    //   ZONE_NORMAL   16 MB - 896 MB, directly mapped starting at PAGE_OFFSET
    //   ZONE_HIGHMEM  > 896 MB, used only for page cache and user processes
    // ZONE_HIGHMEM is itself managed in several pieces, not covered here
    #define ZONE_DMA 0
    #define ZONE_NORMAL 1
    #define ZONE_HIGHMEM 2
    #define MAX_NR_ZONES 3
    #define ZONES_SHIFT 2
    #define GFP_ZONEMASK 0x03

An earlier 2.6-era layout of struct zone:

    struct zone {
        spinlock_t lock;
        // number of free pages in this zone
        unsigned long free_pages;
        // minimum number of reserved pages in the zone
        unsigned long pages_min;
        // lower bound for page reclaim, also used as an allocator threshold;
        // usually 5/4 of pages_min
        unsigned long pages_low;
        // upper bound for page reclaim, also used as an allocator threshold;
        // usually 3/2 of pages_min
        unsigned long pages_high;
        ZONE_PADDING(_pad1_)
        // page LRU bookkeeping
        spinlock_t lru_lock;
        struct list_head active_list;
        struct list_head inactive_list;
        unsigned long nr_active;
        unsigned long nr_inactive;
        atomic_t refill_counter;
        int all_unreclaimable;          /* All pages pinned */
        unsigned long pages_scanned;    /* since last reclaim */
        ZONE_PADDING(_pad2_)
        /*
         * prev_priority holds the scanning priority for this zone. It is
         * defined as the scanning priority at which we achieved our reclaim
         * target at the previous try_to_free_pages() or balance_pgdat()
         * invocation.
         *
         * We use prev_priority as a measure of how much stress page reclaim is
         * under - it drives the swappiness decision: whether to unmap mapped
         * pages.
         *
         * temp_priority is used to remember the scanning priority at which
         * this zone was successfully refilled to free_pages == pages_high.
         *
         * Access to both these fields is quite racy even on uniprocessor. But
         * it is expected to average out OK.
         */
        int temp_priority;
        int prev_priority;
        // free lists, one per allocation order
        struct free_area free_area[MAX_ORDER];
        /*
         * wait_table -- the array holding the hash table
         * wait_table_size -- the size of the hash table array
         * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
         *
         * The purpose of all these is to keep track of the people
         * waiting for a page to become available and make them
         * runnable again when possible. The trouble is that this
         * consumes a lot of space, especially when so few things
         * wait on pages at a given time. So instead of using
         * per-page waitqueues, we use a waitqueue hash table.
         *
         * The bucket discipline is to sleep on the same queue when
         * colliding and wake all in that wait queue when removing.
         * When something wakes, it must check to be sure its page is
         * truly available, a la thundering herd. The cost of a
         * collision is great, but given the expected load of the
         * table, they should be so rare as to be outweighed by the
         * benefits from the saved space.
         *
         * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
         * primary users of these fields, and in mm/page_alloc.c
         * free_area_init_core() performs the initialization of them.
         */
        // processes waiting on a page of this zone; see the page_waitqueue()
        // sketch after this struct
        wait_queue_head_t *wait_table;
        // number of entries in the wait table
        unsigned long wait_table_size;
        unsigned long wait_table_bits;
        ZONE_PADDING(_pad3_)
        // per-CPU page frame cache; NUMA-specific details are not covered here
        struct per_cpu_pageset pageset[NR_CPUS];
        // back-pointer to the node-level pg_data_t (top-level buddy-system structure)
        struct pglist_data *zone_pgdat;
        // address of the page array that belongs to this zone
        struct page *zone_mem_map;
        // first page frame number of the zone
        // zone_start_pfn == zone_start_paddr >> PAGE_SHIFT
        unsigned long zone_start_pfn;
        // zone name
        char *name;
        // total size of the zone in pages, including holes
        unsigned long spanned_pages;
        // total number of pages, excluding holes
        unsigned long present_pages;
    } ____cacheline_maxaligned_in_smp;
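
The hashed wait table described in the comment above is consulted via a small helper; roughly what mm/filemap.c does in kernels of this era (a sketch):

    #include <linux/hash.h>

    /* Map a page to the shared wait queue bucket it sleeps on. */
    static wait_queue_head_t *page_waitqueue(struct page *page)
    {
        const struct zone *zone = page_zone(page);

        return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
    }
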
Later kernels drop the pages_min/pages_low/pages_high fields in favor of a single watermark[] array indexed by the following enum:

    // the different zone watermarks
    enum zone_watermarks {
        WMARK_MIN,
        WMARK_LOW,
        WMARK_HIGH,
        NR_WMARK
    };
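
The watermarks are usually read through small accessor macros, as defined in kernels of this generation; kswapd is woken when free pages fall below the low watermark and goes back to sleep at the high watermark, while falling below min pushes the allocator into direct reclaim:

    #define min_wmark_pages(z)  (z->watermark[WMARK_MIN])
    #define low_wmark_pages(z)  (z->watermark[WMARK_LOW])
    #define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
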
The corresponding later layout of struct zone:

    struct zone {
        /* zone watermarks, indexed by enum zone_watermarks above */
        unsigned long watermark[NR_WMARK];
        /*
         * We don't know if the memory that we're going to allocate will be freeable
         * or/and it will be released eventually, so to avoid totally wasting several
         * GB of ram we must reserve some of the lower zone memory (otherwise we risk
         * to run OOM on the lower zones despite there's tons of freeable ram
         * on the higher zones). This array is recalculated at runtime if the
         * sysctl_lowmem_reserve_ratio sysctl changes.
         */
        unsigned long lowmem_reserve[MAX_NR_ZONES];
        // node ID
        int node;
        /*
         * zone reclaim becomes active if more unmapped pages exist.
         */
        unsigned long min_unmapped_pages;
        unsigned long min_slab_pages;
        struct per_cpu_pageset __percpu *pageset;
        /*
         * free areas of different sizes
         */
        spinlock_t lock;
        int all_unreclaimable;          /* All pages pinned */
        /* see spanned/present_pages for more description */
        seqlock_t span_seqlock;
        struct free_area free_area[MAX_ORDER];
        unsigned long *pageblock_flags;
        ZONE_PADDING(_pad1_)
        /* Fields commonly accessed by the page reclaim scanner */
        spinlock_t lru_lock;
        struct zone_lru {
            struct list_head list;
        } lru[NR_LRU_LISTS];
        struct zone_reclaim_stat reclaim_stat;
        unsigned long pages_scanned;    /* since last reclaim */
        unsigned long flags;            /* zone flags, see below */
        /* Zone statistics */
        atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
        /*
         * prev_priority holds the scanning priority for this zone. It is
         * defined as the scanning priority at which we achieved our reclaim
         * target at the previous try_to_free_pages() or balance_pgdat()
         * invocation.
         *
         * We use prev_priority as a measure of how much stress page reclaim is
         * under - it drives the swappiness decision: whether to unmap mapped
         * pages.
         *
         * Access to this field is quite racy even on uniprocessor. But
         * it is expected to average out OK.
         */
        int prev_priority;
        /*
         * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
         * this zone's LRU. Maintained by the pageout code.
         */
        unsigned int inactive_ratio;
        ZONE_PADDING(_pad2_)
        /* Rarely used or read-mostly fields */
        /*
         * wait_table -- the array holding the hash table
         * wait_table_hash_nr_entries -- the size of the hash table array
         * wait_table_bits -- wait_table_hash_nr_entries == (1 << wait_table_bits)
         *
         * The purpose of all these is to keep track of the people
         * waiting for a page to become available and make them
         * runnable again when possible. The trouble is that this
         * consumes a lot of space, especially when so few things
         * wait on pages at a given time. So instead of using
         * per-page waitqueues, we use a waitqueue hash table.
         *
         * The bucket discipline is to sleep on the same queue when
         * colliding and wake all in that wait queue when removing.
         * When something wakes, it must check to be sure its page is
         * truly available, a la thundering herd. The cost of a
         * collision is great, but given the expected load of the
         * table, they should be so rare as to be outweighed by the
         * benefits from the saved space.
         *
         * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
         * primary users of these fields, and in mm/page_alloc.c
         * free_area_init_core() performs the initialization of them.
         */
        wait_queue_head_t *wait_table;
        unsigned long wait_table_hash_nr_entries;
        unsigned long wait_table_bits;
        /*
         * Discontig memory support fields.
         */
        struct pglist_data *zone_pgdat;
        /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
        unsigned long zone_start_pfn;
        /*
         * zone_start_pfn, spanned_pages and present_pages are all
         * protected by span_seqlock. It is a seqlock because it has
         * to be read outside of zone->lock, and it is done in the main
         * allocator path. But, it is written quite infrequently.
         *
         * The lock is declared along with zone->lock because it is
         * frequently read in proximity to zone->lock. It's good to
         * give them a chance of being in the same cacheline.
         */
        unsigned long spanned_pages;    /* total size, including holes */
        unsigned long present_pages;    /* amount of memory (excluding holes) */
        /*
         * rarely used fields:
         */
        const char *name;
    } ____cacheline_internodealigned_in_smp;
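
To see how the watermarks, lowmem_reserve[] and free_area[] combine, here is a simplified sketch of the zone_watermark_ok() check from this era; it omits the alloc_flags parameter and the ALLOC_HIGH/ALLOC_HARDER adjustments, which are covered in section 4.4:

    /*
     * Return 1 if free pages are above 'mark' plus the lowmem reserve for the
     * requesting zone class, and the higher orders still have enough pages.
     */
    static int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                                 int classzone_idx)
    {
        long min = mark;
        long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
        int o;

        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
            return 0;

        for (o = 0; o < order; o++) {
            /* At the next order, this order's pages become unavailable */
            free_pages -= z->free_area[o].nr_free << o;

            /* Require fewer higher order pages to be free */
            min >>= 1;

            if (free_pages <= min)
                return 0;
        }
        return 1;
    }
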

3. struct page (descriptor of each physical page)

    // The OS keeps one struct page for every physical page frame;
    // it tracks the state of that physical page.
    struct page {
        unsigned long flags;    /* Atomic flags, some possibly updated asynchronously */
        atomic_t _count;        /* Usage count, see below */
        union {
            atomic_t _mapcount; /* Count of ptes mapped in mms,
                                 * to show when page is mapped
                                 * & limit reverse map searches.
                                 */
            struct {            /* SLUB */
                u16 inuse;
                u16 objects;
            };
        };
        union {
            struct {
                unsigned long private;          /* Mapping-private opaque data:
                                                 * usually used for buffer_heads
                                                 * if PagePrivate set; used for
                                                 * swp_entry_t if PageSwapCache;
                                                 * indicates order in the buddy
                                                 * system if PG_buddy is set.
                                                 */
                struct address_space *mapping;  /* If low bit clear, points to
                                                 * inode address_space, or NULL.
                                                 * If page mapped as anonymous
                                                 * memory, low bit is set, and
                                                 * it points to anon_vma object:
                                                 * see PAGE_MAPPING_ANON below.
                                                 */
            };
    #if USE_SPLIT_PTLOCKS
            spinlock_t ptl;
    #endif
            struct kmem_cache *slab;    /* SLUB: Pointer to slab */
            struct page *first_page;    /* Compound tail pages */
        };
        union {
            pgoff_t index;      /* Our offset within mapping. */
            void *freelist;     /* SLUB: freelist req. slab lock */
        };
        struct list_head lru;   /* Pageout list, eg. active_list
                                 * protected by zone->lru_lock !
                                 */
        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
    #if defined(WANT_PAGE_VIRTUAL)
        void *virtual;          /* Kernel virtual address (NULL if
                                   not kmapped, ie. highmem) */
    #endif /* WANT_PAGE_VIRTUAL */
    #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
        unsigned long debug_flags;  /* Use atomic bitops on this */
    #endif
    #ifdef CONFIG_KMEMCHECK
        /*
         * kmemcheck wants to track the status of each byte in a page; this
         * is a pointer to such a status block. NULL if not tracked.
         */
        void *shadow;
    #endif
    };
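
The "low bit of mapping" trick mentioned in the comment is decoded by a couple of small helpers; roughly as in kernels of this era (a simplified sketch that ignores the swap-cache case handled by the real page_mapping()):

    #define PAGE_MAPPING_ANON 1

    /* Is this page anonymous, i.e. does mapping point to an anon_vma? */
    static inline int PageAnon(struct page *page)
    {
        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
    }

    /* Return the address_space of a file-backed page, NULL for anon pages. */
    static inline struct address_space *page_mapping(struct page *page)
    {
        struct address_space *mapping = page->mapping;

        if ((unsigned long)mapping & PAGE_MAPPING_ANON)
            return NULL;
        return mapping;
    }
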

4. Memory allocation policy

4.1 Allocation policy (decides which node to allocate from)

    // For details see: http://blog.sina.com.cn/s/blog_6bd2fa790102znpt.html
    // The policies are: interleaved allocation, preferred-node allocation,
    // bind-to-node allocation, and default (local) allocation
    // (a userspace example using set_mempolicy() follows this block).
    // 1. Interleave (MPOL_INTERLEAVE): spread allocations across all nodes (or the
    //    given set of nodes) in round-robin order;
    // 2. Preferred (MPOL_PREFERRED): allocate on the given node first, fall back
    //    to other nodes on failure;
    // 3. Bind (MPOL_BIND): force allocations onto the given set of nodes;
    // 4. Default (MPOL_DEFAULT, "local alloc"): always allocate on the local node,
    //    i.e. the node the current process is running on.
    // TODO: what happens with local allocation when the local node runs out of
    //       memory? And how is memory distributed when the process runs on
    //       several nodes?
    enum {
        MPOL_DEFAULT,
        MPOL_PREFERRED,
        MPOL_BIND,
        MPOL_INTERLEAVE,
        MPOL_MAX,
    };
    /* Flags for set_mempolicy */
    #define MPOL_F_STATIC_NODES     (1 << 15)
    #define MPOL_F_RELATIVE_NODES   (1 << 14)
    /*
     * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
     * either set_mempolicy() or mbind().
     */
    #define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
    /* Flags for get_mempolicy */
    #define MPOL_F_NODE         (1<<0)  /* return next IL mode instead of node mask */
    #define MPOL_F_ADDR         (1<<1)  /* look up vma using address */
    #define MPOL_F_MEMS_ALLOWED (1<<2)  /* return allowed memories */
    /* Flags for mbind */
    #define MPOL_MF_STRICT      (1<<0)  /* Verify existing pages in the mapping */
    #define MPOL_MF_MOVE        (1<<1)  /* Move pages owned by this process to conform to mapping */
    #define MPOL_MF_MOVE_ALL    (1<<2)  /* Move every page to conform to mapping */
    #define MPOL_MF_INTERNAL    (1<<3)  /* Internal flags start here */
    /*
     * Internal flags that share the struct mempolicy flags word with
     * "mode flags". These flags are allocated from bit 0 up, as they
     * are never OR'ed into the mode in mempolicy API arguments.
     */
    #define MPOL_F_SHARED   (1 << 0)    /* identify shared policies */
    #define MPOL_F_LOCAL    (1 << 1)    /* preferred local allocation */
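
As a small userspace illustration of the bind policy, here is a sketch that assumes libnuma's <numaif.h> wrapper around the set_mempolicy() syscall is available (compile with -lnuma) and that the machine has a node 0:

    #include <numaif.h>     /* set_mempolicy(), MPOL_* - from libnuma */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        /* Node mask with only node 0 set. */
        unsigned long nodemask = 1UL << 0;

        /* MPOL_BIND: force all future allocations of this task onto node 0. */
        if (set_mempolicy(MPOL_BIND, &nodemask, 8 * sizeof(nodemask)) != 0) {
            perror("set_mempolicy");
            return 1;
        }

        /* Touch the memory so physical pages are actually allocated under the
         * new policy (malloc alone only reserves virtual address space). */
        size_t len = 16UL << 20;
        char *buf = malloc(len);
        if (!buf)
            return 1;
        memset(buf, 0, len);
        free(buf);
        return 0;
    }
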

4.2 Allocation mask: zone modifiers (decide which zone memory comes from)

    // Zone modifier bits of the allocation mask
    // By default (no zone bit set) memory comes from ZONE_NORMAL;
    // the gfp_zone() sketch below shows the mapping
    // For details see: https://www.cnblogs.com/arnoldlu/p/8250734.html
    //                  https://blog.csdn.net/farmwang/article/details/66975128
    #define __GFP_DMA      ((__force gfp_t)0x01u)
    #define __GFP_HIGHMEM  ((__force gfp_t)0x02u)
    #define __GFP_DMA32    ((__force gfp_t)0x04u)
    #define __GFP_MOVABLE  ((__force gfp_t)0x08u)
    #define GFP_ZONEMASK   (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
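
How these bits select a zone is easiest to see in the older, pre-bit-table version of gfp_zone(); roughly as follows (a sketch with the config #ifdefs trimmed):

    /* Map the zone modifier bits of a gfp mask to the zone to allocate from. */
    static inline enum zone_type gfp_zone(gfp_t flags)
    {
        if (flags & __GFP_DMA)
            return ZONE_DMA;
        if (flags & __GFP_DMA32)
            return ZONE_DMA32;
        if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
                     (__GFP_HIGHMEM | __GFP_MOVABLE))
            return ZONE_MOVABLE;
        if (flags & __GFP_HIGHMEM)
            return ZONE_HIGHMEM;
        return ZONE_NORMAL;     /* the default when no zone bit is set */
    }
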

4.3 Allocation attributes: action modifiers (decide the allocator's behavior)

    // Action modifier bits of the allocation mask
    // (common GFP_* combinations are shown after this block)
    // For details see: https://www.cnblogs.com/arnoldlu/p/8250734.html
    #define __GFP_WAIT          ((__force gfp_t)0x10u)    /* Can wait and reschedule? */
    #define __GFP_HIGH          ((__force gfp_t)0x20u)    /* Should access emergency pools? */
    #define __GFP_IO            ((__force gfp_t)0x40u)    /* Can start physical IO? */
    #define __GFP_FS            ((__force gfp_t)0x80u)    /* Can call down to low-level FS? */
    #define __GFP_COLD          ((__force gfp_t)0x100u)   /* Cache-cold page required */
    #define __GFP_NOWARN        ((__force gfp_t)0x200u)   /* Suppress page allocation failure warning */
    #define __GFP_REPEAT        ((__force gfp_t)0x400u)   /* See above */
    #define __GFP_NOFAIL        ((__force gfp_t)0x800u)   /* See above */
    #define __GFP_NORETRY       ((__force gfp_t)0x1000u)  /* See above */
    #define __GFP_COMP          ((__force gfp_t)0x4000u)  /* Add compound page metadata */
    #define __GFP_ZERO          ((__force gfp_t)0x8000u)  /* Return zeroed page on success */
    #define __GFP_NOMEMALLOC    ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
    #define __GFP_HARDWALL      ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
    #define __GFP_THISNODE      ((__force gfp_t)0x40000u) /* No fallback, no policies */
    #define __GFP_RECLAIMABLE   ((__force gfp_t)0x80000u) /* Page is reclaimable */
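
In practice callers rarely pass these bits individually; the familiar GFP_* constants are combinations of them (definitions as in kernels of this era), handed to the page allocator together with extra modifiers such as __GFP_ZERO:

    #define GFP_ATOMIC   (__GFP_HIGH)
    #define GFP_NOIO     (__GFP_WAIT)
    #define GFP_NOFS     (__GFP_WAIT | __GFP_IO)
    #define GFP_KERNEL   (__GFP_WAIT | __GFP_IO | __GFP_FS)
    #define GFP_USER     (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
    #define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
                          __GFP_HIGHMEM)

    /* Typical use: allocate one zeroed page (order 0), sleeping if necessary. */
    static struct page *grab_zeroed_page(void)
    {
        return alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
    }
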

4.4 Watermark-related allocation flags

    // Flags that select which watermark an allocation is checked against
    #define ALLOC_WMARK_MIN     WMARK_MIN
    #define ALLOC_WMARK_LOW     WMARK_LOW
    #define ALLOC_WMARK_HIGH    WMARK_HIGH
    #define ALLOC_NO_WATERMARKS 0x04
    #define ALLOC_WMARK_MASK    (ALLOC_NO_WATERMARKS-1)
    // For details see: https://www.codeleading.com/article/59964929993
    // ALLOC_HARDER lowers the selected watermark by 1/4;
    // ALLOC_HIGH lowers it by 1/2.
    // Both flags shrink the reserved memory by different amounts so that the
    // allocation is more likely to succeed; ALLOC_HIGH is therefore the more
    // aggressive of the two (see the snippet below).
    #define ALLOC_HARDER    0x10
    #define ALLOC_HIGH      0x20
    #define ALLOC_CPUSET    0x40
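
How these flags are consumed can be seen in the allocator's watermark check; the relevant fragment, lightly condensed from get_page_from_freelist()/zone_watermark_ok() of this era (a sketch, variable names as in that code):

    /* Pick the watermark requested by the caller (min/low/high). */
    unsigned long mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
    long min = mark;

    /* ALLOC_HIGH (set for __GFP_HIGH callers) digs half-way into the reserve... */
    if (alloc_flags & ALLOC_HIGH)
        min -= min / 2;
    /* ...while ALLOC_HARDER (atomic / rt-task callers) takes another quarter. */
    if (alloc_flags & ALLOC_HARDER)
        min -= min / 4;

    /* The zone passes only if it still has more free pages than the
     * (lowered) watermark plus its lowmem_reserve for this allocation. */
    if (free_pages <= min + zone->lowmem_reserve[classzone_idx])
        return 0;
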