Skip to content

Commit 7749058

Browse files
Muchun Song authored and torvalds committed
mm: hugetlb: introduce nr_free_vmemmap_pages in the struct hstate
All the infrastructure is ready, so we introduce nr_free_vmemmap_pages field in the hstate to indicate how many vmemmap pages associated with a HugeTLB page that can be freed to buddy allocator. And initialize it in the hugetlb_vmemmap_init(). This patch is actual enablement of the feature. There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct page structs that can be used when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP, so add a BUILD_BUG_ON to catch invalid usage of the tail struct page. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Muchun Song <[email protected]> Acked-by: Mike Kravetz <[email protected]> Reviewed-by: Oscar Salvador <[email protected]> Reviewed-by: Miaohe Lin <[email protected]> Tested-by: Chen Huang <[email protected]> Tested-by: Bodeddula Balasubramaniam <[email protected]> Cc: Alexander Viro <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Anshuman Khandual <[email protected]> Cc: Balbir Singh <[email protected]> Cc: Barry Song <[email protected]> Cc: Borislav Petkov <[email protected]> Cc: Dave Hansen <[email protected]> Cc: David Hildenbrand <[email protected]> Cc: David Rientjes <[email protected]> Cc: HORIGUCHI NAOYA <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Joao Martins <[email protected]> Cc: Joerg Roedel <[email protected]> Cc: Jonathan Corbet <[email protected]> Cc: Matthew Wilcox <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Mina Almasry <[email protected]> Cc: Oliver Neukum <[email protected]> Cc: Paul E. McKenney <[email protected]> Cc: Pawan Gupta <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Randy Dunlap <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Xiongchun Duan <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 4bab496 commit 7749058

File tree

4 files changed

+43
-4
lines changed

4 files changed

+43
-4
lines changed

include/linux/hugetlb.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -608,6 +608,9 @@ struct hstate {
608608
unsigned int nr_huge_pages_node[MAX_NUMNODES];
609609
unsigned int free_huge_pages_node[MAX_NUMNODES];
610610
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
611+
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
612+
unsigned int nr_free_vmemmap_pages;
613+
#endif
611614
#ifdef CONFIG_CGROUP_HUGETLB
612615
/* cgroup control files */
613616
struct cftype cgroup_files_dfl[7];

mm/hugetlb.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3585,6 +3585,7 @@ void __init hugetlb_add_hstate(unsigned int order)
35853585
h->next_nid_to_free = first_memory_node;
35863586
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
35873587
huge_page_size(h)/1024);
3588+
hugetlb_vmemmap_init(h);
35883589

35893590
parsed_hstate = h;
35903591
}

mm/hugetlb_vmemmap.c

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -262,3 +262,36 @@ void free_huge_page_vmemmap(struct hstate *h, struct page *head)
262262

263263
SetHPageVmemmapOptimized(head);
264264
}
265+
266+
void __init hugetlb_vmemmap_init(struct hstate *h)
{
	unsigned int vmemmap_pages;
	unsigned int subpages = pages_per_huge_page(h);

	/*
	 * Only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct page
	 * structs are usable when CONFIG_HUGETLB_PAGE_FREE_VMEMMAP is set,
	 * so this BUILD_BUG_ON catches invalid use of the tail struct pages.
	 */
	BUILD_BUG_ON(__NR_USED_SUBPAGE >=
		     RESERVE_VMEMMAP_SIZE / sizeof(struct page));

	if (!hugetlb_free_vmemmap_enabled)
		return;

	vmemmap_pages = (subpages * sizeof(struct page)) >> PAGE_SHIFT;
	/*
	 * Neither the head page nor the first tail page is given back to the
	 * buddy allocator; the remaining vmemmap pages are remapped onto the
	 * first tail page and can therefore be freed.
	 *
	 * RESERVE_VMEMMAP_NR can exceed @vmemmap_pages on some architectures
	 * (e.g. aarch64); see Documentation/arm64/hugetlbpage.rst for the
	 * details.  In that case nr_free_vmemmap_pages stays zero.
	 */
	if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
		h->nr_free_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;

	pr_info("can free %d vmemmap pages for %s\n", h->nr_free_vmemmap_pages,
		h->name);
}

mm/hugetlb_vmemmap.h

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -13,17 +13,15 @@
1313
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
1414
int alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
1515
void free_huge_page_vmemmap(struct hstate *h, struct page *head);
16+
void hugetlb_vmemmap_init(struct hstate *h);
1617

1718
/*
1819
* How many vmemmap pages associated with a HugeTLB page that can be freed
1920
* to the buddy allocator.
20-
*
21-
* Todo: Returns zero for now, which means the feature is disabled. We will
22-
* enable it once all the infrastructure is there.
2321
*/
2422
static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
2523
{
26-
return 0;
24+
return h->nr_free_vmemmap_pages;
2725
}
2826
#else
2927
static inline int alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
@@ -35,6 +33,10 @@ static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
3533
{
3634
}
3735

36+
/* No-op stub for the !CONFIG_HUGETLB_PAGE_FREE_VMEMMAP build. */
static inline void hugetlb_vmemmap_init(struct hstate *h)
{
}
39+
3840
static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
3941
{
4042
return 0;

0 commit comments

Comments
 (0)