Commit 020e876

Matthew Wilcox (Oracle) authored and torvalds committed
mm: remove last argument of reuse_swap_page()
None of the callers care about the total_map_swapcount() any more.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Acked-by: Linus Torvalds <[email protected]>
Reviewed-by: William Kucharski <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent d283d42 commit 020e876

5 files changed: +7, -13 lines changed
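For quick orientation before the per-file hunks, here is a condensed before/after sketch of the interface change. It is assembled from the hunks below; the call-site fragment is illustrative, not a verbatim kernel excerpt.

/* Before: the second parameter let a caller retrieve the combined
 * map + swap count, but every remaining caller passed NULL for it. */
bool reuse_swap_page(struct page *page, int *total_map_swapcount);

	if (reuse_swap_page(page, NULL))
		/* ... make the PTE/PMD writable ... */

/* After: the unused out-parameter is dropped. */
bool reuse_swap_page(struct page *page);

	if (reuse_swap_page(page))
		/* ... make the PTE/PMD writable ... */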

include/linux/swap.h

Lines changed: 3 additions & 3 deletions
@@ -514,7 +514,7 @@ extern int __swp_swapcount(swp_entry_t entry);
 extern int swp_swapcount(swp_entry_t entry);
 extern struct swap_info_struct *page_swap_info(struct page *);
 extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
-extern bool reuse_swap_page(struct page *, int *);
+extern bool reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
@@ -680,8 +680,8 @@ static inline int swp_swapcount(swp_entry_t entry)
 	return 0;
 }
 
-#define reuse_swap_page(page, total_map_swapcount) \
-	(page_trans_huge_mapcount(page, total_map_swapcount) == 1)
+#define reuse_swap_page(page) \
+	(page_trans_huge_mapcount(page, NULL) == 1)
 
 static inline int try_to_free_swap(struct page *page)
 {
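A note on the CONFIG_SWAP=n stub above: reuse_swap_page() stays a macro there, and the change simply bakes in the NULL that callers used to forward. As a minimal illustration (the call site is hypothetical, only the expansion comes from the hunk):

	/* Caller, with CONFIG_SWAP=n: */
	if (reuse_swap_page(page))
		make_writable(page);
	/* The preprocessor turns this into: */
	if ((page_trans_huge_mapcount(page, NULL) == 1))
		make_writable(page);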

mm/huge_memory.c

Lines changed: 1 addition & 1 deletion
@@ -1322,7 +1322,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 	 * We can only reuse the page if nobody else maps the huge page or it's
 	 * part.
 	 */
-	if (reuse_swap_page(page, NULL)) {
+	if (reuse_swap_page(page)) {
 		pmd_t entry;
 		entry = pmd_mkyoung(orig_pmd);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);

mm/khugepaged.c

Lines changed: 1 addition & 1 deletion
@@ -681,7 +681,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 			goto out;
 		}
 		if (!pte_write(pteval) && PageSwapCache(page) &&
-		    !reuse_swap_page(page, NULL)) {
+		    !reuse_swap_page(page)) {
 			/*
 			 * Page is in the swap cache and cannot be re-used.
 			 * It cannot be collapsed into a THP.

mm/memory.c

Lines changed: 1 addition & 1 deletion
@@ -3627,7 +3627,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
 	pte = mk_pte(page, vma->vm_page_prot);
-	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
+	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 		vmf->flags &= ~FAULT_FLAG_WRITE;
 		ret |= VM_FAULT_WRITE;

mm/swapfile.c

Lines changed: 1 addition & 7 deletions
@@ -1668,12 +1668,8 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
  * to it. And as a side-effect, free up its swap: because the old content
  * on disk will never be read, and seeking back there to write new content
  * later would only waste time away from clustering.
- *
- * NOTE: total_map_swapcount should not be relied upon by the caller if
- * reuse_swap_page() returns false, but it may be always overwritten
- * (see the other implementation for CONFIG_SWAP=n).
  */
-bool reuse_swap_page(struct page *page, int *total_map_swapcount)
+bool reuse_swap_page(struct page *page)
 {
 	int count, total_mapcount, total_swapcount;
 
@@ -1682,8 +1678,6 @@ bool reuse_swap_page(struct page *page, int *total_map_swapcount)
 		return false;
 	count = page_trans_huge_map_swapcount(page, &total_mapcount,
 					      &total_swapcount);
-	if (total_map_swapcount)
-		*total_map_swapcount = total_mapcount + total_swapcount;
 	if (count == 1 && PageSwapCache(page) &&
 	    (likely(!PageTransCompound(page)) ||
 	     /* The remaining swap count will be freed soon */
