@@ -1601,31 +1601,30 @@ static bool page_swapped(struct page *page)
 	return false;
 }
 
-static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
+static int page_trans_huge_map_swapcount(struct page *page,
 					 int *total_swapcount)
 {
-	int i, map_swapcount, _total_mapcount, _total_swapcount;
+	int i, map_swapcount, _total_swapcount;
 	unsigned long offset = 0;
 	struct swap_info_struct *si;
 	struct swap_cluster_info *ci = NULL;
 	unsigned char *map = NULL;
-	int mapcount, swapcount = 0;
+	int swapcount = 0;
 
 	/* hugetlbfs shouldn't call it */
 	VM_BUG_ON_PAGE(PageHuge(page), page);
 
 	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
-		mapcount = page_trans_huge_mapcount(page, total_mapcount);
 		if (PageSwapCache(page))
 			swapcount = page_swapcount(page);
 		if (total_swapcount)
 			*total_swapcount = swapcount;
-		return mapcount + swapcount;
+		return swapcount + page_trans_huge_mapcount(page, NULL);
 	}
 
 	page = compound_head(page);
 
-	_total_mapcount = _total_swapcount = map_swapcount = 0;
+	_total_swapcount = map_swapcount = 0;
 	if (PageSwapCache(page)) {
 		swp_entry_t entry;
 
@@ -1639,28 +1638,22 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
 	if (map)
 		ci = lock_cluster(si, offset);
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
-		mapcount = atomic_read(&page[i]._mapcount) + 1;
-		_total_mapcount += mapcount;
+		int mapcount = atomic_read(&page[i]._mapcount) + 1;
 		if (map) {
 			swapcount = swap_count(map[offset + i]);
 			_total_swapcount += swapcount;
 		}
 		map_swapcount = max(map_swapcount, mapcount + swapcount);
 	}
 	unlock_cluster(ci);
-	if (PageDoubleMap(page)) {
+
+	if (PageDoubleMap(page))
 		map_swapcount -= 1;
-		_total_mapcount -= HPAGE_PMD_NR;
-	}
-	mapcount = compound_mapcount(page);
-	map_swapcount += mapcount;
-	_total_mapcount += mapcount;
-	if (total_mapcount)
-		*total_mapcount = _total_mapcount;
+
 	if (total_swapcount)
 		*total_swapcount = _total_swapcount;
 
-	return map_swapcount;
+	return map_swapcount + compound_mapcount(page);
 }
 
 /*
@@ -1671,13 +1664,12 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
  */
 bool reuse_swap_page(struct page *page)
 {
-	int count, total_mapcount, total_swapcount;
+	int count, total_swapcount;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	if (unlikely(PageKsm(page)))
 		return false;
-	count = page_trans_huge_map_swapcount(page, &total_mapcount,
-					      &total_swapcount);
+	count = page_trans_huge_map_swapcount(page, &total_swapcount);
 	if (count == 1 && PageSwapCache(page) &&
 	    (likely(!PageTransCompound(page)) ||
 	     /* The remaining swap count will be freed soon */