@@ -415,7 +415,7 @@ CacheAllocator<CacheTrait>::allocateInternalTier(TierId tid,
 }
 
 template <typename CacheTrait>
-typename CacheAllocator<CacheTrait>::ItemHandle
+typename CacheAllocator<CacheTrait>::WriteHandle
 CacheAllocator<CacheTrait>::allocateInternal(PoolId pid,
                                              typename Item::Key key,
                                              uint32_t size,
@@ -1186,14 +1186,13 @@ bool CacheAllocator<CacheTrait>::addWaitContextForMovingItem(
 }
 
 template <typename CacheTrait>
-template <typename ItemPtr>
 typename CacheAllocator<CacheTrait>::ItemHandle
 CacheAllocator<CacheTrait>::moveRegularItemOnEviction(
-    ItemPtr& oldItemPtr, ItemHandle& newItemHdl) {
+    Item& oldItem, ItemHandle& newItemHdl) {
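+  // The caller must already hold the moving bit on oldItem for the duration
+  // of the move; that is what keeps other threads from freeing it under us.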
+  XDCHECK(oldItem.isMoving());
   // TODO: should we introduce new latency tracker. E.g. evictRegularLatency_
   // ??? util::LatencyTracker tracker{stats_.evictRegularLatency_};
 
-  Item& oldItem = *oldItemPtr;
   if (!oldItem.isAccessible() || oldItem.isExpired()) {
     return {};
   }
@@ -1249,7 +1248,7 @@ CacheAllocator<CacheTrait>::moveRegularItemOnEviction(
   // it is unsafe to replace the old item with a new one, so we should
   // also abort.
   if (!accessContainer_->replaceIf(oldItem, *newItemHdl,
-                                   itemEvictionPredicate)) {
+                                   itemMovingPredicate)) {
     return {};
   }
 
@@ -1269,7 +1268,7 @@ CacheAllocator<CacheTrait>::moveRegularItemOnEviction(
 
   // Inside the MM container's lock, this checks if the old item exists to
   // make sure that no other thread removed it, and only then replaces it.
-  if (!replaceInMMContainer(oldItemPtr, *newItemHdl)) {
+  if (!replaceInMMContainer(oldItem, *newItemHdl)) {
     accessContainer_->remove(*newItemHdl);
     return {};
   }
@@ -1453,42 +1452,64 @@ CacheAllocator<CacheTrait>::findEviction(TierId tid, PoolId pid, ClassId cid) {
   // Keep searching for a candidate until we were able to evict it
   // or until the search limit has been exhausted
   unsigned int searchTries = 0;
-  auto itr = mmContainer.getEvictionIterator();
   while ((config_.evictionSearchTries == 0 ||
-          config_.evictionSearchTries > searchTries) &&
-         itr) {
+          config_.evictionSearchTries > searchTries)) {
     ++searchTries;
 
-    Item* candidate = itr.get();
+    Item* toRecycle = nullptr;
+    Item* candidate = nullptr;
+
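+    // Walk the eviction queue under the MM container's lock and pick the
+    // first entry whose eviction candidate (the parent, for chained items)
+    // has no readers and can be marked as moving.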
+    mmContainer.withEvictionIterator([this, &candidate, &toRecycle, &searchTries](auto&& itr) {
+      while ((config_.evictionSearchTries == 0 ||
+              config_.evictionSearchTries > searchTries) && itr) {
+        ++searchTries;
+
+        auto* toRecycle_ = itr.get();
+        auto* candidate_ = toRecycle_->isChainedItem()
+                               ? &toRecycle_->asChainedItem().getParentItem(compressor_)
+                               : toRecycle_;
+
+        // make sure no other thread is evicting the item
+        if (candidate_->getRefCount() == 0 && candidate_->markMoving()) {
+          toRecycle = toRecycle_;
+          candidate = candidate_;
+          return;
+        }
+
+        ++itr;
+      }
+    });
+
+    if (!toRecycle)
+      continue;
+
+    XDCHECK(toRecycle);
+    XDCHECK(candidate);
+
     // for chained items, the ownership of the parent can change. We try to
     // evict what we think as parent and see if the eviction of parent
     // recycles the child we intend to.
-
-    ItemHandle toReleaseHandle = tryEvictToNextMemoryTier(tid, pid, itr);
-    bool movedToNextTier = false;
-    if (toReleaseHandle) {
-      movedToNextTier = true;
-    } else {
-      toReleaseHandle =
-          itr->isChainedItem()
-              ? advanceIteratorAndTryEvictChainedItem(tid, pid, itr)
-              : advanceIteratorAndTryEvictRegularItem(tid, pid, mmContainer, itr);
-    }
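+    // Attempt the eviction (next memory tier first, then NVM) and drop our
+    // moving mark; the refcount returned by unmarkMoving() tells us whether
+    // we hold the last reference and can recycle the item directly.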
+    auto toReleaseHandle =
+        evictNormalItem(*candidate, true /* skipIfTokenInvalid */);
+    auto ref = candidate->unmarkMoving();
 
-    if (toReleaseHandle) {
-      if (toReleaseHandle->hasChainedItem()) {
+    if (toReleaseHandle || ref == 0u) {
+      if (candidate->hasChainedItem()) {
         (*stats_.chainedItemEvictions)[pid][cid].inc();
       } else {
         (*stats_.regularItemEvictions)[pid][cid].inc();
       }
+    } else {
+      if (candidate->hasChainedItem()) {
+        stats_.evictFailParentAC.inc();
+      } else {
+        stats_.evictFailAC.inc();
+      }
+    }
 
-      // Invalidate iterator since later on we may use this mmContainer
-      // again, which cannot be done unless we drop this iterator
-      itr.destroy();
-
-      // we must be the last handle and for chained items, this will be
-      // the parent.
-      XDCHECK(toReleaseHandle.get() == candidate || candidate->isChainedItem());
+    if (toReleaseHandle) {
+      XDCHECK(toReleaseHandle.get() == candidate);
+      XDCHECK(toRecycle == candidate || toRecycle->isChainedItem());
       XDCHECK_EQ(1u, toReleaseHandle->getRefCount());
 
       // We manually release the item here because we don't want to
@@ -1504,15 +1525,18 @@ CacheAllocator<CacheTrait>::findEviction(TierId tid, PoolId pid, ClassId cid) {
      // recycle the candidate.
      if (ReleaseRes::kRecycled ==
          releaseBackToAllocator(itemToRelease, RemoveContext::kEviction,
-                                /* isNascent */ movedToNextTier, candidate)) {
-        return candidate;
+                                /* isNascent */ false, toRecycle)) {
+        return toRecycle;
+      }
+    } else if (ref == 0u) {
+      // it's safe to recycle the item here as there are no more references
+      // and the item could not have been marked as moving by another thread
+      // since it has been detached from the MMContainer.
+      if (ReleaseRes::kRecycled ==
+          releaseBackToAllocator(*candidate, RemoveContext::kEviction,
+                                 /* isNascent */ false, toRecycle)) {
+        return toRecycle;
       }
-    }
-
-    // If we destroyed the itr to possibly evict and failed, we restart
-    // from the beginning again
-    if (!itr) {
-      itr.resetToBegin();
     }
   }
   return nullptr;
@@ -1567,24 +1591,23 @@ bool CacheAllocator<CacheTrait>::shouldWriteToNvmCacheExclusive(
 }
 
 template <typename CacheTrait>
-template <typename ItemPtr>
-typename CacheAllocator<CacheTrait>::ItemHandle
+typename CacheAllocator<CacheTrait>::WriteHandle
 CacheAllocator<CacheTrait>::tryEvictToNextMemoryTier(
-    TierId tid, PoolId pid, ItemPtr& item) {
-  if (item->isChainedItem()) return {}; // TODO: We do not support ChainedItem yet
-  if (item->isExpired()) return acquire(item);
+    TierId tid, PoolId pid, Item& item) {
+  if (item.isChainedItem()) return {}; // TODO: We do not support ChainedItem yet
+  if (item.isExpired()) return acquire(&item);
 
   TierId nextTier = tid; // TODO - calculate this based on some admission policy
   while (++nextTier < numTiers_) { // try to evict down to the next memory tiers
     // allocateInternal might trigger another eviction
     auto newItemHdl = allocateInternalTier(nextTier, pid,
-                                           item->getKey(),
-                                           item->getSize(),
-                                           item->getCreationTime(),
-                                           item->getExpiryTime());
+                                           item.getKey(),
+                                           item.getSize(),
+                                           item.getCreationTime(),
+                                           item.getExpiryTime());
 
     if (newItemHdl) {
-      XDCHECK_EQ(newItemHdl->getSize(), item->getSize());
+      XDCHECK_EQ(newItemHdl->getSize(), item.getSize());
 
       return moveRegularItemOnEviction(item, newItemHdl);
     }
@@ -1594,149 +1617,11 @@ CacheAllocator<CacheTrait>::tryEvictToNextMemoryTier(
 }
 
 template <typename CacheTrait>
-typename CacheAllocator<CacheTrait>::ItemHandle
-CacheAllocator<CacheTrait>::tryEvictToNextMemoryTier(Item* item) {
-  auto tid = getTierId(*item);
-  auto pid = allocator_[tid]->getAllocInfo(item->getMemory()).poolId;
-  return tryEvictToNextMemoryTier(tid, pid, item);
-}
-
-template <typename CacheTrait>
-typename CacheAllocator<CacheTrait>::ItemHandle
-CacheAllocator<CacheTrait>::advanceIteratorAndTryEvictRegularItem(
-    TierId tid, PoolId pid, MMContainer& mmContainer, EvictionIterator& itr) {
-  Item& item = *itr;
-
-  const bool evictToNvmCache = shouldWriteToNvmCache(item);
-
-  auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey())
-                               : typename NvmCacheT::PutToken{};
-  // record the in-flight eviciton. If not, we move on to next item to avoid
-  // stalling eviction.
-  if (evictToNvmCache && !token.isValid()) {
-    ++itr;
-    stats_.evictFailConcurrentFill.inc();
-    return ItemHandle{};
-  }
-
-  // If there are other accessors, we should abort. Acquire a handle here since
-  // if we remove the item from both access containers and mm containers
-  // below, we will need a handle to ensure proper cleanup in case we end up
-  // not evicting this item
-  auto evictHandle = accessContainer_->removeIf(item, &itemEvictionPredicate);
-
-  if (!evictHandle) {
-    ++itr;
-    stats_.evictFailAC.inc();
-    return evictHandle;
-  }
-
-  mmContainer.remove(itr);
-  XDCHECK_EQ(reinterpret_cast<uintptr_t>(evictHandle.get()),
-             reinterpret_cast<uintptr_t>(&item));
-  XDCHECK(!evictHandle->isInMMContainer());
-  XDCHECK(!evictHandle->isAccessible());
-
-  // If the item is now marked as moving, that means its corresponding slab is
-  // being released right now. So, we look for the next item that is eligible
-  // for eviction. It is safe to destroy the handle here since the moving bit
-  // is set. Iterator was already advance by the remove call above.
-  if (evictHandle->isMoving()) {
-    stats_.evictFailMove.inc();
-    return ItemHandle{};
-  }
-
-  // Invalidate iterator since later on if we are not evicting this
-  // item, we may need to rely on the handle we created above to ensure
-  // proper cleanup if the item's raw refcount has dropped to 0.
-  // And since this item may be a parent item that has some child items
-  // in this very same mmContainer, we need to make sure we drop this
-  // exclusive iterator so we can gain access to it when we're cleaning
-  // up the child items
-  itr.destroy();
-
-  // Ensure that there are no accessors after removing from the access
-  // container
-  XDCHECK(evictHandle->getRefCount() == 1);
-
-  if (evictToNvmCache && shouldWriteToNvmCacheExclusive(item)) {
-    XDCHECK(token.isValid());
-    nvmCache_->put(evictHandle, std::move(token));
-  }
-  return evictHandle;
-}
-
-template <typename CacheTrait>
-typename CacheAllocator<CacheTrait>::ItemHandle
-CacheAllocator<CacheTrait>::advanceIteratorAndTryEvictChainedItem(
-    TierId tid, PoolId pid, EvictionIterator& itr) {
-  XDCHECK(itr->isChainedItem());
-
-  ChainedItem* candidate = &itr->asChainedItem();
-  ++itr;
-
-  // The parent could change at any point through transferChain. However, if
-  // that happens, we would realize that the releaseBackToAllocator return
-  // kNotRecycled and we would try another chained item, leading to transient
-  // failure.
-  auto& parent = candidate->getParentItem(compressor_);
-
-  const bool evictToNvmCache = shouldWriteToNvmCache(parent);
-
-  auto token = evictToNvmCache ? nvmCache_->createPutToken(parent.getKey())
-                               : typename NvmCacheT::PutToken{};
-
-  // if token is invalid, return. iterator is already advanced.
-  if (evictToNvmCache && !token.isValid()) {
-    stats_.evictFailConcurrentFill.inc();
-    return ItemHandle{};
-  }
-
-  // check if the parent exists in the hashtable and refcount is drained.
-  auto parentHandle =
-      accessContainer_->removeIf(parent, &itemEvictionPredicate);
-  if (!parentHandle) {
-    stats_.evictFailParentAC.inc();
-    return parentHandle;
-  }
-
-  // Invalidate iterator since later on we may use the mmContainer
-  // associated with this iterator which cannot be done unless we
-  // drop this iterator
-  //
-  // This must be done once we know the parent is not nullptr.
-  // Since we can very well be the last holder of this parent item,
-  // which may have a chained item that is linked in this MM container.
-  itr.destroy();
-
-  // Ensure we have the correct parent and we're the only user of the
-  // parent, then free it from access container. Otherwise, we abort
-  XDCHECK_EQ(reinterpret_cast<uintptr_t>(&parent),
-             reinterpret_cast<uintptr_t>(parentHandle.get()));
-  XDCHECK_EQ(1u, parent.getRefCount());
-
-  removeFromMMContainer(*parentHandle);
-
-  XDCHECK(!parent.isInMMContainer());
-  XDCHECK(!parent.isAccessible());
-
-  // TODO: add multi-tier support (similar as for unchained items)
-
-  // We need to make sure the parent is not marked as moving
-  // and we're the only holder of the parent item. Safe to destroy the handle
-  // here since moving bit is set.
-  if (parentHandle->isMoving()) {
-    stats_.evictFailParentMove.inc();
-    return ItemHandle{};
-  }
-
-  if (evictToNvmCache && shouldWriteToNvmCacheExclusive(*parentHandle)) {
-    XDCHECK(token.isValid());
-    XDCHECK(parentHandle->hasChainedItem());
-    nvmCache_->put(parentHandle, std::move(token));
-  }
-
-  return parentHandle;
+typename CacheAllocator<CacheTrait>::WriteHandle
+CacheAllocator<CacheTrait>::tryEvictToNextMemoryTier(Item& item) {
+  auto tid = getTierId(item);
+  auto pid = allocator_[tid]->getAllocInfo(item.getMemory()).poolId;
+  return tryEvictToNextMemoryTier(tid, pid, item);
 }
 
 template <typename CacheTrait>
@@ -2936,7 +2821,7 @@ void CacheAllocator<CacheTrait>::evictForSlabRelease(
       auto owningHandle =
           item.isChainedItem()
              ? evictChainedItemForSlabRelease(item.asChainedItem())
-             : evictNormalItemForSlabRelease(item);
+             : evictNormalItem(item);
 
       // we managed to evict the corresponding owner of the item and have the
       // last handle for the owner.
@@ -2993,14 +2878,15 @@ void CacheAllocator<CacheTrait>::evictForSlabRelease(
 
 template <typename CacheTrait>
 typename CacheAllocator<CacheTrait>::ItemHandle
-CacheAllocator<CacheTrait>::evictNormalItemForSlabRelease(Item& item) {
+CacheAllocator<CacheTrait>::evictNormalItem(Item& item,
+                                            bool skipIfTokenInvalid) {
   XDCHECK(item.isMoving());
 
   if (item.isOnlyMoving()) {
     return ItemHandle{};
   }
 
-  auto evictHandle = tryEvictToNextMemoryTier(&item);
+  auto evictHandle = tryEvictToNextMemoryTier(item);
  if (evictHandle) return evictHandle;
 
  auto predicate = [](const Item& it) { return it.getRefCount() == 0; };
@@ -3009,6 +2895,11 @@ CacheAllocator<CacheTrait>::evictNormalItemForSlabRelease(Item& item) {
   auto token = evictToNvmCache ? nvmCache_->createPutToken(item.getKey())
                                : typename NvmCacheT::PutToken{};
 
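+  // With skipIfTokenInvalid set (the findEviction path), bail out early
+  // rather than evicting an item that cannot be admitted to NvmCache.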
+  if (skipIfTokenInvalid && evictToNvmCache && !token.isValid()) {
+    stats_.evictFailConcurrentFill.inc();
+    return ItemHandle{};
+  }
+
  // We remove the item from both access and mm containers. It doesn't matter
  // if someone else calls remove on the item at this moment, the item cannot
  // be freed as long as we have the moving bit set.