
base: Remove OwningMRUCache in favor of scoped_ptrs in MRUCache

Along with removing OwningMRUCache, this patch also removes
the Deletor concept from the cache, since it can be implemented
by storing scoped_ptrs with custom deleters.

BUG=592823

Review URL: https://codereview.chromium.org/1763273002

Cr-Commit-Position: refs/heads/master@{#380239}
Author: vmpstr
Date: 2016-03-09 14:08:48 -08:00
Committed by: Commit bot
Parent: cc6220fa81
Commit: aaab4d14f3
17 changed files with 98 additions and 251 deletions
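For context, here is a minimal sketch of the pattern this patch moves callers to; it is illustrative only (the Entry type and values are made up) and assumes Chromium's base library at this revision. Instead of an OwningMRUCache of raw pointers, callers store scoped_ptrs directly in MRUCache, and the payload is freed automatically on replacement, eviction, Erase(), Clear(), or cache destruction.

#include "base/containers/mru_cache.h"
#include "base/memory/scoped_ptr.h"

struct Entry {
  int value;
};

void Example() {
  // Before this patch: base::OwningMRUCache<int, Entry*> cache(2);
  // After this patch the cache owns the payload through scoped_ptr.
  base::MRUCache<int, scoped_ptr<Entry>> cache(2 /* max_size */);

  cache.Put(1, make_scoped_ptr(new Entry{10}));
  cache.Put(2, make_scoped_ptr(new Entry{20}));
  cache.Put(3, make_scoped_ptr(new Entry{30}));  // Evicts key 1 and deletes its Entry.

  auto it = cache.Get(2);
  if (it != cache.end()) {
    Entry* entry = it->second.get();  // Non-owning access to the payload.
    (void)entry;
  }
}  // Remaining entries are deleted when |cache| goes out of scope.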

@ -41,12 +41,9 @@ struct MRUCacheStandardMap {
};
// Base class for the MRU cache specializations defined below.
// The deletor will get called on all payloads that are being removed or
// replaced.
template <class KeyType,
class PayloadType,
class HashOrCompareType,
class DeletorType,
template <typename, typename, typename> class MapType =
MRUCacheStandardMap>
class MRUCacheBase {
@ -75,18 +72,9 @@ class MRUCacheBase {
// a new item is inserted. If the caller wants to manage this itself (for
// example, maybe it has special work to do when something is evicted), it
// can pass NO_AUTO_EVICT to not restrict the cache size.
explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {
}
explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {}
MRUCacheBase(size_type max_size, const DeletorType& deletor)
: max_size_(max_size), deletor_(deletor) {
}
virtual ~MRUCacheBase() {
iterator i = begin();
while (i != end())
i = Erase(i);
}
virtual ~MRUCacheBase() {}
size_type max_size() const { return max_size_; }
@ -94,14 +82,14 @@ class MRUCacheBase {
// the same key, it is removed prior to insertion. An iterator indicating the
// inserted item will be returned (this will always be the front of the list).
//
// The payload will be copied. In the case of an OwningMRUCache, this function
// will take ownership of the pointer.
iterator Put(const KeyType& key, const PayloadType& payload) {
// The payload will be forwarded.
template <typename Payload>
iterator Put(const KeyType& key, Payload&& payload) {
// Remove any existing payload with that key.
typename KeyIndex::iterator index_iter = index_.find(key);
if (index_iter != index_.end()) {
// Erase the reference to it. This will call the deletor on the removed
// element. The index reference will be replaced in the code below.
// Erase the reference to it. The index reference will be replaced in the
// code below.
Erase(index_iter->second);
} else if (max_size_ != NO_AUTO_EVICT) {
// New item is being inserted which might make it larger than the maximum
@ -109,7 +97,7 @@ class MRUCacheBase {
ShrinkToSize(max_size_ - 1);
}
ordering_.push_front(value_type(key, payload));
ordering_.push_front(value_type(key, std::forward<Payload>(payload)));
index_.insert(std::make_pair(key, ordering_.begin()));
return ordering_.begin();
}
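
A usage note on the forwarded Put() above (an illustrative sketch, not part of the patch; assumes <string>, <utility>, and the headers already included by this file): move-only payloads such as scoped_ptr must be passed as rvalues, while copyable payloads can still be passed by value.

base::MRUCache<std::string, scoped_ptr<int>> owning_cache(10);
owning_cache.Put("a", make_scoped_ptr(new int(1)));  // Temporary is moved in.

scoped_ptr<int> value(new int(2));
owning_cache.Put("b", std::move(value));             // Lvalues must be std::move()d.

base::MRUCache<std::string, int> value_cache(10);
value_cache.Put("c", 3);                             // Copyable payloads work as before.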
@ -150,14 +138,12 @@ class MRUCacheBase {
void Swap(MRUCacheBase& other) {
ordering_.swap(other.ordering_);
index_.swap(other.index_);
std::swap(deletor_, other.deletor_);
std::swap(max_size_, other.max_size_);
}
// Erases the item referenced by the given iterator. An iterator to the item
// following it will be returned. The iterator must be valid.
iterator Erase(iterator pos) {
deletor_(pos->second);
index_.erase(pos->first);
return ordering_.erase(pos);
}
@ -180,9 +166,6 @@ class MRUCacheBase {
// Deletes everything from the cache.
void Clear() {
for (typename PayloadList::iterator i(ordering_.begin());
i != ordering_.end(); ++i)
deletor_(i->second);
index_.clear();
ordering_.clear();
}
@ -219,83 +202,28 @@ class MRUCacheBase {
size_type max_size_;
DeletorType deletor_;
DISALLOW_COPY_AND_ASSIGN(MRUCacheBase);
};
// MRUCache --------------------------------------------------------------------
// A functor that does nothing. Used by the MRUCache.
template<class PayloadType>
class MRUCacheNullDeletor {
public:
void operator()(const PayloadType& payload) {}
};
// A container that does not do anything to free its data. Use this when storing
// value types (as opposed to pointers) in the list.
template <class KeyType, class PayloadType>
class MRUCache : public MRUCacheBase<KeyType,
PayloadType,
std::less<KeyType>,
MRUCacheNullDeletor<PayloadType>> {
class MRUCache : public MRUCacheBase<KeyType, PayloadType, std::less<KeyType>> {
private:
typedef MRUCacheBase<KeyType,
PayloadType,
std::less<KeyType>,
MRUCacheNullDeletor<PayloadType>>
ParentType;
using ParentType = MRUCacheBase<KeyType, PayloadType, std::less<KeyType>>;
public:
// See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
explicit MRUCache(typename ParentType::size_type max_size)
: ParentType(max_size) {
}
virtual ~MRUCache() {
}
: ParentType(max_size) {}
virtual ~MRUCache() {}
private:
DISALLOW_COPY_AND_ASSIGN(MRUCache);
};
// OwningMRUCache --------------------------------------------------------------
template<class PayloadType>
class MRUCachePointerDeletor {
public:
void operator()(const PayloadType& payload) { delete payload; }
};
// A cache that owns the payload type, which must be a non-const pointer type.
// The pointers will be deleted when they are removed, replaced, or when the
// cache is destroyed.
// TODO(vmpstr): This should probably go away in favor of storing scoped_ptrs.
template <class KeyType, class PayloadType>
class OwningMRUCache
: public MRUCacheBase<KeyType,
PayloadType,
std::less<KeyType>,
MRUCachePointerDeletor<PayloadType>> {
private:
typedef MRUCacheBase<KeyType,
PayloadType,
std::less<KeyType>,
MRUCachePointerDeletor<PayloadType>>
ParentType;
public:
// See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
explicit OwningMRUCache(typename ParentType::size_type max_size)
: ParentType(max_size) {
}
virtual ~OwningMRUCache() {
}
private:
DISALLOW_COPY_AND_ASSIGN(OwningMRUCache);
};
// HashingMRUCache ------------------------------------------------------------
template <class KeyType, class ValueType, class HashType>
@ -307,26 +235,17 @@ struct MRUCacheHashMap {
// the map type instead of std::map. Note that your KeyType must be hashable to
// use this cache or you need to provide a hashing class.
template <class KeyType, class PayloadType, class HashType = std::hash<KeyType>>
class HashingMRUCache : public MRUCacheBase<KeyType,
PayloadType,
HashType,
MRUCacheNullDeletor<PayloadType>,
MRUCacheHashMap> {
class HashingMRUCache
: public MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap> {
private:
typedef MRUCacheBase<KeyType,
PayloadType,
HashType,
MRUCacheNullDeletor<PayloadType>,
MRUCacheHashMap>
ParentType;
using ParentType =
MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap>;
public:
// See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
explicit HashingMRUCache(typename ParentType::size_type max_size)
: ParentType(max_size) {
}
virtual ~HashingMRUCache() {
}
: ParentType(max_size) {}
virtual ~HashingMRUCache() {}
private:
DISALLOW_COPY_AND_ASSIGN(HashingMRUCache);
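
As an aside on the HashingMRUCache comment above (a sketch with made-up types, not part of the patch): std::string keys hash out of the box, while a custom key type needs an equality operator and a hash functor supplied as the third template argument.

#include <functional>
#include <string>
#include "base/containers/mru_cache.h"
#include "base/memory/scoped_ptr.h"

struct SessionKey {
  std::string host;
  int port;
  bool operator==(const SessionKey& other) const {
    return host == other.host && port == other.port;
  }
};

struct SessionKeyHash {
  size_t operator()(const SessionKey& key) const {
    return std::hash<std::string>()(key.host) ^ static_cast<size_t>(key.port);
  }
};

void HashingExample() {
  base::HashingMRUCache<std::string, scoped_ptr<int>> by_name(8);
  by_name.Put("key", make_scoped_ptr(new int(1)));

  base::HashingMRUCache<SessionKey, int, SessionKeyHash> by_session(8);
  by_session.Put(SessionKey{"example.com", 443}, 2);
}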

@ -5,6 +5,7 @@
#include <stddef.h>
#include "base/containers/mru_cache.h"
#include "base/memory/scoped_ptr.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
@ -187,15 +188,15 @@ TEST(MRUCacheTest, KeyReplacement) {
// Make sure that the owning version releases its pointers properly.
TEST(MRUCacheTest, Owning) {
typedef base::OwningMRUCache<int, CachedItem*> Cache;
using Cache = base::MRUCache<int, scoped_ptr<CachedItem>>;
Cache cache(Cache::NO_AUTO_EVICT);
int initial_count = cached_item_live_count;
// First insert an item and then overwrite it.
static const int kItem1Key = 1;
cache.Put(kItem1Key, new CachedItem(20));
cache.Put(kItem1Key, new CachedItem(22));
cache.Put(kItem1Key, make_scoped_ptr(new CachedItem(20)));
cache.Put(kItem1Key, make_scoped_ptr(new CachedItem(22)));
// There should still be one item, and one extra live item.
Cache::iterator iter = cache.Get(kItem1Key);
@ -211,8 +212,8 @@ TEST(MRUCacheTest, Owning) {
// go away.
{
Cache cache2(Cache::NO_AUTO_EVICT);
cache2.Put(1, new CachedItem(20));
cache2.Put(2, new CachedItem(20));
cache2.Put(1, make_scoped_ptr(new CachedItem(20)));
cache2.Put(2, make_scoped_ptr(new CachedItem(20)));
}
// There should be no objects leaked.
@ -221,8 +222,8 @@ TEST(MRUCacheTest, Owning) {
// Check that Clear() also frees things correctly.
{
Cache cache2(Cache::NO_AUTO_EVICT);
cache2.Put(1, new CachedItem(20));
cache2.Put(2, new CachedItem(20));
cache2.Put(1, make_scoped_ptr(new CachedItem(20)));
cache2.Put(2, make_scoped_ptr(new CachedItem(20)));
EXPECT_EQ(initial_count + 2, cached_item_live_count);
cache2.Clear();
EXPECT_EQ(initial_count, cached_item_live_count);
@ -230,7 +231,7 @@ TEST(MRUCacheTest, Owning) {
}
TEST(MRUCacheTest, AutoEvict) {
typedef base::OwningMRUCache<int, CachedItem*> Cache;
using Cache = base::MRUCache<int, scoped_ptr<CachedItem>>;
static const Cache::size_type kMaxSize = 3;
int initial_count = cached_item_live_count;
@ -239,10 +240,10 @@ TEST(MRUCacheTest, AutoEvict) {
Cache cache(kMaxSize);
static const int kItem1Key = 1, kItem2Key = 2, kItem3Key = 3, kItem4Key = 4;
cache.Put(kItem1Key, new CachedItem(20));
cache.Put(kItem2Key, new CachedItem(21));
cache.Put(kItem3Key, new CachedItem(22));
cache.Put(kItem4Key, new CachedItem(23));
cache.Put(kItem1Key, make_scoped_ptr(new CachedItem(20)));
cache.Put(kItem2Key, make_scoped_ptr(new CachedItem(21)));
cache.Put(kItem3Key, make_scoped_ptr(new CachedItem(22)));
cache.Put(kItem4Key, make_scoped_ptr(new CachedItem(23)));
// The cache should only have kMaxSize items in it even though we inserted
// more.

@ -96,9 +96,9 @@ BitmapFetcherService::RequestId BitmapFetcherService::RequestImage(
return REQUEST_ID_INVALID;
// Check for existing images first.
base::OwningMRUCache<GURL, CacheEntry*>::iterator iter = cache_.Get(url);
auto iter = cache_.Get(url);
if (iter != cache_.end()) {
BitmapFetcherService::CacheEntry* entry = iter->second;
BitmapFetcherService::CacheEntry* entry = iter->second.get();
request->NotifyImageChanged(entry->bitmap.get());
// There is no request ID associated with this - data is already delivered.
@ -184,9 +184,9 @@ void BitmapFetcherService::OnFetchComplete(const GURL& url,
}
if (bitmap && !bitmap->isNull()) {
CacheEntry* entry = new CacheEntry;
scoped_ptr<CacheEntry> entry(new CacheEntry);
entry->bitmap.reset(new SkBitmap(*bitmap));
cache_.Put(fetcher->url(), entry);
cache_.Put(fetcher->url(), std::move(entry));
}
RemoveFetcher(fetcher);

@ -96,7 +96,7 @@ class BitmapFetcherService : public KeyedService,
scoped_ptr<const SkBitmap> bitmap;
};
base::OwningMRUCache<GURL, CacheEntry*> cache_;
base::MRUCache<GURL, scoped_ptr<CacheEntry>> cache_;
// Current request ID to be used.
int current_request_id_;

@ -26,10 +26,6 @@ const char kPeopleQueryPrefix[] = "people:";
} // namespace
void WebserviceCache::CacheDeletor::operator()(const Payload& payload) {
delete payload.result;
}
WebserviceCache::WebserviceCache(content::BrowserContext* context)
: cache_(Cache::NO_AUTO_EVICT),
cache_loaded_(false) {
@ -49,11 +45,11 @@ const CacheResult WebserviceCache::Get(QueryType type,
std::string typed_query = PrependType(type, query);
Cache::iterator iter = cache_.Get(typed_query);
if (iter != cache_.end()) {
if (base::Time::Now() - iter->second.time <=
if (base::Time::Now() - iter->second->time <=
base::TimeDelta::FromMinutes(kWebserviceCacheTimeLimitInMinutes)) {
return std::make_pair(FRESH, iter->second.result);
return std::make_pair(FRESH, iter->second->result.get());
} else {
return std::make_pair(STALE, iter->second.result);
return std::make_pair(STALE, iter->second->result.get());
}
}
return std::make_pair(STALE, static_cast<base::DictionaryValue*>(NULL));
@ -64,15 +60,17 @@ void WebserviceCache::Put(QueryType type,
scoped_ptr<base::DictionaryValue> result) {
if (result) {
std::string typed_query = PrependType(type, query);
Payload payload(base::Time::Now(), result.release());
scoped_ptr<Payload> scoped_payload(
new Payload(base::Time::Now(), std::move(result)));
Payload* payload = scoped_payload.get();
cache_.Put(typed_query, payload);
cache_.Put(typed_query, std::move(scoped_payload));
// If the cache isn't loaded yet, we're fine with losing queries since a
// 1000 entry cache should load really quickly; it is very unlikely that a
// user has already typed a 3 character search before the cache has loaded.
if (cache_loaded_) {
data_store_->cached_dict()->Set(typed_query, DictFromPayload(payload));
data_store_->cached_dict()->Set(typed_query, DictFromPayload(*payload));
data_store_->ScheduleWrite();
if (cache_.size() > kWebserviceCacheMaxSize)
TrimCache();
@ -89,16 +87,16 @@ void WebserviceCache::OnCacheLoaded(scoped_ptr<base::DictionaryValue>) {
!it.IsAtEnd();
it.Advance()) {
const base::DictionaryValue* payload_dict;
Payload payload;
scoped_ptr<Payload> payload(new Payload);
if (!it.value().GetAsDictionary(&payload_dict) ||
!payload_dict ||
!PayloadFromDict(payload_dict, &payload)) {
!PayloadFromDict(payload_dict, payload.get())) {
// In case we don't have a valid payload associated with a given query,
// clean up that query from our data store.
cleanup_keys.push_back(it.key());
continue;
}
cache_.Put(it.key(), payload);
cache_.Put(it.key(), std::move(payload));
}
if (!cleanup_keys.empty()) {
@ -125,7 +123,7 @@ bool WebserviceCache::PayloadFromDict(const base::DictionaryValue* dict,
// instead of returning the original reference. The new dictionary will be
// owned by our MRU cache.
*payload = Payload(base::Time::FromInternalValue(time_val),
result->DeepCopy());
make_scoped_ptr(result->DeepCopy()));
return true;
}
@ -163,4 +161,18 @@ std::string WebserviceCache::PrependType(
}
}
WebserviceCache::Payload::Payload(const base::Time& time,
scoped_ptr<base::DictionaryValue> result)
: time(time), result(std::move(result)) {}
WebserviceCache::Payload::Payload() = default;
WebserviceCache::Payload::~Payload() = default;
WebserviceCache::Payload& WebserviceCache::Payload::operator=(Payload&& other) {
time = std::move(other.time);
result = std::move(other.result);
return *this;
}
} // namespace app_list
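
A short aside on the explicit move assignment added above (an illustrative sketch; MoveOnlyPayload is a made-up name): once Payload holds a scoped_ptr, its implicitly generated copy assignment is deleted, and the user-declared destructor suppresses the implicit move assignment, so an assignment such as *payload = Payload(...) in PayloadFromDict() only compiles with an operator= like the one defined here.

#include <utility>
#include "base/memory/scoped_ptr.h"

struct MoveOnlyPayload {
  MoveOnlyPayload() = default;
  ~MoveOnlyPayload() {}  // User-declared destructor: no implicit move operations.
  MoveOnlyPayload& operator=(MoveOnlyPayload&& other) {
    value = std::move(other.value);
    return *this;
  }
  scoped_ptr<int> value;
};

void MoveAssignExample() {
  MoveOnlyPayload payload;
  payload = MoveOnlyPayload();  // OK: moves. Copy assignment would not compile.
}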

@ -64,22 +64,17 @@ class WebserviceCache : public KeyedService,
private:
struct Payload {
Payload(const base::Time& time,
const base::DictionaryValue* result)
: time(time), result(result) {}
Payload() {}
Payload(const base::Time& time, scoped_ptr<base::DictionaryValue> result);
Payload();
~Payload();
Payload& operator=(Payload&& other);
base::Time time;
const base::DictionaryValue* result;
scoped_ptr<base::DictionaryValue> result;
};
class CacheDeletor {
public:
void operator()(const Payload& payload);
};
typedef base::
MRUCacheBase<std::string, Payload, std::less<std::string>, CacheDeletor>
Cache;
using Cache = base::MRUCache<std::string, scoped_ptr<Payload>>;
// Callback for when the cache is loaded from the dictionary data store.
void OnCacheLoaded(scoped_ptr<base::DictionaryValue>);
@ -87,8 +82,7 @@ class WebserviceCache : public KeyedService,
// Populates the payload parameter with the corresponding payload stored
// in the given dictionary. If the dictionary is invalid for any reason,
// this method will return false.
bool PayloadFromDict(const base::DictionaryValue* dict,
Payload* payload);
bool PayloadFromDict(const base::DictionaryValue* dict, Payload* payload);
// Returns a dictionary value for a given payload. The returned dictionary
// will be owned by the data_store_ cached_dict, and freed on the destruction

@ -9,8 +9,7 @@
namespace dom_distiller {
InMemoryContentStore::InMemoryContentStore(const int max_num_entries)
: cache_(max_num_entries, CacheDeletor(this)) {
}
: cache_(max_num_entries) {}
InMemoryContentStore::~InMemoryContentStore() {
// Clear the cache before destruction to ensure the CacheDeletor is not called
@ -52,7 +51,7 @@ void InMemoryContentStore::LoadContent(
}
scoped_ptr<DistilledArticleProto> distilled_article;
if (success) {
distilled_article.reset(new DistilledArticleProto(it->second));
distilled_article.reset(new DistilledArticleProto(*it->second));
} else {
distilled_article.reset(new DistilledArticleProto());
}
@ -63,7 +62,9 @@ void InMemoryContentStore::LoadContent(
void InMemoryContentStore::InjectContent(const ArticleEntry& entry,
const DistilledArticleProto& proto) {
cache_.Put(entry.entry_id(), proto);
cache_.Put(entry.entry_id(),
scoped_ptr<DistilledArticleProto, CacheDeletor>(
new DistilledArticleProto(proto), CacheDeletor(this)));
AddUrlToIdMapping(entry, proto);
}
@ -96,11 +97,12 @@ InMemoryContentStore::CacheDeletor::~CacheDeletor() {
}
void InMemoryContentStore::CacheDeletor::operator()(
const DistilledArticleProto& proto) {
DistilledArticleProto* proto) {
// When InMemoryContentStore is deleted, the |store_| pointer becomes invalid,
// but since the ContentMap is cleared in the InMemoryContentStore destructor,
// this should never be called after the destructor.
store_->EraseUrlToIdMapping(proto);
store_->EraseUrlToIdMapping(*proto);
delete proto;
}
} // namespace dom_distiller

@ -64,7 +64,7 @@ class InMemoryContentStore : public DistilledContentStore {
public:
explicit CacheDeletor(InMemoryContentStore* store);
~CacheDeletor();
void operator()(const DistilledArticleProto& proto);
void operator()(DistilledArticleProto* proto);
private:
InMemoryContentStore* store_;
@ -75,10 +75,9 @@ class InMemoryContentStore : public DistilledContentStore {
void EraseUrlToIdMapping(const DistilledArticleProto& proto);
typedef base::MRUCacheBase<std::string,
DistilledArticleProto,
std::less<std::string>,
InMemoryContentStore::CacheDeletor>
typedef base::MRUCache<std::string,
scoped_ptr<DistilledArticleProto, CacheDeletor>>
ContentMap;
typedef base::hash_map<std::string, std::string> UrlMap;
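
The ContentMap above is the "scoped_ptr with a custom deleter" replacement for the old DeletorType that the commit message refers to. A minimal standalone sketch of the same pattern (hypothetical names, assuming the base headers):

#include <string>
#include "base/containers/mru_cache.h"
#include "base/memory/scoped_ptr.h"

struct Blob {};

// Hypothetical deleter that does bookkeeping before freeing the payload,
// filling the role the removed DeletorType template parameter used to play.
struct BlobDeleter {
  void operator()(Blob* blob) const {
    // e.g. erase the blob from a side index before deleting it.
    delete blob;
  }
};

using BlobCache = base::MRUCache<std::string, scoped_ptr<Blob, BlobDeleter>>;

void DeleterExample() {
  BlobCache cache(4);
  cache.Put("key", scoped_ptr<Blob, BlobDeleter>(new Blob, BlobDeleter()));
}  // BlobDeleter runs for each entry still in the cache.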

@ -28,9 +28,9 @@ LargeIconCache::~LargeIconCache() {}
void LargeIconCache::SetCachedResult(
const GURL& url,
const favicon_base::LargeIconResult& result) {
LargeIconCacheEntry* entry = new LargeIconCacheEntry;
scoped_ptr<LargeIconCacheEntry> entry(new LargeIconCacheEntry);
entry->result = CloneLargeIconResult(result);
cache_.Put(url, entry);
cache_.Put(url, std::move(entry));
}
scoped_ptr<favicon_base::LargeIconResult> LargeIconCache::GetCachedResult(

@ -47,7 +47,7 @@ class LargeIconCache : public KeyedService {
scoped_ptr<favicon_base::LargeIconResult> CloneLargeIconResult(
const favicon_base::LargeIconResult& large_icon_result);
base::OwningMRUCache<GURL, LargeIconCacheEntry*> cache_;
base::MRUCache<GURL, scoped_ptr<LargeIconCacheEntry>> cache_;
DISALLOW_COPY_AND_ASSIGN(LargeIconCache);
};

@ -7,21 +7,11 @@
#import <Foundation/Foundation.h>
// The LRUCache delegate is called before an item is evicted from the cache.
@protocol LRUCacheDelegate
- (void)lruCacheWillEvictObject:(id<NSObject>)object;
@end
// This class implements a cache with a limited size. Once the cache reaches
// its size limit, it will start to evict items in a Least Recently Used order
// (where the term "used" is determined by queries to the cache).
@interface LRUCache : NSObject
// The delegate of the LRUCache called when objects are evicted from the cache.
@property(nonatomic, assign) id<LRUCacheDelegate> delegate;
// The maximum number of items that the cache can hold before starting to
// evict. The value 0 is used to signify that the cache can hold an unlimited
// number of elements (i.e. never evicts).
@ -43,18 +33,13 @@
// Adds the pair |key|, |obj| to the cache. If the value of the maxCacheSize
// property is non zero, the cache may evict an element if the maximum cache
// size is reached. If the |key| is already present in the cache, the value for
// that key is replaced by |object|. For any evicted object and if the delegate
// is non nil, it will receive a call to the lruCacheWillEvictObject: selector.
// that key is replaced by |object|.
- (void)setObject:(id<NSObject>)object forKey:(NSObject*)key;
// Remove the key, value pair corresponding to the given |key|. If the delegate
// is non nil, it will receive a call to the lruCacheWillEvictObject: selector.
// Remove the key, value pair corresponding to the given |key|.
- (void)removeObjectForKey:(id<NSObject>)key;
// Remove all objects from the cache. For all evicted objects and if the
// delegate is non nil, it will receive a call to the lruCacheWillEvictObject:
// selector.
// Remove all objects from the cache.
- (void)removeAllObjects;
// Returns the number of items that the cache currently holds.

@ -14,21 +14,6 @@
namespace {
class MRUCacheNSObjectDelegate {
public:
MRUCacheNSObjectDelegate(id<LRUCacheDelegate> delegate)
: delegate_(delegate) {}
MRUCacheNSObjectDelegate(const MRUCacheNSObjectDelegate& other) = default;
void operator()(const base::scoped_nsprotocol<id<NSObject>>& payload) const {
[delegate_ lruCacheWillEvictObject:payload.get()];
}
private:
id<LRUCacheDelegate> delegate_; // Weak.
};
struct NSObjectEqualTo {
bool operator()(const base::scoped_nsprotocol<id<NSObject>>& obj1,
const base::scoped_nsprotocol<id<NSObject>>& obj2) const {
@ -52,20 +37,17 @@ class NSObjectMRUCache
: public base::MRUCacheBase<base::scoped_nsprotocol<id<NSObject>>,
base::scoped_nsprotocol<id<NSObject>>,
NSObjectHash,
MRUCacheNSObjectDelegate,
MRUCacheNSObjectHashMap> {
private:
typedef base::MRUCacheBase<base::scoped_nsprotocol<id<NSObject>>,
base::scoped_nsprotocol<id<NSObject>>,
NSObjectHash,
MRUCacheNSObjectDelegate,
MRUCacheNSObjectHashMap>
ParentType;
public:
NSObjectMRUCache(typename ParentType::size_type max_size,
const MRUCacheNSObjectDelegate& deletor)
: ParentType(max_size, deletor) {}
explicit NSObjectMRUCache(typename ParentType::size_type max_size)
: ParentType(max_size) {}
private:
DISALLOW_COPY_AND_ASSIGN(NSObjectMRUCache);
@ -73,14 +55,10 @@ class NSObjectMRUCache
} // namespace
@interface LRUCache ()<LRUCacheDelegate>
@end
@implementation LRUCache {
scoped_ptr<NSObjectMRUCache> _cache;
}
@synthesize delegate = _delegate;
@synthesize maxCacheSize = _maxCacheSize;
- (instancetype)init {
@ -92,8 +70,7 @@ class NSObjectMRUCache
self = [super init];
if (self) {
_maxCacheSize = maxCacheSize;
MRUCacheNSObjectDelegate cacheDelegateDeletor(self);
_cache.reset(new NSObjectMRUCache(self.maxCacheSize, cacheDelegateDeletor));
_cache.reset(new NSObjectMRUCache(self.maxCacheSize));
}
return self;
}
@ -131,10 +108,4 @@ class NSObjectMRUCache
return _cache->empty();
}
#pragma mark - Private
- (void)lruCacheWillEvictObject:(id<NSObject>)obj {
[self.delegate lruCacheWillEvictObject:obj];
}
@end

@ -6,32 +6,10 @@
#import "ios/chrome/browser/snapshots/lru_cache.h"
#include "testing/gtest/include/gtest/gtest.h"
@interface LRUCacheTestDelegate : NSObject<LRUCacheDelegate>
@property(nonatomic, retain) id<NSObject> lastEvictedObject;
@property(nonatomic, assign) NSInteger evictedObjectsCount;
@end
@implementation LRUCacheTestDelegate
@synthesize lastEvictedObject = _lastEvictedObject;
@synthesize evictedObjectsCount = _evictedObjectsCount;
- (void)lruCacheWillEvictObject:(id<NSObject>)obj {
self.lastEvictedObject = obj;
++_evictedObjectsCount;
}
@end
namespace {
TEST(LRUCacheTest, Basic) {
base::scoped_nsobject<LRUCache> cache([[LRUCache alloc] initWithCacheSize:3]);
base::scoped_nsobject<LRUCacheTestDelegate> delegate(
[[LRUCacheTestDelegate alloc] init]);
[cache setDelegate:delegate];
base::scoped_nsobject<NSString> value1(
[[NSString alloc] initWithString:@"Value 1"]);
@ -51,8 +29,6 @@ TEST(LRUCacheTest, Basic) {
[cache setObject:value4 forKey:@"VALUE 4"];
EXPECT_TRUE([cache count] == 3);
EXPECT_TRUE([delegate evictedObjectsCount] == 1);
EXPECT_TRUE([delegate lastEvictedObject] == value1.get());
// Check LRU behaviour; the least recently added value should have been
// evicted.

@ -40,7 +40,7 @@ ScopedSSL_SESSION SSLClientSessionCacheOpenSSL::Lookup(
CacheEntryMap::iterator iter = cache_.Get(cache_key);
if (iter == cache_.end())
return nullptr;
if (IsExpired(iter->second, clock_->Now())) {
if (IsExpired(iter->second.get(), clock_->Now())) {
cache_.Erase(iter);
return nullptr;
}
@ -52,12 +52,12 @@ void SSLClientSessionCacheOpenSSL::Insert(const std::string& cache_key,
base::AutoLock lock(lock_);
// Make a new entry.
CacheEntry* entry = new CacheEntry;
scoped_ptr<CacheEntry> entry(new CacheEntry);
entry->session.reset(SSL_SESSION_up_ref(session));
entry->creation_time = clock_->Now();
// Takes ownership.
cache_.Put(cache_key, entry);
cache_.Put(cache_key, std::move(entry));
}
void SSLClientSessionCacheOpenSSL::Flush() {
@ -88,7 +88,7 @@ void SSLClientSessionCacheOpenSSL::FlushExpiredSessions() {
base::Time now = clock_->Now();
CacheEntryMap::iterator iter = cache_.begin();
while (iter != cache_.end()) {
if (IsExpired(iter->second, now)) {
if (IsExpired(iter->second.get(), now)) {
iter = cache_.Erase(iter);
} else {
++iter;

@ -66,11 +66,7 @@ class NET_EXPORT SSLClientSessionCacheOpenSSL {
};
using CacheEntryMap =
base::MRUCacheBase<std::string,
CacheEntry*,
std::hash<std::string>,
base::MRUCachePointerDeletor<CacheEntry*>,
base::MRUCacheHashMap>;
base::HashingMRUCache<std::string, scoped_ptr<CacheEntry>>;
// Returns true if |entry| is expired as of |now|.
bool IsExpired(CacheEntry* entry, const base::Time& now);

@ -66,11 +66,7 @@ class DrmOverlayValidator {
ScanoutBufferGenerator* buffer_generator_; // Not owned.
// List of all configurations which have been validated.
base::MRUCacheBase<OverlayPlaneList,
OverlayHintsList,
std::less<OverlayPlaneList>,
base::MRUCacheNullDeletor<OverlayHintsList>>
overlay_hints_cache_;
base::MRUCache<OverlayPlaneList, OverlayHintsList> overlay_hints_cache_;
DISALLOW_COPY_AND_ASSIGN(DrmOverlayValidator);
};

@ -55,11 +55,7 @@ class DrmOverlayManager : public OverlayManagerOzone {
// List of all OverlayCheck_Params which have been validated on the GPU side.
// Value is set to true if we are waiting for validation results from GPU.
base::MRUCacheBase<std::vector<OverlayCheck_Params>,
bool,
std::less<std::vector<OverlayCheck_Params>>,
base::MRUCacheNullDeletor<bool>>
cache_;
base::MRUCache<std::vector<OverlayCheck_Params>, bool> cache_;
DISALLOW_COPY_AND_ASSIGN(DrmOverlayManager);
};