Mirror of https://github.com/google/leveldb.git, synced 2024-12-04 14:28:59 +08:00
f57e23351f
This revision adds two major changes:

1. build_detect_platform, which generates build_config.mk with platform-dependent flags for the build process.
2. /port/atomic_pointer.h, with an AtomicPointer implementation for platforms without <cstdatomic>.

Some of this code is loosely based on patches submitted to the LevelDB mailing list at https://groups.google.com/forum/#!forum/leveldb. Tip of the hat to Dave Smith and Edouard A, who both sent patches.

Snappy (http://code.google.com/p/snappy/) and cstdatomic are now both detected by the build_detect_platform script (1), which is executed during make. For (2), instead of broadly importing atomicops_* from Chromium or the Google performance tools, we chose to just implement AtomicPointer and the limited atomic load and store operations it needs. This resulted in much less code and fewer files - everything is contained in atomic_pointer.h.

git-svn-id: https://leveldb.googlecode.com/svn/trunk@34 62dab493-f737-651d-591e-8d6aee1b9529
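As a rough illustration of the AtomicPointer idea this commit describes, the sketch below expresses the same interface using the standard C++11 <atomic> header (the successor to <cstdatomic>). This is not the code in /port/atomic_pointer.h, which exists precisely for platforms lacking such a header and hand-rolls its memory barriers; the method names follow common LevelDB port-layer conventions and should be read as assumptions, not the actual implementation.

#include <atomic>

// Sketch of a pointer wrapper offering the two flavors of access a port
// layer typically needs: unordered (cheap) and acquire/release (ordered).
class AtomicPointer {
 private:
  std::atomic<void*> rep_;

 public:
  AtomicPointer() : rep_(nullptr) {}
  explicit AtomicPointer(void* p) : rep_(p) {}

  // No ordering guarantees: safe only where racing accesses are acceptable.
  void* NoBarrier_Load() const { return rep_.load(std::memory_order_relaxed); }
  void NoBarrier_Store(void* v) { rep_.store(v, std::memory_order_relaxed); }

  // Acquire/release ordering: a Release_Store publishes all writes made
  // before it to any thread whose Acquire_Load observes the stored pointer.
  void* Acquire_Load() const { return rep_.load(std::memory_order_acquire); }
  void Release_Store(void* v) { rep_.store(v, std::memory_order_release); }
};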
289 lines
7.0 KiB
C++
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // memcpy()/memset() are used below

#include "leveldb/cache.h"
#include "port/port.h"
#include "util/hash.h"
#include "util/mutexlock.h"

namespace leveldb {

Cache::~Cache() {
}

namespace {

// LRU cache implementation

// An entry is a variable length heap-allocated structure. Entries
// are kept in a circular doubly linked list ordered by access time.
struct LRUHandle {
  void* value;
  void (*deleter)(const Slice&, void* value);
  LRUHandle* next_hash;
  LRUHandle* next;
  LRUHandle* prev;
  size_t charge;      // TODO(opt): Only allow uint32_t?
  size_t key_length;
  size_t refs;        // TODO(opt): Pack with "key_length"?
  char key_data[1];   // Beginning of key

  Slice key() const {
    // For cheaper lookups, we allow a temporary Handle object
    // to store a pointer to a key in "value".
    if (next == this) {
      return *(reinterpret_cast<Slice*>(value));
    } else {
      return Slice(key_data, key_length);
    }
  }
};

// We provide our own simple hash table since it removes a whole bunch
// of porting hacks and is also faster than some of the built-in hash
// table implementations in some of the compiler/runtime combinations
// we have tested. E.g., readrandom speeds up by ~5% over g++ 4.4.3's
// builtin hashtable.
class HandleTable {
 public:
  HandleTable() : length_(0), elems_(0), list_(NULL) { Resize(); }
  ~HandleTable() { delete[] list_; }

  LRUHandle* Lookup(LRUHandle* h) {
    return *FindPointer(h);
  }

  LRUHandle* Insert(LRUHandle* h) {
    LRUHandle** ptr = FindPointer(h);
    LRUHandle* old = *ptr;
    h->next_hash = (old == NULL ? NULL : old->next_hash);
    *ptr = h;
    if (old == NULL) {
      ++elems_;
      if (elems_ > length_) {
        // Since each cache entry is fairly large, we aim for a small
        // average linked list length (<= 1).
        Resize();
      }
    }
    return old;
  }

  LRUHandle* Remove(LRUHandle* h) {
    LRUHandle** ptr = FindPointer(h);
    LRUHandle* result = *ptr;
    if (result != NULL) {
      *ptr = result->next_hash;
      --elems_;
    }
    return result;
  }

 private:
  // The table consists of an array of buckets where each bucket is
  // a linked list of cache entries that hash into the bucket.
  uint32_t length_;
  uint32_t elems_;
  LRUHandle** list_;

  // Return a pointer to slot that points to a cache entry that
  // matches *h. If there is no such cache entry, return a pointer to
  // the trailing slot in the corresponding linked list.
  LRUHandle** FindPointer(LRUHandle* h) {
    Slice key = h->key();
    uint32_t hash = Hash(key.data(), key.size(), 0);
    // length_ is always a power of two (see Resize()), so masking with
    // length_ - 1 selects a bucket.
    LRUHandle** ptr = &list_[hash & (length_ - 1)];
    while (*ptr != NULL && key != (*ptr)->key()) {
      ptr = &(*ptr)->next_hash;
    }
    return ptr;
  }

  void Resize() {
    uint32_t new_length = 4;
    while (new_length < elems_) {
      new_length *= 2;
    }
    LRUHandle** new_list = new LRUHandle*[new_length];
    memset(new_list, 0, sizeof(new_list[0]) * new_length);
    uint32_t count = 0;
    for (uint32_t i = 0; i < length_; i++) {
      LRUHandle* h = list_[i];
      while (h != NULL) {
        LRUHandle* next = h->next_hash;
        Slice key = h->key();
        uint32_t hash = Hash(key.data(), key.size(), 0);
        LRUHandle** ptr = &new_list[hash & (new_length - 1)];
        h->next_hash = *ptr;
        *ptr = h;
        h = next;
        count++;
      }
    }
    assert(elems_ == count);
    delete[] list_;
    list_ = new_list;
    length_ = new_length;
  }
};

class LRUCache : public Cache {
 public:
  explicit LRUCache(size_t capacity);
  virtual ~LRUCache();

  virtual Handle* Insert(const Slice& key, void* value, size_t charge,
                         void (*deleter)(const Slice& key, void* value));
  virtual Handle* Lookup(const Slice& key);
  virtual void Release(Handle* handle);
  virtual void* Value(Handle* handle);
  virtual void Erase(const Slice& key);
  virtual uint64_t NewId();

 private:
  void LRU_Remove(LRUHandle* e);
  void LRU_Append(LRUHandle* e);
  void Unref(LRUHandle* e);

  // Constructor parameters
  const size_t capacity_;

  // mutex_ protects the following state.
  port::Mutex mutex_;
  size_t usage_;
  uint64_t last_id_;

  // Dummy head of LRU list.
  // lru.prev is newest entry, lru.next is oldest entry.
  LRUHandle lru_;

  HandleTable table_;
};

LRUCache::LRUCache(size_t capacity)
    : capacity_(capacity),
      usage_(0),
      last_id_(0) {
  // Make empty circular linked list
  lru_.next = &lru_;
  lru_.prev = &lru_;
}

LRUCache::~LRUCache() {
  for (LRUHandle* e = lru_.next; e != &lru_; ) {
    LRUHandle* next = e->next;
    assert(e->refs == 1);  // Error if caller has an unreleased handle
    Unref(e);
    e = next;
  }
}

void LRUCache::Unref(LRUHandle* e) {
  assert(e->refs > 0);
  e->refs--;
  if (e->refs <= 0) {
    usage_ -= e->charge;
    (*e->deleter)(e->key(), e->value);
    free(e);
  }
}

void LRUCache::LRU_Remove(LRUHandle* e) {
  e->next->prev = e->prev;
  e->prev->next = e->next;
}

void LRUCache::LRU_Append(LRUHandle* e) {
  // Make "e" newest entry by inserting just before lru_
  e->next = &lru_;
  e->prev = lru_.prev;
  e->prev->next = e;
  e->next->prev = e;
}

Cache::Handle* LRUCache::Lookup(const Slice& key) {
  MutexLock l(&mutex_);

  // Temporary stack-allocated probe handle; key() detects next == this
  // and reads the lookup key out of "value" instead of key_data.
  LRUHandle dummy;
  dummy.next = &dummy;
  dummy.value = const_cast<Slice*>(&key);
  LRUHandle* e = table_.Lookup(&dummy);
  if (e != NULL) {
    e->refs++;
    LRU_Remove(e);
    LRU_Append(e);
  }
  return reinterpret_cast<Handle*>(e);
}

void* LRUCache::Value(Handle* handle) {
  return reinterpret_cast<LRUHandle*>(handle)->value;
}

void LRUCache::Release(Handle* handle) {
  MutexLock l(&mutex_);
  Unref(reinterpret_cast<LRUHandle*>(handle));
}

Cache::Handle* LRUCache::Insert(const Slice& key, void* value, size_t charge,
                                void (*deleter)(const Slice& key, void* value)) {
  MutexLock l(&mutex_);

  // -1 because the one-byte key_data[1] array is already counted in
  // sizeof(LRUHandle); the key is copied into the trailing bytes.
  LRUHandle* e = reinterpret_cast<LRUHandle*>(
      malloc(sizeof(LRUHandle)-1 + key.size()));
  e->value = value;
  e->deleter = deleter;
  e->charge = charge;
  e->key_length = key.size();
  e->refs = 2;  // One from LRUCache, one for the returned handle
  memcpy(e->key_data, key.data(), key.size());
  LRU_Append(e);
  usage_ += charge;

  LRUHandle* old = table_.Insert(e);
  if (old != NULL) {
    LRU_Remove(old);
    Unref(old);
  }

  // Evict oldest entries until total usage fits within capacity.
  while (usage_ > capacity_ && lru_.next != &lru_) {
    LRUHandle* old = lru_.next;
    LRU_Remove(old);
    table_.Remove(old);
    Unref(old);
  }

  return reinterpret_cast<Handle*>(e);
}

void LRUCache::Erase(const Slice& key) {
  MutexLock l(&mutex_);

  LRUHandle dummy;
  dummy.next = &dummy;
  dummy.value = const_cast<Slice*>(&key);
  LRUHandle* e = table_.Remove(&dummy);
  if (e != NULL) {
    LRU_Remove(e);
    Unref(e);
  }
}

uint64_t LRUCache::NewId() {
  MutexLock l(&mutex_);
  return ++(last_id_);
}

}  // end anonymous namespace

Cache* NewLRUCache(size_t capacity) {
  return new LRUCache(capacity);
}

}  // namespace leveldb
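To make the reference-counting contract above concrete, here is a small usage sketch of the Cache interface this file implements. It assumes the public header leveldb/cache.h (which exposes Slice and NewLRUCache) is on the include path; the DeleteString helper and the choice of charge = 1 are illustrative, not part of the original file.

#include <assert.h>
#include <string>
#include "leveldb/cache.h"

// Deleter invoked by the cache when an entry's last reference is dropped.
static void DeleteString(const leveldb::Slice& key, void* value) {
  delete reinterpret_cast<std::string*>(value);
}

int main() {
  // Capacity is measured in "charge" units chosen by the caller.
  leveldb::Cache* cache = leveldb::NewLRUCache(100);

  // Insert returns a handle that already holds one reference for us.
  leveldb::Cache::Handle* h = cache->Insert(
      leveldb::Slice("key"), new std::string("value"), 1, &DeleteString);
  cache->Release(h);  // the cache itself still holds a reference

  h = cache->Lookup(leveldb::Slice("key"));
  if (h != NULL) {
    assert(*reinterpret_cast<std::string*>(cache->Value(h)) == "value");
    cache->Release(h);  // every successful Lookup must be Released
  }

  cache->Erase(leveldb::Slice("key"));
  delete cache;  // the destructor asserts no unreleased handles remain
  return 0;
}

Note the two-reference scheme in Insert (refs = 2): one reference belongs to the LRU list, one to the returned handle, so an entry evicted while a caller still holds its handle is only freed once the caller calls Release.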