Inline defaults in options.

This CL moves default values for
leveldb::{Options,ReadOptions,WriteOptions} from constructors to member
declarations, and removes now-redundant comments stating the defaults.

-------------
Created by MOE: https://github.com/google/moe
MOE_MIGRATED_REVID=239271242
This commit is contained in:
costan 2019-03-19 14:34:51 -07:00 committed by Victor Costan
parent 9ce30510d4
commit 201f77d137
3 changed files with 20 additions and 62 deletions

View File

@@ -469,7 +469,6 @@ class FaultInjectionTest {
void DeleteAllData() { void DeleteAllData() {
Iterator* iter = db_->NewIterator(ReadOptions()); Iterator* iter = db_->NewIterator(ReadOptions());
WriteOptions options;
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
ASSERT_OK(db_->Delete(WriteOptions(), iter->key())); ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
} }

View File

@@ -42,20 +42,17 @@ struct LEVELDB_EXPORT Options {
const Comparator* comparator; const Comparator* comparator;
// If true, the database will be created if it is missing. // If true, the database will be created if it is missing.
// Default: false bool create_if_missing = false;
bool create_if_missing;
// If true, an error is raised if the database already exists. // If true, an error is raised if the database already exists.
// Default: false bool error_if_exists = false;
bool error_if_exists;
// If true, the implementation will do aggressive checking of the // If true, the implementation will do aggressive checking of the
// data it is processing and will stop early if it detects any // data it is processing and will stop early if it detects any
// errors. This may have unforeseen ramifications: for example, a // errors. This may have unforeseen ramifications: for example, a
// corruption of one DB entry may cause a large number of entries to // corruption of one DB entry may cause a large number of entries to
// become unreadable or for the entire DB to become unopenable. // become unreadable or for the entire DB to become unopenable.
// Default: false bool paranoid_checks = false;
bool paranoid_checks;
// Use the specified object to interact with the environment, // Use the specified object to interact with the environment,
// e.g. to read/write files, schedule background work, etc. // e.g. to read/write files, schedule background work, etc.
@@ -65,8 +62,7 @@ struct LEVELDB_EXPORT Options {
// Any internal progress/error information generated by the db will // Any internal progress/error information generated by the db will
// be written to info_log if it is non-null, or to a file stored // be written to info_log if it is non-null, or to a file stored
// in the same directory as the DB contents if info_log is null. // in the same directory as the DB contents if info_log is null.
// Default: nullptr Logger* info_log = nullptr;
Logger* info_log;
// ------------------- // -------------------
// Parameters that affect performance // Parameters that affect performance
@@ -79,39 +75,30 @@ struct LEVELDB_EXPORT Options {
// so you may wish to adjust this parameter to control memory usage. // so you may wish to adjust this parameter to control memory usage.
// Also, a larger write buffer will result in a longer recovery time // Also, a larger write buffer will result in a longer recovery time
// the next time the database is opened. // the next time the database is opened.
// size_t write_buffer_size = 4 * 1024 * 1024;
// Default: 4MB
size_t write_buffer_size;
// Number of open files that can be used by the DB. You may need to // Number of open files that can be used by the DB. You may need to
// increase this if your database has a large working set (budget // increase this if your database has a large working set (budget
// one open file per 2MB of working set). // one open file per 2MB of working set).
// int max_open_files = 1000;
// Default: 1000
int max_open_files;
// Control over blocks (user data is stored in a set of blocks, and // Control over blocks (user data is stored in a set of blocks, and
// a block is the unit of reading from disk). // a block is the unit of reading from disk).
// If non-null, use the specified cache for blocks. // If non-null, use the specified cache for blocks.
// If null, leveldb will automatically create and use an 8MB internal cache. // If null, leveldb will automatically create and use an 8MB internal cache.
// Default: nullptr Cache* block_cache = nullptr;
Cache* block_cache;
// Approximate size of user data packed per block. Note that the // Approximate size of user data packed per block. Note that the
// block size specified here corresponds to uncompressed data. The // block size specified here corresponds to uncompressed data. The
// actual size of the unit read from disk may be smaller if // actual size of the unit read from disk may be smaller if
// compression is enabled. This parameter can be changed dynamically. // compression is enabled. This parameter can be changed dynamically.
// size_t block_size = 4 * 1024;
// Default: 4K
size_t block_size;
// Number of keys between restart points for delta encoding of keys. // Number of keys between restart points for delta encoding of keys.
// This parameter can be changed dynamically. Most clients should // This parameter can be changed dynamically. Most clients should
// leave this parameter alone. // leave this parameter alone.
// int block_restart_interval = 16;
// Default: 16
int block_restart_interval;
// Leveldb will write up to this amount of bytes to a file before // Leveldb will write up to this amount of bytes to a file before
// switching to a new one. // switching to a new one.
@@ -121,9 +108,7 @@ struct LEVELDB_EXPORT Options {
// compactions and hence longer latency/performance hiccups. // compactions and hence longer latency/performance hiccups.
// Another reason to increase this parameter might be when you are // Another reason to increase this parameter might be when you are
// initially populating a large database. // initially populating a large database.
// size_t max_file_size = 2 * 1024 * 1024;
// Default: 2MB
size_t max_file_size;
// Compress blocks using the specified compression algorithm. This // Compress blocks using the specified compression algorithm. This
// parameter can be changed dynamically. // parameter can be changed dynamically.
@@ -139,20 +124,18 @@ struct LEVELDB_EXPORT Options {
// worth switching to kNoCompression. Even if the input data is // worth switching to kNoCompression. Even if the input data is
// incompressible, the kSnappyCompression implementation will // incompressible, the kSnappyCompression implementation will
// efficiently detect that and will switch to uncompressed mode. // efficiently detect that and will switch to uncompressed mode.
CompressionType compression; CompressionType compression = kSnappyCompression;
// EXPERIMENTAL: If true, append to existing MANIFEST and log files // EXPERIMENTAL: If true, append to existing MANIFEST and log files
// when a database is opened. This can significantly speed up open. // when a database is opened. This can significantly speed up open.
// //
// Default: currently false, but may become true later. // Default: currently false, but may become true later.
bool reuse_logs; bool reuse_logs = false;
// If non-null, use the specified filter policy to reduce disk reads. // If non-null, use the specified filter policy to reduce disk reads.
// Many applications will benefit from passing the result of // Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here. // NewBloomFilterPolicy() here.
// const FilterPolicy* filter_policy = nullptr;
// Default: nullptr
const FilterPolicy* filter_policy;
// Create an Options object with default values for all fields. // Create an Options object with default values for all fields.
Options(); Options();
@@ -162,26 +145,19 @@ struct LEVELDB_EXPORT Options {
struct LEVELDB_EXPORT ReadOptions { struct LEVELDB_EXPORT ReadOptions {
// If true, all data read from underlying storage will be // If true, all data read from underlying storage will be
// verified against corresponding checksums. // verified against corresponding checksums.
// Default: false bool verify_checksums = false;
bool verify_checksums;
// Should the data read for this iteration be cached in memory? // Should the data read for this iteration be cached in memory?
// Callers may wish to set this field to false for bulk scans. // Callers may wish to set this field to false for bulk scans.
// Default: true bool fill_cache = true;
bool fill_cache;
// If "snapshot" is non-null, read as of the supplied snapshot // If "snapshot" is non-null, read as of the supplied snapshot
// (which must belong to the DB that is being read and which must // (which must belong to the DB that is being read and which must
// not have been released). If "snapshot" is null, use an implicit // not have been released). If "snapshot" is null, use an implicit
// snapshot of the state at the beginning of this read operation. // snapshot of the state at the beginning of this read operation.
// Default: nullptr const Snapshot* snapshot = nullptr;
const Snapshot* snapshot;
ReadOptions() ReadOptions() = default;
: verify_checksums(false),
fill_cache(true),
snapshot(nullptr) {
}
}; };
// Options that control write operations // Options that control write operations
@@ -200,13 +176,9 @@ struct LEVELDB_EXPORT WriteOptions {
// crash semantics as the "write()" system call. A DB write // crash semantics as the "write()" system call. A DB write
// with sync==true has similar crash semantics to a "write()" // with sync==true has similar crash semantics to a "write()"
// system call followed by "fsync()". // system call followed by "fsync()".
// bool sync = false;
// Default: false
bool sync;
WriteOptions() WriteOptions() = default;
: sync(false) {
}
}; };
} // namespace leveldb } // namespace leveldb

View File

@@ -11,20 +11,7 @@ namespace leveldb {
Options::Options() Options::Options()
: comparator(BytewiseComparator()), : comparator(BytewiseComparator()),
create_if_missing(false), env(Env::Default()) {
error_if_exists(false),
paranoid_checks(false),
env(Env::Default()),
info_log(nullptr),
write_buffer_size(4<<20),
max_open_files(1000),
block_cache(nullptr),
block_size(4096),
block_restart_interval(16),
max_file_size(2<<20),
compression(kSnappyCompression),
reuse_logs(false),
filter_policy(nullptr) {
} }
} // namespace leveldb } // namespace leveldb