Correct class/structure declaration order.
1. Correct the class/struct declaration order to be in accordance with
   the Google C++ style guide[1].
2. For non-copyable classes, switch from non-implemented private
   methods to explicitly deleted[2] methods (see the sketch after the
   change summary below).
3. Minor const and member initialization fixes.

[1] https://google.github.io/styleguide/cppguide.html#Declaration_Order
[2] http://eel.is/c++draft/dcl.fct.def.delete

PiperOrigin-RevId: 246521844
cmumford committed May 3, 2019
1 parent c784d63 commit 9bd23c7
Showing 44 changed files with 412 additions and 403 deletions.
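The two idioms referenced in points 1 and 2 of the commit message are contrasted in the sketch below. This is a minimal, hypothetical example (the Widget classes and their members are not taken from the LevelDB sources): declaration order follows the Google C++ style guide, with the public interface first and data members last, and copying is disabled with C++11 = delete instead of declared-but-unimplemented private methods.

#include <string>

// Old idiom: copying is suppressed by declaring, but never defining,
// private copy operations. A stray copy made inside the class or by a
// friend is only caught at link time, and the data members sit at the
// top of the class.
class OldStyleWidget {
 private:
  std::string name_;
  int size_;

 public:
  OldStyleWidget() : size_(0) {}
  void Resize(int new_size) { size_ = new_size; }

 private:
  // No copying allowed -- intentionally not implemented.
  OldStyleWidget(const OldStyleWidget&);
  void operator=(const OldStyleWidget&);
};

// New idiom: Google-style declaration order (public section first, data
// members last) and explicitly deleted copy operations, so any attempted
// copy is rejected at compile time with a clear diagnostic.
class NewStyleWidget {
 public:
  NewStyleWidget() : size_(0) {}

  NewStyleWidget(const NewStyleWidget&) = delete;
  NewStyleWidget& operator=(const NewStyleWidget&) = delete;

  void Resize(int new_size) { size_ = new_size; }

 private:
  std::string name_;
  int size_;
};

Placing the deleted declarations in the public section, immediately after the constructor, matches how the diffs below arrange them (for example in DBImpl and DBIter) and produces clearer compiler errors than the old private, unimplemented form.
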
11 changes: 6 additions & 5 deletions db/autocompact_test.cc
@@ -12,11 +12,6 @@ namespace leveldb {

class AutoCompactTest {
public:
std::string dbname_;
Cache* tiny_cache_;
Options options_;
DB* db_;

AutoCompactTest() {
dbname_ = test::TmpDir() + "/autocompact_test";
tiny_cache_ = NewLRUCache(100);
@@ -47,6 +42,12 @@ class AutoCompactTest {
}

void DoReads(int n);

private:
std::string dbname_;
Cache* tiny_cache_;
Options options_;
DB* db_;
};

static const int kValueSize = 200 * 1024;
32 changes: 17 additions & 15 deletions db/c.cc
@@ -84,12 +84,6 @@ struct leveldb_filelock_t {
};

struct leveldb_comparator_t : public Comparator {
void* state_;
void (*destructor_)(void*);
int (*compare_)(void*, const char* a, size_t alen, const char* b,
size_t blen);
const char* (*name_)(void*);

virtual ~leveldb_comparator_t() { (*destructor_)(state_); }

virtual int Compare(const Slice& a, const Slice& b) const {
@@ -101,18 +95,15 @@ struct leveldb_comparator_t : public Comparator {
// No-ops since the C binding does not support key shortening methods.
virtual void FindShortestSeparator(std::string*, const Slice&) const {}
virtual void FindShortSuccessor(std::string* key) const {}
};

struct leveldb_filterpolicy_t : public FilterPolicy {
void* state_;
void (*destructor_)(void*);
int (*compare_)(void*, const char* a, size_t alen, const char* b,
size_t blen);
const char* (*name_)(void*);
char* (*create_)(void*, const char* const* key_array,
const size_t* key_length_array, int num_keys,
size_t* filter_length);
unsigned char (*key_match_)(void*, const char* key, size_t length,
const char* filter, size_t filter_length);
};

struct leveldb_filterpolicy_t : public FilterPolicy {
virtual ~leveldb_filterpolicy_t() { (*destructor_)(state_); }

virtual const char* Name() const { return (*name_)(state_); }
@@ -134,6 +125,15 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
return (*key_match_)(state_, key.data(), key.size(), filter.data(),
filter.size());
}

void* state_;
void (*destructor_)(void*);
const char* (*name_)(void*);
char* (*create_)(void*, const char* const* key_array,
const size_t* key_length_array, int num_keys,
size_t* filter_length);
unsigned char (*key_match_)(void*, const char* key, size_t length,
const char* filter, size_t filter_length);
};

struct leveldb_env_t {
@@ -470,7 +470,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
// they delegate to a NewBloomFilterPolicy() instead of user
// supplied C functions.
struct Wrapper : public leveldb_filterpolicy_t {
const FilterPolicy* rep_;
static void DoNothing(void*) {}

~Wrapper() { delete rep_; }
const char* Name() const { return rep_->Name(); }
void CreateFilter(const Slice* keys, int n, std::string* dst) const {
@@ -479,7 +480,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
bool KeyMayMatch(const Slice& key, const Slice& filter) const {
return rep_->KeyMayMatch(key, filter);
}
static void DoNothing(void*) {}

const FilterPolicy* rep_;
};
Wrapper* wrapper = new Wrapper;
wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);
22 changes: 12 additions & 10 deletions db/corruption_test.cc
@@ -22,20 +22,14 @@ static const int kValueSize = 1000;

class CorruptionTest {
public:
test::ErrorEnv env_;
std::string dbname_;
Cache* tiny_cache_;
Options options_;
DB* db_;

CorruptionTest() {
tiny_cache_ = NewLRUCache(100);
CorruptionTest()
: db_(nullptr),
dbname_("/memenv/corruption_test"),
tiny_cache_(NewLRUCache(100)) {
options_.env = &env_;
options_.block_cache = tiny_cache_;
dbname_ = "/memenv/corruption_test";
DestroyDB(dbname_, options_);

db_ = nullptr;
options_.create_if_missing = true;
Reopen();
options_.create_if_missing = false;
@@ -185,6 +179,14 @@ class CorruptionTest {
Random r(k);
return test::RandomString(&r, kValueSize, storage);
}

test::ErrorEnv env_;
Options options_;
DB* db_;

private:
std::string dbname_;
Cache* tiny_cache_;
};

TEST(CorruptionTest, Recovery) {
37 changes: 19 additions & 18 deletions db/db_impl.cc
@@ -42,38 +42,23 @@ const int kNumNonTableCacheFiles = 10;

// Information kept for every waiting writer
struct DBImpl::Writer {
explicit Writer(port::Mutex* mu)
: batch(nullptr), sync(false), done(false), cv(mu) {}

Status status;
WriteBatch* batch;
bool sync;
bool done;
port::CondVar cv;

explicit Writer(port::Mutex* mu)
: batch(nullptr), sync(false), done(false), cv(mu) {}
};

struct DBImpl::CompactionState {
Compaction* const compaction;

// Sequence numbers < smallest_snapshot are not significant since we
// will never have to service a snapshot below smallest_snapshot.
// Therefore if we have seen a sequence number S <= smallest_snapshot,
// we can drop all entries for the same key with sequence numbers < S.
SequenceNumber smallest_snapshot;

// Files produced by compaction
struct Output {
uint64_t number;
uint64_t file_size;
InternalKey smallest, largest;
};
std::vector<Output> outputs;

// State kept for output being generated
WritableFile* outfile;
TableBuilder* builder;

uint64_t total_bytes;

Output* current_output() { return &outputs[outputs.size() - 1]; }

@@ -83,6 +68,22 @@ struct DBImpl::CompactionState {
outfile(nullptr),
builder(nullptr),
total_bytes(0) {}

Compaction* const compaction;

// Sequence numbers < smallest_snapshot are not significant since we
// will never have to service a snapshot below smallest_snapshot.
// Therefore if we have seen a sequence number S <= smallest_snapshot,
// we can drop all entries for the same key with sequence numbers < S.
SequenceNumber smallest_snapshot;

std::vector<Output> outputs;

// State kept for output being generated
WritableFile* outfile;
TableBuilder* builder;

uint64_t total_bytes;
};

// Fix user-supplied options to be reasonable
64 changes: 33 additions & 31 deletions db/db_impl.h
@@ -29,6 +29,10 @@ class VersionSet;
class DBImpl : public DB {
public:
DBImpl(const Options& options, const std::string& dbname);

DBImpl(const DBImpl&) = delete;
DBImpl& operator=(const DBImpl&) = delete;

virtual ~DBImpl();

// Implementations of the DB interface
@@ -71,6 +75,31 @@ class DBImpl : public DB {
struct CompactionState;
struct Writer;

// Information for a manual compaction
struct ManualCompaction {
int level;
bool done;
const InternalKey* begin; // null means beginning of key range
const InternalKey* end; // null means end of key range
InternalKey tmp_storage; // Used to keep track of compaction progress
};

// Per level compaction stats. stats_[level] stores the stats for
// compactions that produced data for the specified "level".
struct CompactionStats {
CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}

void Add(const CompactionStats& c) {
this->micros += c.micros;
this->bytes_read += c.bytes_read;
this->bytes_written += c.bytes_written;
}

int64_t micros;
int64_t bytes_read;
int64_t bytes_written;
};

Iterator* NewInternalIterator(const ReadOptions&,
SequenceNumber* latest_snapshot,
uint32_t* seed);
@@ -121,6 +150,10 @@ class DBImpl : public DB {
Status InstallCompactionResults(CompactionState* compact)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);

const Comparator* user_comparator() const {
return internal_comparator_.user_comparator();
}

// Constant after construction
Env* const env_;
const InternalKeyComparator internal_comparator_;
@@ -161,45 +194,14 @@ class DBImpl : public DB {
// Has a background compaction been scheduled or is running?
bool background_compaction_scheduled_ GUARDED_BY(mutex_);

// Information for a manual compaction
struct ManualCompaction {
int level;
bool done;
const InternalKey* begin; // null means beginning of key range
const InternalKey* end; // null means end of key range
InternalKey tmp_storage; // Used to keep track of compaction progress
};
ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);

VersionSet* const versions_;

// Have we encountered a background error in paranoid mode?
Status bg_error_ GUARDED_BY(mutex_);

// Per level compaction stats. stats_[level] stores the stats for
// compactions that produced data for the specified "level".
struct CompactionStats {
int64_t micros;
int64_t bytes_read;
int64_t bytes_written;

CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}

void Add(const CompactionStats& c) {
this->micros += c.micros;
this->bytes_read += c.bytes_read;
this->bytes_written += c.bytes_written;
}
};
CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);

// No copying allowed
DBImpl(const DBImpl&);
void operator=(const DBImpl&);

const Comparator* user_comparator() const {
return internal_comparator_.user_comparator();
}
};

// Sanitize db options. The caller should delete result.info_log if
10 changes: 4 additions & 6 deletions db/db_iter.cc
@@ -55,6 +55,10 @@ class DBIter : public Iterator {
valid_(false),
rnd_(seed),
bytes_until_read_sampling_(RandomCompactionPeriod()) {}

DBIter(const DBIter&) = delete;
DBIter& operator=(const DBIter&) = delete;

virtual ~DBIter() { delete iter_; }
virtual bool Valid() const { return valid_; }
virtual Slice key() const {
@@ -106,19 +110,13 @@ class DBIter : public Iterator {
const Comparator* const user_comparator_;
Iterator* const iter_;
SequenceNumber const sequence_;

Status status_;
std::string saved_key_; // == current key when direction_==kReverse
std::string saved_value_; // == current raw value when direction_==kReverse
Direction direction_;
bool valid_;

Random rnd_;
size_t bytes_until_read_sampling_;

// No copying allowed
DBIter(const DBIter&);
void operator=(const DBIter&);
};

inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
24 changes: 12 additions & 12 deletions db/db_test.cc
@@ -40,10 +40,6 @@ static std::string RandomKey(Random* rnd) {

namespace {
class AtomicCounter {
private:
port::Mutex mu_;
int count_ GUARDED_BY(mu_);

public:
AtomicCounter() : count_(0) {}
void Increment() { IncrementBy(1); }
@@ -59,6 +55,10 @@ class AtomicCounter {
MutexLock l(&mu_);
count_ = 0;
}

private:
port::Mutex mu_;
int count_ GUARDED_BY(mu_);
};

void DelayMilliseconds(int millis) {
@@ -227,21 +227,14 @@ class SpecialEnv : public EnvWrapper {
};

class DBTest {
private:
const FilterPolicy* filter_policy_;

// Sequence of option configurations to try
enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
int option_config_;

public:
std::string dbname_;
SpecialEnv* env_;
DB* db_;

Options last_options_;

DBTest() : option_config_(kDefault), env_(new SpecialEnv(Env::Default())) {
DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
filter_policy_ = NewBloomFilterPolicy(10);
dbname_ = test::TmpDir() + "/db_test";
DestroyDB(dbname_, Options());
@@ -533,6 +526,13 @@ class DBTest {
}
return files_renamed;
}

private:
// Sequence of option configurations to try
enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };

const FilterPolicy* filter_policy_;
int option_config_;
};

TEST(DBTest, Empty) {