Diffstat (limited to 'src/leveldb/db/version_set.cc')
-rw-r--r-- | src/leveldb/db/version_set.cc | 517
1 file changed, 272 insertions(+), 245 deletions(-)
diff --git a/src/leveldb/db/version_set.cc b/src/leveldb/db/version_set.cc index 2cb6d80ed3..cd07346ea8 100644 --- a/src/leveldb/db/version_set.cc +++ b/src/leveldb/db/version_set.cc @@ -4,8 +4,10 @@ #include "db/version_set.h" -#include <algorithm> #include <stdio.h> + +#include <algorithm> + #include "db/filename.h" #include "db/log_reader.h" #include "db/log_writer.h" @@ -84,8 +86,7 @@ Version::~Version() { } int FindFile(const InternalKeyComparator& icmp, - const std::vector<FileMetaData*>& files, - const Slice& key) { + const std::vector<FileMetaData*>& files, const Slice& key) { uint32_t left = 0; uint32_t right = files.size(); while (left < right) { @@ -104,26 +105,25 @@ int FindFile(const InternalKeyComparator& icmp, return right; } -static bool AfterFile(const Comparator* ucmp, - const Slice* user_key, const FileMetaData* f) { - // NULL user_key occurs before all keys and is therefore never after *f - return (user_key != NULL && +static bool AfterFile(const Comparator* ucmp, const Slice* user_key, + const FileMetaData* f) { + // null user_key occurs before all keys and is therefore never after *f + return (user_key != nullptr && ucmp->Compare(*user_key, f->largest.user_key()) > 0); } -static bool BeforeFile(const Comparator* ucmp, - const Slice* user_key, const FileMetaData* f) { - // NULL user_key occurs after all keys and is therefore never before *f - return (user_key != NULL && +static bool BeforeFile(const Comparator* ucmp, const Slice* user_key, + const FileMetaData* f) { + // null user_key occurs after all keys and is therefore never before *f + return (user_key != nullptr && ucmp->Compare(*user_key, f->smallest.user_key()) < 0); } -bool SomeFileOverlapsRange( - const InternalKeyComparator& icmp, - bool disjoint_sorted_files, - const std::vector<FileMetaData*>& files, - const Slice* smallest_user_key, - const Slice* largest_user_key) { +bool SomeFileOverlapsRange(const InternalKeyComparator& icmp, + bool disjoint_sorted_files, + const std::vector<FileMetaData*>& files, + const Slice* smallest_user_key, + const Slice* largest_user_key) { const Comparator* ucmp = icmp.user_comparator(); if (!disjoint_sorted_files) { // Need to check against all files @@ -141,10 +141,11 @@ bool SomeFileOverlapsRange( // Binary search over file list uint32_t index = 0; - if (smallest_user_key != NULL) { + if (smallest_user_key != nullptr) { // Find the earliest possible internal key for smallest_user_key - InternalKey small(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek); - index = FindFile(icmp, files, small.Encode()); + InternalKey small_key(*smallest_user_key, kMaxSequenceNumber, + kValueTypeForSeek); + index = FindFile(icmp, files, small_key.Encode()); } if (index >= files.size()) { @@ -164,25 +165,21 @@ class Version::LevelFileNumIterator : public Iterator { public: LevelFileNumIterator(const InternalKeyComparator& icmp, const std::vector<FileMetaData*>* flist) - : icmp_(icmp), - flist_(flist), - index_(flist->size()) { // Marks as invalid - } - virtual bool Valid() const { - return index_ < flist_->size(); + : icmp_(icmp), flist_(flist), index_(flist->size()) { // Marks as invalid } - virtual void Seek(const Slice& target) { + bool Valid() const override { return index_ < flist_->size(); } + void Seek(const Slice& target) override { index_ = FindFile(icmp_, *flist_, target); } - virtual void SeekToFirst() { index_ = 0; } - virtual void SeekToLast() { + void SeekToFirst() override { index_ = 0; } + void SeekToLast() override { index_ = flist_->empty() ? 
0 : flist_->size() - 1; } - virtual void Next() { + void Next() override { assert(Valid()); index_++; } - virtual void Prev() { + void Prev() override { assert(Valid()); if (index_ == 0) { index_ = flist_->size(); // Marks as invalid @@ -190,17 +187,18 @@ class Version::LevelFileNumIterator : public Iterator { index_--; } } - Slice key() const { + Slice key() const override { assert(Valid()); return (*flist_)[index_]->largest.Encode(); } - Slice value() const { + Slice value() const override { assert(Valid()); EncodeFixed64(value_buf_, (*flist_)[index_]->number); - EncodeFixed64(value_buf_+8, (*flist_)[index_]->file_size); + EncodeFixed64(value_buf_ + 8, (*flist_)[index_]->file_size); return Slice(value_buf_, sizeof(value_buf_)); } - virtual Status status() const { return Status::OK(); } + Status status() const override { return Status::OK(); } + private: const InternalKeyComparator icmp_; const std::vector<FileMetaData*>* const flist_; @@ -210,16 +208,14 @@ class Version::LevelFileNumIterator : public Iterator { mutable char value_buf_[16]; }; -static Iterator* GetFileIterator(void* arg, - const ReadOptions& options, +static Iterator* GetFileIterator(void* arg, const ReadOptions& options, const Slice& file_value) { TableCache* cache = reinterpret_cast<TableCache*>(arg); if (file_value.size() != 16) { return NewErrorIterator( Status::Corruption("FileReader invoked with unexpected value")); } else { - return cache->NewIterator(options, - DecodeFixed64(file_value.data()), + return cache->NewIterator(options, DecodeFixed64(file_value.data()), DecodeFixed64(file_value.data() + 8)); } } @@ -227,17 +223,16 @@ static Iterator* GetFileIterator(void* arg, Iterator* Version::NewConcatenatingIterator(const ReadOptions& options, int level) const { return NewTwoLevelIterator( - new LevelFileNumIterator(vset_->icmp_, &files_[level]), - &GetFileIterator, vset_->table_cache_, options); + new LevelFileNumIterator(vset_->icmp_, &files_[level]), &GetFileIterator, + vset_->table_cache_, options); } void Version::AddIterators(const ReadOptions& options, std::vector<Iterator*>* iters) { // Merge all level zero files together since they may overlap for (size_t i = 0; i < files_[0].size(); i++) { - iters->push_back( - vset_->table_cache_->NewIterator( - options, files_[0][i]->number, files_[0][i]->file_size)); + iters->push_back(vset_->table_cache_->NewIterator( + options, files_[0][i]->number, files_[0][i]->file_size)); } // For levels > 0, we can use a concatenating iterator that sequentially @@ -264,7 +259,7 @@ struct Saver { Slice user_key; std::string* value; }; -} +} // namespace static void SaveValue(void* arg, const Slice& ikey, const Slice& v) { Saver* s = reinterpret_cast<Saver*>(arg); ParsedInternalKey parsed_key; @@ -284,10 +279,8 @@ static bool NewestFirst(FileMetaData* a, FileMetaData* b) { return a->number > b->number; } -void Version::ForEachOverlapping(Slice user_key, Slice internal_key, - void* arg, +void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg, bool (*func)(void*, int, FileMetaData*)) { - // TODO(sanjay): Change Version::Get() to use this function. const Comparator* ucmp = vset_->icmp_.user_comparator(); // Search level-0 in order from newest to oldest. 
@@ -329,110 +322,89 @@ void Version::ForEachOverlapping(Slice user_key, Slice internal_key, } } -Status Version::Get(const ReadOptions& options, - const LookupKey& k, - std::string* value, - GetStats* stats) { - Slice ikey = k.internal_key(); - Slice user_key = k.user_key(); - const Comparator* ucmp = vset_->icmp_.user_comparator(); - Status s; - - stats->seek_file = NULL; +Status Version::Get(const ReadOptions& options, const LookupKey& k, + std::string* value, GetStats* stats) { + stats->seek_file = nullptr; stats->seek_file_level = -1; - FileMetaData* last_file_read = NULL; - int last_file_read_level = -1; - // We can search level-by-level since entries never hop across - // levels. Therefore we are guaranteed that if we find data - // in an smaller level, later levels are irrelevant. - std::vector<FileMetaData*> tmp; - FileMetaData* tmp2; - for (int level = 0; level < config::kNumLevels; level++) { - size_t num_files = files_[level].size(); - if (num_files == 0) continue; + struct State { + Saver saver; + GetStats* stats; + const ReadOptions* options; + Slice ikey; + FileMetaData* last_file_read; + int last_file_read_level; - // Get the list of files to search in this level - FileMetaData* const* files = &files_[level][0]; - if (level == 0) { - // Level-0 files may overlap each other. Find all files that - // overlap user_key and process them in order from newest to oldest. - tmp.reserve(num_files); - for (uint32_t i = 0; i < num_files; i++) { - FileMetaData* f = files[i]; - if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 && - ucmp->Compare(user_key, f->largest.user_key()) <= 0) { - tmp.push_back(f); - } - } - if (tmp.empty()) continue; + VersionSet* vset; + Status s; + bool found; - std::sort(tmp.begin(), tmp.end(), NewestFirst); - files = &tmp[0]; - num_files = tmp.size(); - } else { - // Binary search to find earliest index whose largest key >= ikey. - uint32_t index = FindFile(vset_->icmp_, files_[level], ikey); - if (index >= num_files) { - files = NULL; - num_files = 0; - } else { - tmp2 = files[index]; - if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) { - // All of "tmp2" is past any data for user_key - files = NULL; - num_files = 0; - } else { - files = &tmp2; - num_files = 1; - } - } - } + static bool Match(void* arg, int level, FileMetaData* f) { + State* state = reinterpret_cast<State*>(arg); - for (uint32_t i = 0; i < num_files; ++i) { - if (last_file_read != NULL && stats->seek_file == NULL) { + if (state->stats->seek_file == nullptr && + state->last_file_read != nullptr) { // We have had more than one seek for this read. Charge the 1st file. 
- stats->seek_file = last_file_read; - stats->seek_file_level = last_file_read_level; + state->stats->seek_file = state->last_file_read; + state->stats->seek_file_level = state->last_file_read_level; } - FileMetaData* f = files[i]; - last_file_read = f; - last_file_read_level = level; - - Saver saver; - saver.state = kNotFound; - saver.ucmp = ucmp; - saver.user_key = user_key; - saver.value = value; - s = vset_->table_cache_->Get(options, f->number, f->file_size, - ikey, &saver, SaveValue); - if (!s.ok()) { - return s; + state->last_file_read = f; + state->last_file_read_level = level; + + state->s = state->vset->table_cache_->Get(*state->options, f->number, + f->file_size, state->ikey, + &state->saver, SaveValue); + if (!state->s.ok()) { + state->found = true; + return false; } - switch (saver.state) { + switch (state->saver.state) { case kNotFound: - break; // Keep searching in other files + return true; // Keep searching in other files case kFound: - return s; + state->found = true; + return false; case kDeleted: - s = Status::NotFound(Slice()); // Use empty error message for speed - return s; + return false; case kCorrupt: - s = Status::Corruption("corrupted key for ", user_key); - return s; + state->s = + Status::Corruption("corrupted key for ", state->saver.user_key); + state->found = true; + return false; } + + // Not reached. Added to avoid false compilation warnings of + // "control reaches end of non-void function". + return false; } - } + }; + + State state; + state.found = false; + state.stats = stats; + state.last_file_read = nullptr; + state.last_file_read_level = -1; - return Status::NotFound(Slice()); // Use an empty error message for speed + state.options = &options; + state.ikey = k.internal_key(); + state.vset = vset_; + + state.saver.state = kNotFound; + state.saver.ucmp = vset_->icmp_.user_comparator(); + state.saver.user_key = k.user_key(); + state.saver.value = value; + + ForEachOverlapping(state.saver.user_key, state.ikey, &state, &State::Match); + + return state.found ? 
state.s : Status::NotFound(Slice()); } bool Version::UpdateStats(const GetStats& stats) { FileMetaData* f = stats.seek_file; - if (f != NULL) { + if (f != nullptr) { f->allowed_seeks--; - if (f->allowed_seeks <= 0 && file_to_compact_ == NULL) { + if (f->allowed_seeks <= 0 && file_to_compact_ == nullptr) { file_to_compact_ = f; file_to_compact_level_ = stats.seek_file_level; return true; @@ -479,9 +451,7 @@ bool Version::RecordReadSample(Slice internal_key) { return false; } -void Version::Ref() { - ++refs_; -} +void Version::Ref() { ++refs_; } void Version::Unref() { assert(this != &vset_->dummy_versions_); @@ -492,16 +462,14 @@ void Version::Unref() { } } -bool Version::OverlapInLevel(int level, - const Slice* smallest_user_key, +bool Version::OverlapInLevel(int level, const Slice* smallest_user_key, const Slice* largest_user_key) { return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level], smallest_user_key, largest_user_key); } -int Version::PickLevelForMemTableOutput( - const Slice& smallest_user_key, - const Slice& largest_user_key) { +int Version::PickLevelForMemTableOutput(const Slice& smallest_user_key, + const Slice& largest_user_key) { int level = 0; if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) { // Push to next level if there is no overlap in next level, @@ -528,40 +496,39 @@ int Version::PickLevelForMemTableOutput( } // Store in "*inputs" all files in "level" that overlap [begin,end] -void Version::GetOverlappingInputs( - int level, - const InternalKey* begin, - const InternalKey* end, - std::vector<FileMetaData*>* inputs) { +void Version::GetOverlappingInputs(int level, const InternalKey* begin, + const InternalKey* end, + std::vector<FileMetaData*>* inputs) { assert(level >= 0); assert(level < config::kNumLevels); inputs->clear(); Slice user_begin, user_end; - if (begin != NULL) { + if (begin != nullptr) { user_begin = begin->user_key(); } - if (end != NULL) { + if (end != nullptr) { user_end = end->user_key(); } const Comparator* user_cmp = vset_->icmp_.user_comparator(); - for (size_t i = 0; i < files_[level].size(); ) { + for (size_t i = 0; i < files_[level].size();) { FileMetaData* f = files_[level][i++]; const Slice file_start = f->smallest.user_key(); const Slice file_limit = f->largest.user_key(); - if (begin != NULL && user_cmp->Compare(file_limit, user_begin) < 0) { + if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) { // "f" is completely before specified range; skip it - } else if (end != NULL && user_cmp->Compare(file_start, user_end) > 0) { + } else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) { // "f" is completely after specified range; skip it } else { inputs->push_back(f); if (level == 0) { // Level-0 files may overlap each other. So check if the newly // added file has expanded the range. If so, restart search. 
- if (begin != NULL && user_cmp->Compare(file_start, user_begin) < 0) { + if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) { user_begin = file_start; inputs->clear(); i = 0; - } else if (end != NULL && user_cmp->Compare(file_limit, user_end) > 0) { + } else if (end != nullptr && + user_cmp->Compare(file_limit, user_end) > 0) { user_end = file_limit; inputs->clear(); i = 0; @@ -629,9 +596,7 @@ class VersionSet::Builder { public: // Initialize a builder with the files from *base and other info from *vset - Builder(VersionSet* vset, Version* base) - : vset_(vset), - base_(base) { + Builder(VersionSet* vset, Version* base) : vset_(vset), base_(base) { base_->Ref(); BySmallestKey cmp; cmp.internal_comparator = &vset_->icmp_; @@ -645,8 +610,8 @@ class VersionSet::Builder { const FileSet* added = levels_[level].added_files; std::vector<FileMetaData*> to_unref; to_unref.reserve(added->size()); - for (FileSet::const_iterator it = added->begin(); - it != added->end(); ++it) { + for (FileSet::const_iterator it = added->begin(); it != added->end(); + ++it) { to_unref.push_back(*it); } delete added; @@ -671,12 +636,9 @@ class VersionSet::Builder { } // Delete files - const VersionEdit::DeletedFileSet& del = edit->deleted_files_; - for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin(); - iter != del.end(); - ++iter) { - const int level = iter->first; - const uint64_t number = iter->second; + for (const auto& deleted_file_set_kvp : edit->deleted_files_) { + const int level = deleted_file_set_kvp.first; + const uint64_t number = deleted_file_set_kvp.second; levels_[level].deleted_files.insert(number); } @@ -699,7 +661,7 @@ class VersionSet::Builder { // same as the compaction of 40KB of data. We are a little // conservative and allow approximately one seek for every 16KB // of data before triggering a compaction. 
- f->allowed_seeks = (f->file_size / 16384); + f->allowed_seeks = static_cast<int>((f->file_size / 16384U)); if (f->allowed_seeks < 100) f->allowed_seeks = 100; levels_[level].deleted_files.erase(f->number); @@ -717,20 +679,17 @@ class VersionSet::Builder { const std::vector<FileMetaData*>& base_files = base_->files_[level]; std::vector<FileMetaData*>::const_iterator base_iter = base_files.begin(); std::vector<FileMetaData*>::const_iterator base_end = base_files.end(); - const FileSet* added = levels_[level].added_files; - v->files_[level].reserve(base_files.size() + added->size()); - for (FileSet::const_iterator added_iter = added->begin(); - added_iter != added->end(); - ++added_iter) { + const FileSet* added_files = levels_[level].added_files; + v->files_[level].reserve(base_files.size() + added_files->size()); + for (const auto& added_file : *added_files) { // Add all smaller files listed in base_ - for (std::vector<FileMetaData*>::const_iterator bpos - = std::upper_bound(base_iter, base_end, *added_iter, cmp); - base_iter != bpos; - ++base_iter) { + for (std::vector<FileMetaData*>::const_iterator bpos = + std::upper_bound(base_iter, base_end, added_file, cmp); + base_iter != bpos; ++base_iter) { MaybeAddFile(v, level, *base_iter); } - MaybeAddFile(v, level, *added_iter); + MaybeAddFile(v, level, added_file); } // Add remaining base files @@ -742,7 +701,7 @@ class VersionSet::Builder { // Make sure there is no overlap in levels > 0 if (level > 0) { for (uint32_t i = 1; i < v->files_[level].size(); i++) { - const InternalKey& prev_end = v->files_[level][i-1]->largest; + const InternalKey& prev_end = v->files_[level][i - 1]->largest; const InternalKey& this_begin = v->files_[level][i]->smallest; if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) { fprintf(stderr, "overlapping ranges in same level %s vs. %s\n", @@ -763,7 +722,7 @@ class VersionSet::Builder { std::vector<FileMetaData*>* files = &v->files_[level]; if (level > 0 && !files->empty()) { // Must not overlap - assert(vset_->icmp_.Compare((*files)[files->size()-1]->largest, + assert(vset_->icmp_.Compare((*files)[files->size() - 1]->largest, f->smallest) < 0); } f->refs++; @@ -772,8 +731,7 @@ class VersionSet::Builder { } }; -VersionSet::VersionSet(const std::string& dbname, - const Options* options, +VersionSet::VersionSet(const std::string& dbname, const Options* options, TableCache* table_cache, const InternalKeyComparator* cmp) : env_(options->env), @@ -786,10 +744,10 @@ VersionSet::VersionSet(const std::string& dbname, last_sequence_(0), log_number_(0), prev_log_number_(0), - descriptor_file_(NULL), - descriptor_log_(NULL), + descriptor_file_(nullptr), + descriptor_log_(nullptr), dummy_versions_(this), - current_(NULL) { + current_(nullptr) { AppendVersion(new Version(this)); } @@ -804,7 +762,7 @@ void VersionSet::AppendVersion(Version* v) { // Make "v" current assert(v->refs_ == 0); assert(v != current_); - if (current_ != NULL) { + if (current_ != nullptr) { current_->Unref(); } current_ = v; @@ -844,10 +802,10 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { // a temporary file that contains a snapshot of the current version. std::string new_manifest_file; Status s; - if (descriptor_log_ == NULL) { + if (descriptor_log_ == nullptr) { // No reason to unlock *mu here since we only hit this path in the // first call to LogAndApply (when opening the database). 
- assert(descriptor_file_ == NULL); + assert(descriptor_file_ == nullptr); new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_); edit->SetNextFile(next_file_number_); s = env_->NewWritableFile(new_manifest_file, &descriptor_file_); @@ -893,8 +851,8 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { if (!new_manifest_file.empty()) { delete descriptor_log_; delete descriptor_file_; - descriptor_log_ = NULL; - descriptor_file_ = NULL; + descriptor_log_ = nullptr; + descriptor_file_ = nullptr; env_->DeleteFile(new_manifest_file); } } @@ -902,10 +860,10 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { return s; } -Status VersionSet::Recover(bool *save_manifest) { +Status VersionSet::Recover(bool* save_manifest) { struct LogReporter : public log::Reader::Reporter { Status* status; - virtual void Corruption(size_t bytes, const Status& s) { + void Corruption(size_t bytes, const Status& s) override { if (this->status->ok()) *this->status = s; } }; @@ -916,7 +874,7 @@ Status VersionSet::Recover(bool *save_manifest) { if (!s.ok()) { return s; } - if (current.empty() || current[current.size()-1] != '\n') { + if (current.empty() || current[current.size() - 1] != '\n') { return Status::Corruption("CURRENT file does not end with newline"); } current.resize(current.size() - 1); @@ -925,6 +883,10 @@ Status VersionSet::Recover(bool *save_manifest) { SequentialFile* file; s = env_->NewSequentialFile(dscname, &file); if (!s.ok()) { + if (s.IsNotFound()) { + return Status::Corruption("CURRENT points to a non-existent file", + s.ToString()); + } return s; } @@ -941,7 +903,8 @@ Status VersionSet::Recover(bool *save_manifest) { { LogReporter reporter; reporter.status = &s; - log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/); + log::Reader reader(file, &reporter, true /*checksum*/, + 0 /*initial_offset*/); Slice record; std::string scratch; while (reader.ReadRecord(&record, &scratch) && s.ok()) { @@ -982,7 +945,7 @@ Status VersionSet::Recover(bool *save_manifest) { } } delete file; - file = NULL; + file = nullptr; if (s.ok()) { if (!have_next_file) { @@ -1040,12 +1003,12 @@ bool VersionSet::ReuseManifest(const std::string& dscname, return false; } - assert(descriptor_file_ == NULL); - assert(descriptor_log_ == NULL); + assert(descriptor_file_ == nullptr); + assert(descriptor_log_ == nullptr); Status r = env_->NewAppendableFile(dscname, &descriptor_file_); if (!r.ok()) { Log(options_->info_log, "Reuse MANIFEST: %s\n", r.ToString().c_str()); - assert(descriptor_file_ == NULL); + assert(descriptor_file_ == nullptr); return false; } @@ -1066,7 +1029,7 @@ void VersionSet::Finalize(Version* v) { int best_level = -1; double best_score = -1; - for (int level = 0; level < config::kNumLevels-1; level++) { + for (int level = 0; level < config::kNumLevels - 1; level++) { double score; if (level == 0) { // We treat level-0 specially by bounding the number of files @@ -1081,7 +1044,7 @@ void VersionSet::Finalize(Version* v) { // setting, or very high compression ratios, or lots of // overwrites/deletions). score = v->files_[level].size() / - static_cast<double>(config::kL0_CompactionTrigger); + static_cast<double>(config::kL0_CompactionTrigger); } else { // Compute the ratio of current size to size limit. 
const uint64_t level_bytes = TotalFileSize(v->files_[level]); @@ -1137,16 +1100,12 @@ int VersionSet::NumLevelFiles(int level) const { const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const { // Update code if kNumLevels changes - assert(config::kNumLevels == 7); + static_assert(config::kNumLevels == 7, ""); snprintf(scratch->buffer, sizeof(scratch->buffer), - "files[ %d %d %d %d %d %d %d ]", - int(current_->files_[0].size()), - int(current_->files_[1].size()), - int(current_->files_[2].size()), - int(current_->files_[3].size()), - int(current_->files_[4].size()), - int(current_->files_[5].size()), - int(current_->files_[6].size())); + "files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()), + int(current_->files_[1].size()), int(current_->files_[2].size()), + int(current_->files_[3].size()), int(current_->files_[4].size()), + int(current_->files_[5].size()), int(current_->files_[6].size())); return scratch->buffer; } @@ -1172,7 +1131,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) { Table* tableptr; Iterator* iter = table_cache_->NewIterator( ReadOptions(), files[i]->number, files[i]->file_size, &tableptr); - if (tableptr != NULL) { + if (tableptr != nullptr) { result += tableptr->ApproximateOffsetOf(ikey.Encode()); } delete iter; @@ -1183,8 +1142,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) { } void VersionSet::AddLiveFiles(std::set<uint64_t>* live) { - for (Version* v = dummy_versions_.next_; - v != &dummy_versions_; + for (Version* v = dummy_versions_.next_; v != &dummy_versions_; v = v->next_) { for (int level = 0; level < config::kNumLevels; level++) { const std::vector<FileMetaData*>& files = v->files_[level]; @@ -1207,7 +1165,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() { for (int level = 1; level < config::kNumLevels - 1; level++) { for (size_t i = 0; i < current_->files_[level].size(); i++) { const FileMetaData* f = current_->files_[level][i]; - current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest, + current_->GetOverlappingInputs(level + 1, &f->smallest, &f->largest, &overlaps); const int64_t sum = TotalFileSize(overlaps); if (sum > result) { @@ -1222,8 +1180,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() { // *smallest, *largest. 
// REQUIRES: inputs is not empty void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs, - InternalKey* smallest, - InternalKey* largest) { + InternalKey* smallest, InternalKey* largest) { assert(!inputs.empty()); smallest->Clear(); largest->Clear(); @@ -1248,8 +1205,7 @@ void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs, // REQUIRES: inputs is not empty void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1, const std::vector<FileMetaData*>& inputs2, - InternalKey* smallest, - InternalKey* largest) { + InternalKey* smallest, InternalKey* largest) { std::vector<FileMetaData*> all = inputs1; all.insert(all.end(), inputs2.begin(), inputs2.end()); GetRange(all, smallest, largest); @@ -1271,8 +1227,8 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) { if (c->level() + which == 0) { const std::vector<FileMetaData*>& files = c->inputs_[which]; for (size_t i = 0; i < files.size(); i++) { - list[num++] = table_cache_->NewIterator( - options, files[i]->number, files[i]->file_size); + list[num++] = table_cache_->NewIterator(options, files[i]->number, + files[i]->file_size); } } else { // Create concatenating iterator for the files from this level @@ -1295,11 +1251,11 @@ Compaction* VersionSet::PickCompaction() { // We prefer compactions triggered by too much data in a level over // the compactions triggered by seeks. const bool size_compaction = (current_->compaction_score_ >= 1); - const bool seek_compaction = (current_->file_to_compact_ != NULL); + const bool seek_compaction = (current_->file_to_compact_ != nullptr); if (size_compaction) { level = current_->compaction_level_; assert(level >= 0); - assert(level+1 < config::kNumLevels); + assert(level + 1 < config::kNumLevels); c = new Compaction(options_, level); // Pick the first file that comes after compact_pointer_[level] @@ -1320,7 +1276,7 @@ Compaction* VersionSet::PickCompaction() { c = new Compaction(options_, level); c->inputs_[0].push_back(current_->file_to_compact_); } else { - return NULL; + return nullptr; } c->input_version_ = current_; @@ -1342,12 +1298,94 @@ Compaction* VersionSet::PickCompaction() { return c; } +// Finds the largest key in a vector of files. Returns true if files it not +// empty. 
+bool FindLargestKey(const InternalKeyComparator& icmp, + const std::vector<FileMetaData*>& files, + InternalKey* largest_key) { + if (files.empty()) { + return false; + } + *largest_key = files[0]->largest; + for (size_t i = 1; i < files.size(); ++i) { + FileMetaData* f = files[i]; + if (icmp.Compare(f->largest, *largest_key) > 0) { + *largest_key = f->largest; + } + } + return true; +} + +// Finds minimum file b2=(l2, u2) in level file for which l2 > u1 and +// user_key(l2) = user_key(u1) +FileMetaData* FindSmallestBoundaryFile( + const InternalKeyComparator& icmp, + const std::vector<FileMetaData*>& level_files, + const InternalKey& largest_key) { + const Comparator* user_cmp = icmp.user_comparator(); + FileMetaData* smallest_boundary_file = nullptr; + for (size_t i = 0; i < level_files.size(); ++i) { + FileMetaData* f = level_files[i]; + if (icmp.Compare(f->smallest, largest_key) > 0 && + user_cmp->Compare(f->smallest.user_key(), largest_key.user_key()) == + 0) { + if (smallest_boundary_file == nullptr || + icmp.Compare(f->smallest, smallest_boundary_file->smallest) < 0) { + smallest_boundary_file = f; + } + } + } + return smallest_boundary_file; +} + +// Extracts the largest file b1 from |compaction_files| and then searches for a +// b2 in |level_files| for which user_key(u1) = user_key(l2). If it finds such a +// file b2 (known as a boundary file) it adds it to |compaction_files| and then +// searches again using this new upper bound. +// +// If there are two blocks, b1=(l1, u1) and b2=(l2, u2) and +// user_key(u1) = user_key(l2), and if we compact b1 but not b2 then a +// subsequent get operation will yield an incorrect result because it will +// return the record from b2 in level i rather than from b1 because it searches +// level by level for records matching the supplied user key. +// +// parameters: +// in level_files: List of files to search for boundary files. +// in/out compaction_files: List of files to extend by adding boundary files. +void AddBoundaryInputs(const InternalKeyComparator& icmp, + const std::vector<FileMetaData*>& level_files, + std::vector<FileMetaData*>* compaction_files) { + InternalKey largest_key; + + // Quick return if compaction_files is empty. + if (!FindLargestKey(icmp, *compaction_files, &largest_key)) { + return; + } + + bool continue_searching = true; + while (continue_searching) { + FileMetaData* smallest_boundary_file = + FindSmallestBoundaryFile(icmp, level_files, largest_key); + + // If a boundary file was found advance largest_key, otherwise we're done. 
+ if (smallest_boundary_file != NULL) { + compaction_files->push_back(smallest_boundary_file); + largest_key = smallest_boundary_file->largest; + } else { + continue_searching = false; + } + } +} + void VersionSet::SetupOtherInputs(Compaction* c) { const int level = c->level(); InternalKey smallest, largest; + + AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]); GetRange(c->inputs_[0], &smallest, &largest); - current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]); + current_->GetOverlappingInputs(level + 1, &smallest, &largest, + &c->inputs_[1]); // Get entire range covered by compaction InternalKey all_start, all_limit; @@ -1358,6 +1396,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) { if (!c->inputs_[1].empty()) { std::vector<FileMetaData*> expanded0; current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0); + AddBoundaryInputs(icmp_, current_->files_[level], &expanded0); const int64_t inputs0_size = TotalFileSize(c->inputs_[0]); const int64_t inputs1_size = TotalFileSize(c->inputs_[1]); const int64_t expanded0_size = TotalFileSize(expanded0); @@ -1367,18 +1406,14 @@ void VersionSet::SetupOtherInputs(Compaction* c) { InternalKey new_start, new_limit; GetRange(expanded0, &new_start, &new_limit); std::vector<FileMetaData*> expanded1; - current_->GetOverlappingInputs(level+1, &new_start, &new_limit, + current_->GetOverlappingInputs(level + 1, &new_start, &new_limit, &expanded1); if (expanded1.size() == c->inputs_[1].size()) { Log(options_->info_log, "Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n", - level, - int(c->inputs_[0].size()), - int(c->inputs_[1].size()), - long(inputs0_size), long(inputs1_size), - int(expanded0.size()), - int(expanded1.size()), - long(expanded0_size), long(inputs1_size)); + level, int(c->inputs_[0].size()), int(c->inputs_[1].size()), + long(inputs0_size), long(inputs1_size), int(expanded0.size()), + int(expanded1.size()), long(expanded0_size), long(inputs1_size)); smallest = new_start; largest = new_limit; c->inputs_[0] = expanded0; @@ -1395,13 +1430,6 @@ void VersionSet::SetupOtherInputs(Compaction* c) { &c->grandparents_); } - if (false) { - Log(options_->info_log, "Compacting %d '%s' .. '%s'", - level, - smallest.DebugString().c_str(), - largest.DebugString().c_str()); - } - // Update the place where we will do the next compaction for this level. // We update this immediately instead of waiting for the VersionEdit // to be applied so that if the compaction fails, we will try a different @@ -1410,14 +1438,12 @@ void VersionSet::SetupOtherInputs(Compaction* c) { c->edit_.SetCompactPointer(level, largest); } -Compaction* VersionSet::CompactRange( - int level, - const InternalKey* begin, - const InternalKey* end) { +Compaction* VersionSet::CompactRange(int level, const InternalKey* begin, + const InternalKey* end) { std::vector<FileMetaData*> inputs; current_->GetOverlappingInputs(level, begin, end, &inputs); if (inputs.empty()) { - return NULL; + return nullptr; } // Avoid compacting too much in one shot in case the range is large. 
@@ -1448,7 +1474,7 @@ Compaction* VersionSet::CompactRange( Compaction::Compaction(const Options* options, int level) : level_(level), max_output_file_size_(MaxFileSizeForLevel(options, level)), - input_version_(NULL), + input_version_(nullptr), grandparent_index_(0), seen_key_(false), overlapped_bytes_(0) { @@ -1458,7 +1484,7 @@ Compaction::Compaction(const Options* options, int level) } Compaction::~Compaction() { - if (input_version_ != NULL) { + if (input_version_ != nullptr) { input_version_->Unref(); } } @@ -1486,7 +1512,7 @@ bool Compaction::IsBaseLevelForKey(const Slice& user_key) { const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator(); for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) { const std::vector<FileMetaData*>& files = input_version_->files_[lvl]; - for (; level_ptrs_[lvl] < files.size(); ) { + while (level_ptrs_[lvl] < files.size()) { FileMetaData* f = files[level_ptrs_[lvl]]; if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) { // We've advanced far enough @@ -1507,8 +1533,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) { // Scan to find earliest grandparent file that contains key. const InternalKeyComparator* icmp = &vset->icmp_; while (grandparent_index_ < grandparents_.size() && - icmp->Compare(internal_key, - grandparents_[grandparent_index_]->largest.Encode()) > 0) { + icmp->Compare(internal_key, + grandparents_[grandparent_index_]->largest.Encode()) > + 0) { if (seen_key_) { overlapped_bytes_ += grandparents_[grandparent_index_]->file_size; } @@ -1526,9 +1553,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) { } void Compaction::ReleaseInputs() { - if (input_version_ != NULL) { + if (input_version_ != nullptr) { input_version_->Unref(); - input_version_ = NULL; + input_version_ = nullptr; } } |
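The one behavioural change buried in the reformatting above is the new AddBoundaryInputs()/FindSmallestBoundaryFile() pair, which pulls "boundary" files (files whose smallest key shares a user key with the compaction's current largest key) into the compaction input set. What follows is a minimal, self-contained sketch of that idea using invented stand-in types (ToyKey, ToyFile, AddBoundaryFiles); it is illustration only, not leveldb's actual code, but it mirrors the search loop added in the diff and shows why leaving the boundary file behind would be unsafe: as the diff's own comment explains, Get() searches level by level and returns the first match, so compacting b1 to the next level without b2 could surface b2's stale entry for the shared user key.

// Sketch of the boundary-file rule, with toy stand-ins for leveldb types.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for leveldb's internal key: user key plus sequence number,
// ordered by user key ascending, then sequence number descending.
struct ToyKey {
  std::string user_key;
  uint64_t sequence;
};

static int Compare(const ToyKey& a, const ToyKey& b) {
  if (a.user_key != b.user_key) return a.user_key < b.user_key ? -1 : 1;
  if (a.sequence != b.sequence) return a.sequence > b.sequence ? -1 : 1;
  return 0;
}

// Stand-in for FileMetaData: only the key range matters here.
struct ToyFile {
  ToyKey smallest;
  ToyKey largest;
};

// Mirrors the shape of AddBoundaryInputs(): repeatedly find the file whose
// smallest key sorts after the current upper bound but shares its user key,
// add it to the compaction, extend the upper bound, and search again.
void AddBoundaryFiles(const std::vector<ToyFile*>& level_files,
                      std::vector<ToyFile*>* compaction_files) {
  if (compaction_files->empty()) return;
  ToyKey largest = (*compaction_files)[0]->largest;
  for (ToyFile* f : *compaction_files) {
    if (Compare(f->largest, largest) > 0) largest = f->largest;
  }
  bool keep_searching = true;
  while (keep_searching) {
    ToyFile* boundary = nullptr;
    for (ToyFile* f : level_files) {
      if (Compare(f->smallest, largest) > 0 &&
          f->smallest.user_key == largest.user_key &&
          (boundary == nullptr ||
           Compare(f->smallest, boundary->smallest) < 0)) {
        boundary = f;
      }
    }
    if (boundary != nullptr) {
      compaction_files->push_back(boundary);
      largest = boundary->largest;
    } else {
      keep_searching = false;
    }
  }
}

int main() {
  // b1 ends with ("key2", seq 6) and b2 begins with ("key2", seq 5): same
  // user key, older record in b2. Compacting b1 alone would leave the stale
  // "key2" record in b2 at the shallower level, where Get() finds it first.
  ToyFile b1{{"key1", 10}, {"key2", 6}};
  ToyFile b2{{"key2", 5}, {"key3", 4}};
  std::vector<ToyFile*> level_files = {&b1, &b2};
  std::vector<ToyFile*> compaction_files = {&b1};

  AddBoundaryFiles(level_files, &compaction_files);
  assert(compaction_files.size() == 2);  // b2 was pulled in as a boundary file
  std::cout << "compaction now includes " << compaction_files.size()
            << " files\n";
  return 0;
}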